hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0944f3d49e659e1d21a529bdc1c50f31cfa4118e | 2,991 | py | Python | utils/enocean_equipment_profiles.py | sari-rev00/pyEnOceanDriver | ae94f121a4b0d4a6a93afbbfe094488ba0aaa2ad | [
"Apache-2.0"
] | 1 | 2022-01-25T01:43:40.000Z | 2022-01-25T01:43:40.000Z | utils/enocean_equipment_profiles.py | sari-rev00/pyEnOceanDriver | ae94f121a4b0d4a6a93afbbfe094488ba0aaa2ad | [
"Apache-2.0"
] | null | null | null | utils/enocean_equipment_profiles.py | sari-rev00/pyEnOceanDriver | ae94f121a4b0d4a6a93afbbfe094488ba0aaa2ad | [
"Apache-2.0"
] | null | null | null | # rocker switch -------------------
def F6_02_04(list_data):
    """Decode an EEP F6-02-04 (rocker switch) telegram payload.

    Parameters
    ----------
    list_data : list of str
        Single-element list holding the data byte as a numeric literal
        (e.g. "0x70"); parsed with ``int(x, 0)`` base auto-detection.

    Returns
    -------
    dict or None
        Flags (1/0) for the energy bow and the four rocker contacts,
        or None when the payload length does not match the profile.
    """
    # F6-02-04 carries exactly one data byte.
    if len(list_data) != 1:
        print("data length doesn't match specified EEP.")
        return None
    raw = int(list_data[0], 0)
    # Each flag is encoded in a single bit of the data byte.
    bit_map = [
        ("e_bow", 0b10000000),
        ("BI", 0b00001000),
        ("BO", 0b00000100),
        ("AI", 0b00000010),
        ("AO", 0b00000001),
    ]
    return {name: int(bool(raw & mask)) for name, mask in bit_map}
# magnet contact sensor ------------
def D5_00_01(list_data):
    """Decode an EEP D5-00-01 (single-input magnet contact) payload.

    Parameters
    ----------
    list_data : list of str
        Single-element list holding the data byte as a numeric literal.

    Returns
    -------
    dict or None
        ``state`` ("open"/"close") and ``learn`` flag (1/0), or None
        when the payload length does not match the profile.
    """
    if len(list_data) != 1:
        print("data length doesn't match specified EEP.")
        return None
    raw = int(list_data[0], 0)
    # Bit 0: contact closed; bit 3: learn (teach-in) button pressed.
    return {
        "state": "close" if raw & 0b00000001 else "open",
        "learn": 1 if raw & 0b00001000 else 0,
    }
# temperature sensor ----------------
def A5_02_05(list_data):
    """Decode an EEP A5-02-05 (temperature sensor, 0..40 degC) payload.

    Parameters
    ----------
    list_data : list of str
        Four data bytes as numeric literals; byte [-2] is the raw
        temperature, byte [-1] the status byte.

    Returns
    -------
    dict or None
        ``temperature_cdeg`` rounded to 1 decimal and a ``learn`` flag,
        or None when the payload length does not match the profile.
    """
    precision = 1  # decimal places for the derived temperature
    if len(list_data) != 4:
        print("data length doesn't match specified EEP.")
        return None
    status_byte = int(list_data[-1], 0)
    temp_byte = int(list_data[-2], 0)
    # Raw scale is inverted: 0xFF -> 0 degC, 0x00 -> 40 degC.
    temperature = round((0xFF - temp_byte) * 40 / 255, precision)
    return {
        "temperature_cdeg": temperature,
        "learn": 1 if status_byte & 0b00001000 else 0,
    }
# tempearture and humidity sensor ---------------
def A5_04_01(list_data):
    """Decode an EEP A5-04-01 (temperature + humidity sensor) payload.

    Parameters
    ----------
    list_data : list of str
        Four data bytes as numeric literals; byte [1] is humidity,
        byte [2] temperature, byte [3] the status byte.

    Returns
    -------
    dict or None
        Humidity (%), temperature (degC), ``learn`` and ``avail_temp``
        flags, or None when the payload length does not match.
    """
    humid_precision = 1
    temp_precision = 1
    if len(list_data) != 4:
        print("data length doesn't match specified EEP.")
        return None
    humidity_byte = int(list_data[1], 0)
    temperature_byte = int(list_data[2], 0)
    status_byte = int(list_data[3], 0)
    return {
        # 0..255 raw maps linearly onto 0..100 %rH.
        "humidity_percent": round(humidity_byte * 100 / 255, humid_precision),
        # 0..255 raw maps linearly onto 0..40 degC.
        "temperature_cdeg": round(temperature_byte * 40 / 255, temp_precision),
        # 4BS LRN bit is inverted: cleared bit means teach-in telegram.
        "learn": 0 if status_byte & 0b00001000 else 1,
        # Bit 1 flags that a temperature sensor is available.
        "avail_temp": 1 if status_byte & 0b00000010 else 0,
    }
# function pointer -----------------------
# Dispatch table: EEP identifier string -> matching telegram decoder.
# (The closing brace was previously fused with dataset-extraction
# residue, which made the module unparsable.)
eep_get_data = {
    "F6-02-04": F6_02_04,
    "D5-00-01": D5_00_01,
    "A5-02-05": A5_02_05,
    "A5-04-01": A5_04_01,
}
3c1ae9364e93cf5387a1b3d066c13ba19592a0a1 | 37,927 | py | Python | pandas/core/panel.py | theandygross/pandas | d420334d7e3e52fd812e0648bb3ef8e90c10cc82 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/panel.py | theandygross/pandas | d420334d7e3e52fd812e0648bb3ef8e90c10cc82 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/panel.py | theandygross/pandas | d420334d7e3e52fd812e0648bb3ef8e90c10cc82 | [
"BSD-3-Clause"
] | null | null | null | """
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
import operator
import sys
import numpy as np
from pandas.core.common import (PandasError, _mut_exclusive,
_try_sort, _default_index, _infer_dtype)
from pandas.core.index import (Factor, Index, MultiIndex, _ensure_index,
_get_combined_index, _union_indexes)
from pandas.core.indexing import _NDFrameIndexer
from pandas.core.internals import BlockManager, make_block, form_blocks
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.util import py3compat
from pandas.util.decorators import deprecate
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas._tseries as lib
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=['time', 'panel']):
"""
Returns a multi-index suitable for a panel-like DataFrame
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'), (1961, 'B'),
(1962, 'B'), (1960, 'C'), (1961, 'C'), (1962, 'C')], dtype=object)
or
>>> import numpy as np
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'), (1961, 'B'),
(1961, 'C'), (1962, 'A'), (1962, 'B'), (1962, 'C')], dtype=object)
"""
time, panels = _ensure_like_indices(time, panels)
time_factor = Factor(time)
panel_factor = Factor(panels)
labels = [time_factor.labels, panel_factor.labels]
levels = [time_factor.levels, panel_factor.levels]
return MultiIndex(levels, labels, sortorder=None, names=names)
class PanelError(Exception):
pass
def _arith_method(func, name):
# work only for scalars
def f(self, other):
if not np.isscalar(other):
raise ValueError('Simple arithmetic with Panel can only be '
'done with scalar values')
return self._combine(other, func)
f.__name__ = name
return f
def _panel_arith_method(op, name):
def f(self, other, axis='items'):
"""
Wrapper method for %s
Parameters
----------
other : DataFrame or Panel class
axis : {'items', 'major', 'minor'}
Axis to broadcast over
Returns
-------
Panel
"""
return self._combine(other, op, axis=axis)
f.__name__ = name
if __debug__:
f.__doc__ = f.__doc__ % str(op)
return f
_agg_doc = """
Return %(desc)s over requested axis
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
%(outname)s : DataFrame
"""
_na_info = """
NA/null values are %s.
If all values are NA, result will be NA"""
def _add_docs(method, desc, outname):
doc = _agg_doc % {'desc' : desc,
'outname' : outname}
method.__doc__ = doc
class Panel(NDFrame):
_AXIS_NUMBERS = {
'items' : 0,
'major_axis' : 1,
'minor_axis' : 2
}
_AXIS_ALIASES = {
'major' : 'major_axis',
'minor' : 'minor_axis'
}
_AXIS_NAMES = {
0 : 'items',
1 : 'major_axis',
2 : 'minor_axis'
}
# major
_default_stat_axis = 1
_het_axis = 0
items = lib.AxisProperty(0)
major_axis = lib.AxisProperty(1)
minor_axis = lib.AxisProperty(2)
__add__ = _arith_method(operator.add, '__add__')
__sub__ = _arith_method(operator.sub, '__sub__')
__truediv__ = _arith_method(operator.truediv, '__truediv__')
__floordiv__ = _arith_method(operator.floordiv, '__floordiv__')
__mul__ = _arith_method(operator.mul, '__mul__')
__pow__ = _arith_method(operator.pow, '__pow__')
__radd__ = _arith_method(operator.add, '__radd__')
__rmul__ = _arith_method(operator.mul, '__rmul__')
__rsub__ = _arith_method(lambda x, y: y - x, '__rsub__')
__rtruediv__ = _arith_method(lambda x, y: y / x, '__rtruediv__')
__rfloordiv__ = _arith_method(lambda x, y: y // x, '__rfloordiv__')
__rpow__ = _arith_method(lambda x, y: y ** x, '__rpow__')
if not py3compat.PY3:
__div__ = _arith_method(operator.div, '__div__')
__rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__')
    def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
                 copy=False, dtype=None):
        """
        Represents wide format panel data, stored as 3-dimensional array

        Parameters
        ----------
        data : ndarray (items x major x minor), or dict of DataFrames
        items : Index or array-like
            axis=0
        major_axis : Index or array-like
            axis=1
        minor_axis : Index or array-like
            axis=2
        dtype : dtype, default None
            Data type to force, otherwise infer
        copy : boolean, default False
            Copy data from inputs. Only affects DataFrame / 2d ndarray input
        """
        if data is None:
            data = {}
        passed_axes = [items, major_axis, minor_axis]
        axes = None
        if isinstance(data, BlockManager):
            # Already-built internals: only override the axes that were
            # explicitly passed, keep the manager's axes otherwise.
            if any(x is not None for x in passed_axes):
                axes = [x if x is not None else y
                        for x, y in zip(passed_axes, data.axes)]
            mgr = data
        elif isinstance(data, dict):
            # dict of DataFrames; alignment happens in _init_dict, so
            # copy/dtype have already been honored.
            mgr = self._init_dict(data, passed_axes, dtype=dtype)
            copy = False
            dtype = None
        elif isinstance(data, (np.ndarray, list)):
            mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
            copy = False
            dtype = None
        else:  # pragma: no cover
            raise PandasError('Panel constructor not properly called!')
        NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
@classmethod
def _from_axes(cls, data, axes):
# for construction from BlockManager
if isinstance(data, BlockManager):
return cls(data)
else:
items, major, minor = axes
return cls(data, items=items, major_axis=major,
minor_axis=minor, copy=False)
def _init_dict(self, data, axes, dtype=None):
items, major, minor = axes
# prefilter if items passed
if items is not None:
items = _ensure_index(items)
data = dict((k, v) for k, v in data.iteritems() if k in items)
else:
items = Index(_try_sort(data.keys()))
for k, v in data.iteritems():
if not isinstance(v, DataFrame):
data[k] = DataFrame(v)
if major is None:
indexes = [v.index for v in data.values()]
major = _union_indexes(indexes)
if minor is None:
indexes = [v.columns for v in data.values()]
minor = _union_indexes(indexes)
axes = [items, major, minor]
reshaped_data = data.copy() # shallow
# homogenize
item_shape = (1, len(major), len(minor))
for k in items:
if k not in data:
values = np.empty(item_shape, dtype=dtype)
values.fill(np.nan)
reshaped_data[k] = values
else:
v = data[k]
v = v.reindex(index=major, columns=minor, copy=False)
if dtype is not None:
v = v.astype(dtype)
values = v.values
shape = values.shape
reshaped_data[k] = values.reshape((1,) + shape)
# segregates dtypes and forms blocks matching to columns
blocks = form_blocks(reshaped_data, axes)
mgr = BlockManager(blocks, axes).consolidate()
return mgr
@property
def shape(self):
return len(self.items), len(self.major_axis), len(self.minor_axis)
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
Returns
-------
Panel
"""
from collections import defaultdict
orient = orient.lower()
if orient == 'minor':
new_data = defaultdict(dict)
for col, df in data.iteritems():
for item, s in df.iteritems():
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('only recognize items or minor for orientation')
data, index, columns = _homogenize_dict(data, intersect=intersect,
dtype=dtype)
items = Index(sorted(data.keys()))
return Panel(data, items, index, columns)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = _prep_ndarray(data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to %s' % dtype)
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = _default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
items = fixed_axes[0]
block = make_block(values, items, items)
return BlockManager([block], fixed_axes)
def __repr__(self):
class_name = str(self.__class__)
I, N, K = len(self.items), len(self.major_axis), len(self.minor_axis)
dims = 'Dimensions: %d (items) x %d (major) x %d (minor)' % (I, N, K)
if len(self.major_axis) > 0:
major = 'Major axis: %s to %s' % (self.major_axis[0],
self.major_axis[-1])
else:
major = 'Major axis: None'
if len(self.minor_axis) > 0:
minor = 'Minor axis: %s to %s' % (self.minor_axis[0],
self.minor_axis[-1])
else:
minor = 'Minor axis: None'
if len(self.items) > 0:
items = 'Items: %s to %s' % (self.items[0], self.items[-1])
else:
items = 'Items: None'
output = '%s\n%s\n%s\n%s\n%s' % (class_name, dims, items, major, minor)
return output
def __iter__(self):
return iter(self.items)
def iteritems(self):
for item in self.items:
yield item, self[item]
# Name that won't get automatically converted to items by 2to3. items is
# already in use for the first axis.
iterkv = iteritems
def _get_plane_axes(self, axis):
"""
"""
axis = self._get_axis_name(axis)
if axis == 'major_axis':
index = self.minor_axis
columns = self.items
if axis == 'minor_axis':
index = self.major_axis
columns = self.items
elif axis == 'items':
index = self.major_axis
columns = self.minor_axis
return index, columns
@property
def _constructor(self):
return Panel
# Fancy indexing
_ix = None
@property
def ix(self):
if self._ix is None:
self._ix = _NDFrameIndexer(self)
return self._ix
def _wrap_array(self, arr, axes, copy=False):
items, major, minor = axes
return self._constructor(arr, items=items, major_axis=major,
minor_axis=minor, copy=copy)
fromDict = from_dict
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparsePanel
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
"""
from pandas.core.sparse import SparsePanel
frames = dict(self.iterkv())
return SparsePanel(frames, items=self.items,
major_axis=self.major_axis,
minor_axis=self.minor_axis,
default_kind=kind,
default_fill_value=fill_value)
# TODO: needed?
def keys(self):
return list(self.items)
def _get_values(self):
self._consolidate_inplace()
return self._data.as_matrix()
values = property(fget=_get_values)
#----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, item, major, minor):
"""
Quickly retrieve single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
Returns
-------
value : scalar value
"""
# hm, two layers to the onion
frame = self._get_item_cache(item)
return frame.get_value(major, minor)
def set_value(self, item, major, minor, value):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
try:
frame = self._get_item_cache(item)
frame.set_value(major, minor, value)
return self
except KeyError:
ax1, ax2, ax3 = self._expand_axes((item, major, minor))
result = self.reindex(items=ax1, major=ax2, minor=ax3, copy=False)
likely_dtype = com._infer_dtype(value)
made_bigger = not np.array_equal(ax1, self.items)
# how to make this logic simpler?
if made_bigger:
com._possibly_cast_item(result, item, likely_dtype)
return result.set_value(item, major, minor, value)
def _box_item_values(self, key, values):
return DataFrame(values, index=self.major_axis, columns=self.minor_axis)
def __getattr__(self, name):
"""After regular attribute access, try looking up the name of an item.
This allows simpler access to items for interactive use."""
if name in self.items:
return self[name]
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def _slice(self, slobj, axis=0):
new_data = self._data.get_slice(slobj, axis=axis)
return self._constructor(new_data)
def __setitem__(self, key, value):
_, N, K = self.shape
if isinstance(value, DataFrame):
value = value.reindex(index=self.major_axis,
columns=self.minor_axis)
mat = value.values
elif isinstance(value, np.ndarray):
assert(value.shape == (N, K))
mat = np.asarray(value)
elif np.isscalar(value):
dtype = _infer_dtype(value)
mat = np.empty((N, K), dtype=dtype)
mat.fill(value)
mat = mat.reshape((1, N, K))
NDFrame._set_item(self, key, mat)
def pop(self, item):
"""
Return item slice from panel and delete from panel
Parameters
----------
key : object
Must be contained in panel's items
Returns
-------
y : DataFrame
"""
return NDFrame.pop(self, item)
def __getstate__(self):
"Returned pickled representation of the panel"
return self._data
def __setstate__(self, state):
# old Panel pickle
if isinstance(state, BlockManager):
self._data = state
elif len(state) == 4: # pragma: no cover
self._unpickle_panel_compat(state)
else: # pragma: no cover
raise ValueError('unrecognized pickle')
self._item_cache = {}
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
index, columns = self._get_plane_axes(axis)
return frame.reindex(index=index, columns=columns)
def reindex(self, major=None, items=None, minor=None, method=None,
major_axis=None, minor_axis=None, copy=True):
"""
Conform panel to new axis or axes
Parameters
----------
major : Index or sequence, default None
Can also use 'major_axis' keyword
items : Index or sequence, default None
minor : Index or sequence, default None
Can also use 'minor_axis' keyword
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
copy : boolean, default True
Return a new object, even if the passed indexes are the same
Returns
-------
Panel (new object)
"""
result = self
major = _mut_exclusive(major, major_axis)
minor = _mut_exclusive(minor, minor_axis)
if major is not None:
result = result._reindex_axis(major, method, 1, copy)
if minor is not None:
result = result._reindex_axis(minor, method, 2, copy)
if items is not None:
result = result._reindex_axis(items, method, 0, copy)
if result is self and copy:
raise ValueError('Must specify at least one axis')
return result
def reindex_like(self, other, method=None):
"""
Reindex Panel to match indices of another Panel
Parameters
----------
other : Panel
method : string or None
Returns
-------
reindexed : Panel
"""
# todo: object columns
return self.reindex(major=other.major_axis, items=other.items,
minor=other.minor_axis, method=method)
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif np.isscalar(other):
new_values = func(self.values, other)
return Panel(new_values, self.items, self.major_axis,
self.minor_axis)
def __neg__(self):
return -1 * self
def _combine_frame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return Panel(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func):
items = self.items + other.items
major = self.major_axis + other.major_axis
minor = self.minor_axis + other.minor_axis
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
result_values = func(this.values, other.values)
return Panel(result_values, items, major, minor)
def fillna(self, value=None, method='pad'):
"""
Fill NaN values using the specified method.
Member Series / TimeSeries are filled separately.
Parameters
----------
value : any kind (should be same type as array)
Value to use to fill holes (e.g. 0)
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
Returns
-------
y : DataFrame
See also
--------
DataFrame.reindex, DataFrame.asfreq
"""
if value is None:
result = {}
for col, s in self.iterkv():
result[col] = s.fillna(method=method, value=value)
return Panel.from_dict(result)
else:
new_data = self._data.fillna(value)
return Panel(new_data)
add = _panel_arith_method(operator.add, 'add')
subtract = sub = _panel_arith_method(operator.sub, 'subtract')
multiply = mul = _panel_arith_method(operator.mul, 'multiply')
try:
divide = div = _panel_arith_method(operator.div, 'divide')
except AttributeError: # pragma: no cover
# Python 3
divide = div = _panel_arith_method(operator.truediv, 'divide')
def major_xs(self, key, copy=True):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
copy : boolean, default False
Copy data
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
"""
return self.xs(key, axis=1, copy=copy)
def minor_xs(self, key, copy=True):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
copy : boolean, default False
Copy data
Returns
-------
y : DataFrame
index -> major axis, columns -> items
"""
return self.xs(key, axis=2, copy=copy)
def xs(self, key, axis=1, copy=True):
"""
Return slice of panel along selected axis
Parameters
----------
key : object
Label
axis : {'items', 'major', 'minor}, default 1/'major'
Returns
-------
y : DataFrame
"""
if axis == 0:
data = self[key]
if copy:
data = data.copy()
return data
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=copy)
return DataFrame(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
Mapping function for chosen access
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def swapaxes(self, axis1='major', axis2='minor'):
"""
Interchange axes and swap values axes appropriately
Returns
-------
y : Panel (new object)
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
raise ValueError('Cannot specify the same axis')
mapping = {i : j, j : i}
new_axes = (self._get_axis(mapping.get(k, k))
for k in range(3))
new_values = self.values.swapaxes(i, j).copy()
return Panel(new_values, *new_axes)
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
mask = com.notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
major_labels = np.arange(N).repeat(K)[selector]
# Anyone think of a better way to do this? np.repeat does not
# do what I want
minor_labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
minor_labels = minor_labels.ravel()[selector]
index = MultiIndex(levels=[self.major_axis, self.minor_axis],
labels=[major_labels, minor_labels],
names=['major', 'minor'])
return DataFrame(data, index=index, columns=self.items)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def filter(self, items):
"""
Restrict items in panel to input list
Parameters
----------
items : sequence
Returns
-------
y : Panel
"""
intersection = self.items.intersection(items)
return self.reindex(items=intersection)
def apply(self, func, axis='major'):
"""
Apply
Parameters
----------
func : numpy function
Signature should match numpy.{sum, mean, var, std} etc.
axis : {'major', 'minor', 'items'}
fill_value : boolean, default True
Replace NaN values with specified first
Returns
-------
result : DataFrame or Panel
"""
i = self._get_axis_number(axis)
result = np.apply_along_axis(func, i, self.values)
return self._wrap_result(result, axis=axis)
def _reduce(self, op, axis=0, skipna=True):
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, copy=True)
result = f(self.values)
index, columns = self._get_plane_axes(axis_name)
if axis_name != 'items':
result = result.T
return DataFrame(result, index=index, columns=columns)
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
index, columns = self._get_plane_axes(axis)
if axis != 'items':
result = result.T
return DataFrame(result, index=index, columns=columns)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i)
return self._wrap_result(result, axis)
def sum(self, axis='major', skipna=True):
return self._reduce(nanops.nansum, axis=axis, skipna=skipna)
_add_docs(sum, 'sum', 'sum')
def mean(self, axis='major', skipna=True):
return self._reduce(nanops.nanmean, axis=axis, skipna=skipna)
_add_docs(mean, 'mean', 'mean')
def var(self, axis='major', skipna=True):
return self._reduce(nanops.nanvar, axis=axis, skipna=skipna)
_add_docs(var, 'unbiased variance', 'variance')
def std(self, axis='major', skipna=True):
return self.var(axis=axis, skipna=skipna).apply(np.sqrt)
_add_docs(std, 'unbiased standard deviation', 'stdev')
def skew(self, axis='major', skipna=True):
return self._reduce(nanops.nanskew, axis=axis, skipna=skipna)
_add_docs(std, 'unbiased skewness', 'skew')
def prod(self, axis='major', skipna=True):
return self._reduce(nanops.nanprod, axis=axis, skipna=skipna)
_add_docs(prod, 'product', 'prod')
def compound(self, axis='major', skipna=True):
return (1 + self).prod(axis=axis, skipna=skipna) - 1
_add_docs(compound, 'compounded percentage', 'compounded')
def median(self, axis='major', skipna=True):
return self._reduce(nanops.nanmedian, axis=axis, skipna=skipna)
_add_docs(median, 'median', 'median')
def max(self, axis='major', skipna=True):
return self._reduce(nanops.nanmax, axis=axis, skipna=skipna)
_add_docs(max, 'maximum', 'maximum')
def min(self, axis='major', skipna=True):
return self._reduce(nanops.nanmin, axis=axis, skipna=skipna)
_add_docs(min, 'minimum', 'minimum')
def shift(self, lags, axis='major'):
"""
Shift major or minor axis by specified number of lags. Drops periods
Parameters
----------
lags : int
Needs to be a positive number currently
axis : {'major', 'minor'}
Returns
-------
shifted : Panel
"""
values = self.values
items = self.items
major_axis = self.major_axis
minor_axis = self.minor_axis
if axis == 'major':
values = values[:, :-lags, :]
major_axis = major_axis[lags:]
elif axis == 'minor':
values = values[:, :, :-lags]
minor_axis = minor_axis[lags:]
else:
raise ValueError('Invalid axis')
return Panel(values, items=items, major_axis=major_axis,
minor_axis=minor_axis)
def truncate(self, before=None, after=None, axis='major'):
"""Function truncates a sorted Panel before and/or after some
particular values on the requested axis
Parameters
----------
before : date
Left boundary
after : date
Right boundary
axis : {'major', 'minor', 'items'}
Returns
-------
Panel
"""
axis = self._get_axis_name(axis)
index = self._get_axis(axis)
beg_slice, end_slice = index.slice_locs(before, after)
new_index = index[beg_slice:end_slice]
return self.reindex(**{axis : new_index})
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
Join items with other Panel either on major and minor axes column
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.tools.merge import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing multiple '
'panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
def _get_join_index(self, other, how):
    """Return the (major, minor) axes on which a join of ``self`` and
    ``other`` should be aligned.

    Parameters
    ----------
    other : Panel
    how : {'left', 'right', 'inner', 'outer'}

    Returns
    -------
    tuple of (join_major, join_minor)

    Raises
    ------
    ValueError
        If *how* is not a recognized join method.
    """
    if how == 'left':
        join_major, join_minor = self.major_axis, self.minor_axis
    elif how == 'right':
        join_major, join_minor = other.major_axis, other.minor_axis
    elif how == 'inner':
        join_major = self.major_axis.intersection(other.major_axis)
        join_minor = self.minor_axis.intersection(other.minor_axis)
    elif how == 'outer':
        join_major = self.major_axis.union(other.major_axis)
        join_minor = self.minor_axis.union(other.minor_axis)
    else:
        # Previously an unrecognized 'how' fell through all branches and
        # raised a confusing NameError on the unbound locals below; fail
        # fast with a clear message instead.
        raise ValueError('do not recognize join method %s' % how)
    return join_major, join_minor
# Backwards-compatibility aliases so older user code importing these names
# keeps working (presumably pre-refactor class names — confirm against
# pandas history).
WidePanel = Panel
LongPanel = DataFrame
def _prep_ndarray(values, copy=True):
    """Coerce *values* into a 3-dimensional ndarray.

    Non-ndarray input is converted with ``np.asarray``; string dtypes are
    promoted to object dtype.  An existing ndarray is copied only when
    *copy* is true.
    """
    if isinstance(values, np.ndarray):
        if copy:
            values = values.copy()
    else:
        values = np.asarray(values)
        if issubclass(values.dtype.type, basestring):
            # NumPy strings are a pain, convert to object
            values = np.array(values, dtype=object, copy=True)
    assert(values.ndim == 3)
    return values
def _homogenize_dict(frames, intersect=True, dtype=None):
    """Conform a dict of DataFrame-like objects to a common index and
    set of columns (the intersection by default, otherwise the union).

    Parameters
    ----------
    frames : dict
    intersect : boolean, default True

    Returns
    -------
    dict of aligned frames, index, columns
    """
    adj_frames = {}
    for key, value in frames.iteritems():
        # Plain dicts are promoted to DataFrames so every entry exposes
        # .index / .columns below.
        if isinstance(value, dict):
            adj_frames[key] = DataFrame(value)
        else:
            adj_frames[key] = value

    index = _get_combined_index(
        [df.index for df in adj_frames.values()], intersect=intersect)
    columns = _get_combined_index(
        [df.columns for df in adj_frames.values()], intersect=intersect)

    result = {}
    for key, frame in adj_frames.iteritems():
        result[key] = frame.reindex(index=index, columns=columns,
                                    copy=False)
    return result, index, columns
def _monotonic(arr):
    """Return True when *arr* is non-decreasing."""
    has_decrease = (arr[1:] < arr[:-1]).any()
    return not has_decrease
def install_ipython_completers():  # pragma: no cover
    """Teach IPython's tab-completion machinery to offer a Panel's item
    names as attribute completions."""
    from IPython.utils.generics import complete_object

    @complete_object.when_type(Panel)
    def complete_dataframe(obj, prev_completions):
        # Only string items that are valid Python identifiers can be
        # accessed as attributes.
        extra = [c for c in obj.items
                 if isinstance(c, basestring) and py3compat.isidentifier(c)]
        return prev_completions + extra
# Importing IPython pulls in roughly 200 modules, so only hook the
# completers in when the host process has already imported IPython
# (in which case those modules are loaded anyway).
if "IPython" in sys.modules:  # pragma: no cover
    try:
        install_ipython_completers()
    except Exception:
        # Completion support is strictly best-effort; never break import.
        pass
| 31.267106 | 80 | 0.571308 |
bc150f4c595637d7b15acd1fca8b7276297ac2d4 | 9,066 | py | Python | python3/vimspector/stack_trace.py | tinmarino/vimspector | 68c47edabb627705e6c8beff1909f63a5041e6cd | [
"Apache-2.0"
] | null | null | null | python3/vimspector/stack_trace.py | tinmarino/vimspector | 68c47edabb627705e6c8beff1909f63a5041e6cd | [
"Apache-2.0"
] | null | null | null | python3/vimspector/stack_trace.py | tinmarino/vimspector | 68c47edabb627705e6c8beff1909f63a5041e6cd | [
"Apache-2.0"
] | null | null | null | # vimspector - A multi-language debugging system for Vim
# Copyright 2018 Ben Jackson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import vim
import os
import logging
from vimspector import utils
class StackTraceView(object):
    """Scratch-buffer UI listing debuggee threads and their stack frames.

    Keeps a mapping from rendered buffer lines to the thread/frame shown
    on that line so that <CR> can expand a thread or jump to a frame.
    """

    def __init__(self, session, connection, buf):
        self._logger = logging.getLogger(__name__)
        utils.SetUpLogging(self._logger)

        self._buf = buf
        self._session = session
        self._connection = connection

        self._currentThread = None
        self._currentFrame = None
        self._threads = []
        self._sources = {}

        utils.SetUpScratchBuffer(self._buf, 'vimspector.StackTrace')
        vim.current.buffer = self._buf
        vim.command('nnoremap <buffer> <CR> :call vimspector#GoToFrame()<CR>')

        self._line_to_frame = {}
        self._line_to_thread = {}

        # TODO: We really need a proper state model
        #
        # AWAIT_CONNECTION -- OnServerReady / RequestThreads --> REQUESTING_THREADS
        # REQUESTING -- OnGotThreads / RequestScopes --> REQUESTING_SCOPES
        #
        # When we attach using gdbserver, this whole thing breaks because we
        # request the threads over and over and get duff data back on later
        # threads.
        self._requesting_threads = False

    def GetCurrentThreadId(self):
        """Id of the thread currently considered 'current', if any."""
        return self._currentThread

    def GetCurrentFrame(self):
        """The frame last jumped to, if any."""
        return self._currentFrame

    def Clear(self):
        """Drop all cached thread/frame/source state and blank the buffer."""
        self._currentFrame = None
        self._currentThread = None
        self._threads = []
        self._sources = {}
        with utils.ModifiableScratchBuffer(self._buf):
            utils.ClearBuffer(self._buf)

    def ConnectionUp(self, connection):
        self._connection = connection
        self._requesting_threads = False

    def ConnectionClosed(self):
        self.Clear()
        self._connection = None

    def Reset(self):
        self.Clear()
        # TODO: delete the buffer ?

    def LoadThreads(self, infer_current_frame):
        """Request the thread list from the adapter and redraw.

        If a request is already in flight, return immediately; the
        in-flight handler retries when it sees an empty thread list.
        """
        pending_request = False
        if self._requesting_threads:
            pending_request = True
            return

        def on_threads_response(message):
            self._requesting_threads = False
            if not message['body']['threads']:
                if pending_request:
                    # We may have hit a thread event, so try again.
                    self.LoadThreads(infer_current_frame)
                    return
                else:
                    # The protocol requires servers to return at least one
                    # thread, so this is effectively a protocol error.
                    utils.UserMessage(
                        'Server returned no threads. Is it running?',
                        persist=True)

            self._threads.clear()
            for thread in message['body']['threads']:
                self._threads.append(thread)
                if infer_current_frame and thread['id'] == self._currentThread:
                    self._LoadStackTrace(thread, True)
                elif infer_current_frame and self._currentThread is None:
                    self._currentThread = thread['id']
                    self._LoadStackTrace(thread, True)
            self._DrawThreads()

        self._requesting_threads = True
        self._connection.DoRequest(on_threads_response, {
            'command': 'threads',
        })

    def _DrawThreads(self):
        """Re-render every known thread (and any expanded stack traces)."""
        self._line_to_frame.clear()
        self._line_to_thread.clear()
        with utils.ModifiableScratchBuffer(self._buf):
            utils.ClearBuffer(self._buf)
            for thread in self._threads:
                # '-' marks a thread whose frames are expanded, '+' collapsed.
                icon = '+' if '_frames' not in thread else '-'
                line = utils.AppendToBuffer(
                    self._buf,
                    '{0} Thread: {1}'.format(icon, thread['name']))
                self._line_to_thread[line] = thread
                self._DrawStackTrace(thread)

    def _LoadStackTrace(self, thread, infer_current_frame):
        """Fetch the stack for *thread*; optionally jump to its top frame."""
        def on_stacktrace_response(message):
            thread['_frames'] = message['body']['stackFrames']
            if infer_current_frame:
                for frame in thread['_frames']:
                    if self._JumpToFrame(frame):
                        break
            self._DrawThreads()

        self._connection.DoRequest(on_stacktrace_response, {
            'command': 'stackTrace',
            'arguments': {
                'threadId': thread['id'],
            }
        })

    def ExpandFrameOrThread(self):
        """Handle <CR>: jump to the frame, or toggle the thread, under the
        cursor."""
        if vim.current.buffer != self._buf:
            return
        current_line = vim.current.window.cursor[0]
        if current_line in self._line_to_frame:
            self._JumpToFrame(self._line_to_frame[current_line])
        elif current_line in self._line_to_thread:
            thread = self._line_to_thread[current_line]
            if '_frames' in thread:
                # Collapse: forget the cached frames and redraw in place.
                del thread['_frames']
                with utils.RestoreCursorPosition():
                    self._DrawThreads()
            else:
                self._LoadStackTrace(thread, False)

    def _JumpToFrame(self, frame):
        """Make *frame* current, resolving its source from the server if it
        is only available by sourceReference."""
        def do_jump():
            if 'line' in frame and frame['line'] > 0:
                self._currentFrame = frame
                return self._session.SetCurrentFrame(self._currentFrame)

        source = frame.get('source', {})
        if source.get('sourceReference', 0) > 0:
            def on_source_resolved(resolved_source):
                frame['source'] = resolved_source
                do_jump()
            self._ResolveSource(source, on_source_resolved)
            # The assumption here is that we _will_ eventually find something
            # to jump to.
            return True
        else:
            return do_jump()

    def OnStopped(self, event):
        """React to a DAP 'stopped' event by refreshing the stack."""
        if 'threadId' in event:
            self._currentThread = event['threadId']
        elif event.get('allThreadsStopped', False) and self._threads:
            self._currentThread = self._threads[0]['id']

        if self._currentThread is not None:
            for thread in self._threads:
                if thread['id'] == self._currentThread:
                    self._LoadStackTrace(thread, True)
                    return
        self.LoadThreads(True)

    def OnThreadEvent(self, event):
        """Adopt a newly started thread as current when we have none."""
        if event['reason'] == 'started' and self._currentThread is None:
            self._currentThread = event['threadId']
            self.LoadThreads(True)

    def Continue(self):
        """Resume the current thread and refresh the view."""
        if self._currentThread is None:
            utils.UserMessage('No current thread', persist=True)
            return
        self._session._connection.DoRequest(None, {
            'command': 'continue',
            'arguments': {
                'threadId': self._currentThread,
            },
        })
        self._session.ClearCurrentFrame()
        self.LoadThreads(True)

    def Pause(self):
        """Ask the adapter to pause the current thread."""
        if self._currentThread is None:
            utils.UserMessage('No current thread', persist=True)
            return
        self._session._connection.DoRequest(None, {
            'command': 'pause',
            'arguments': {
                'threadId': self._currentThread,
            },
        })

    def _DrawStackTrace(self, thread):
        """Append the cached frames of *thread* (if expanded) to the buffer."""
        if '_frames' not in thread:
            return
        for frame in thread['_frames']:
            if frame.get('source'):
                source = frame['source']
            else:
                source = {'name': '<unknown>'}
            if 'name' not in source:
                source['name'] = os.path.basename(source.get('path', 'unknwon'))

            if frame.get('presentationHint') == 'label':
                # Sigh. For some reason, it's OK for debug adapters to
                # completely ignore the protocol; it seems that the chrome
                # adapter sets 'label' and doesn't set 'line'.
                line = utils.AppendToBuffer(
                    self._buf,
                    ' {0}: {1}'.format(frame['id'], frame['name']))
            else:
                line = utils.AppendToBuffer(
                    self._buf,
                    ' {0}: {1}@{2}:{3}'.format(frame['id'],
                                               frame['name'],
                                               source['name'],
                                               frame['line']))
            self._line_to_frame[line] = frame

    def _ResolveSource(self, source, and_then):
        """Invoke *and_then* with a source whose content is available,
        downloading it into a scratch buffer on first use."""
        source_reference = int(source['sourceReference'])
        try:
            and_then(self._sources[source_reference])
        except KeyError:
            # We must retrieve the source contents from the server
            self._logger.debug("Requesting source: %s", source)

            def on_source_response(msg):
                self._sources[source_reference] = source
                buf_name = os.path.join('_vimspector_tmp', source['name'])
                self._logger.debug("Received source %s: %s", buf_name, msg)
                buf = utils.BufferForFile(buf_name)
                utils.SetUpScratchBuffer(buf, buf_name)
                source['path'] = buf_name
                with utils.ModifiableScratchBuffer(buf):
                    utils.SetBufferContents(buf, msg['body']['content'])
                and_then(self._sources[source_reference])

            self._session._connection.DoRequest(on_source_response, {
                'command': 'source',
                'arguments': {
                    'sourceReference': source['sourceReference'],
                    'source': source
                }
            })
| 30.628378 | 80 | 0.62784 |
fb6df8f0d749789fef53451700961648b50b16cd | 7,933 | py | Python | ROAR/control_module/pid_controller.py | bcwingnut/ROAR | 8c4210d25625c0d72949cb8c55d4262e937d7e40 | [
"Apache-2.0"
] | 1 | 2021-01-26T00:33:54.000Z | 2021-01-26T00:33:54.000Z | ROAR/control_module/pid_controller.py | bcwingnut/ROAR | 8c4210d25625c0d72949cb8c55d4262e937d7e40 | [
"Apache-2.0"
] | null | null | null | ROAR/control_module/pid_controller.py | bcwingnut/ROAR | 8c4210d25625c0d72949cb8c55d4262e937d7e40 | [
"Apache-2.0"
] | 5 | 2020-12-15T08:31:40.000Z | 2021-08-13T21:05:26.000Z | from pydantic import BaseModel, Field
from ROAR.control_module.controller import Controller
from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle
from ROAR.utilities_module.data_structures_models import Transform, Location
from collections import deque
import numpy as np
import math
import logging
from ROAR.agent_module.agent import Agent
from typing import Tuple
import json
from pathlib import Path
class PIDController(Controller):
    """Composite PID controller combining a longitudinal (throttle) and a
    latitudinal (steering) controller, with gains loaded from a JSON
    config file referenced by the agent settings."""

    def __init__(self, agent, steering_boundary: Tuple[float, float],
                 throttle_boundary: Tuple[float, float], **kwargs):
        super().__init__(agent, **kwargs)
        self.max_speed = self.agent.agent_settings.max_speed
        self.throttle_boundary = throttle_boundary
        self.steering_boundary = steering_boundary
        self.config = json.load(
            Path(agent.agent_settings.pid_config_file_path).open(mode='r'))
        self.long_pid_controller = LongPIDController(
            agent=agent,
            throttle_boundary=throttle_boundary,
            max_speed=self.max_speed,
            config=self.config["longitudinal_controller"],
            errAlpha=0.2)
        self.lat_pid_controller = LatPIDController(
            agent=agent,
            config=self.config["latitudinal_controller"],
            steering_boundary=steering_boundary)
        self.logger = logging.getLogger(__name__)

    def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:
        """Run steering first, then feed its absolute heading error into the
        throttle controller so the car slows down when off target."""
        steering, heading_err, _pos, _vel, _way_dir = \
            self.lat_pid_controller.run_in_series(next_waypoint=next_waypoint)
        throttle = self.long_pid_controller.run_in_series(
            next_waypoint=next_waypoint,
            target_speed=kwargs.get("target_speed", self.max_speed),
            errBoi=np.abs(heading_err))
        # (Historical debug hooks that zeroed every Nth control and logged
        # telemetry to tmp/pid_data.csv have been removed from comments.)
        return VehicleControl(throttle=throttle, steering=steering)

    @staticmethod
    def find_k_values(vehicle: Vehicle, config: dict) -> np.array:
        """Pick (Kp, Kd, Ki) from the first config speed band that the
        current speed falls under; defaults to (1, 0, 0)."""
        current_speed = Vehicle.get_speed(vehicle=vehicle)
        k_p, k_d, k_i = 1, 0, 0
        for speed_upper_bound, kvalues in config.items():
            if current_speed < float(speed_upper_bound):
                k_p, k_d, k_i = kvalues["Kp"], kvalues["Kd"], kvalues["Ki"]
                break
        # NOTE: this clips each gain into [0, 1], so configured gains above
        # 1 are silently capped.
        return np.clip([k_p, k_d, k_i], a_min=0, a_max=1)
class LongPIDController(Controller):
    """Longitudinal (throttle) PID controller with speed-banded gains and
    an exponentially smoothed cross-track error that lowers the target
    speed when the vehicle is off course."""

    def __init__(self, agent, config: dict, throttle_boundary: Tuple[float, float],
                 max_speed: float, dt: float = 0.03, errAlpha: float = 1.0, **kwargs):
        super().__init__(agent, **kwargs)
        self.config = config
        self.max_speed = max_speed
        self.throttle_boundary = throttle_boundary
        self._error_buffer = deque(maxlen=10)  # recent speed errors (D/I terms)
        self.errBoi = 0.0       # smoothed heading error fed in from steering
        self.errAlpha = errAlpha  # smoothing factor for error decay
        self._dt = dt           # assumed control period in seconds

    def run_in_series(self, next_waypoint: Transform, errBoi: float = 0.0, **kwargs) -> float:
        target_speed = min(self.max_speed, kwargs.get("target_speed", self.max_speed))

        # Track the worst recent heading error: jump up immediately,
        # decay back gradually via exponential smoothing.
        if errBoi > self.errBoi:
            self.errBoi = errBoi
        else:
            self.errBoi = self.errBoi * (1 - self.errAlpha) + errBoi * self.errAlpha

        # Scale target speed down (by up to half) the further off target we are.
        target_speed *= (math.exp(-self.errBoi) - 1) / 2 + 1

        current_speed = Vehicle.get_speed(self.agent.vehicle)
        k_p, k_d, k_i = PIDController.find_k_values(
            vehicle=self.agent.vehicle, config=self.config)

        error = target_speed - current_speed
        self._error_buffer.append(error)
        if len(self._error_buffer) >= 2:
            _de = (self._error_buffer[-2] - self._error_buffer[-1]) / self._dt
            _ie = sum(self._error_buffer) * self._dt
        else:
            _de = 0.0
            _ie = 0.0

        return float(np.clip((k_p * error) + (k_d * _de) + (k_i * _ie),
                             self.throttle_boundary[0],
                             self.throttle_boundary[1]))
class LatPIDController(Controller):
    """Latitudinal (steering) PID controller.

    Produces a steering command that reduces the signed angle between
    the vehicle's heading vector and the direction to the next waypoint.
    """

    def __init__(self, agent, config: dict, steering_boundary: Tuple[float, float],
                 dt: float = 0.03, **kwargs):
        super().__init__(agent, **kwargs)
        self.config = config  # speed-banded K values for find_k_values
        self.steering_boundary = steering_boundary
        self._error_buffer = deque(maxlen=10)  # recent heading errors (D/I terms)
        self._dt = dt  # assumed control period in seconds

    def run_in_series(self, next_waypoint: Transform, **kwargs
                      ) -> Tuple[float, float, Location, np.ndarray, np.ndarray]:
        """Compute the steering command toward *next_waypoint*.

        Returns
        -------
        tuple
            (steering clipped to ``steering_boundary``,
             signed heading error in radians,
             current vehicle location,
             heading vector,
             vector from vehicle to waypoint)

        Note: this method has always returned this 5-tuple; the previous
        ``-> float`` annotation was wrong and has been corrected.
        """
        # Heading vector derived from the vehicle's pitch angle.
        # NOTE(review): the y component equals v_begin.y (so v_vec.y ==
        # v_begin.y); this looks odd but is preserved as-is — confirm
        # upstream before changing.
        v_begin = self.agent.vehicle.transform.location
        v_end = v_begin + Location(
            x=math.cos(math.radians(self.agent.vehicle.transform.rotation.pitch)),
            y=v_begin.y,
            z=math.sin(math.radians(self.agent.vehicle.transform.rotation.pitch)),
        )
        v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, v_end.z - v_begin.z])

        # Vector from the vehicle to the next waypoint.
        w_vec = np.array(
            [
                next_waypoint.location.x - v_begin.x,
                next_waypoint.location.y - v_begin.y,
                next_waypoint.location.z - v_begin.z,
            ]
        )

        # Unsigned angle between heading and waypoint direction (clipped to
        # avoid acos domain errors from floating-point noise)...
        _dot = math.acos(
            np.clip(
                np.dot(w_vec, v_vec) / (np.linalg.norm(w_vec) * np.linalg.norm(v_vec)),
                -1.0,
                1.0,
            )
        )
        # ...signed by the cross product (which side of the heading the
        # waypoint lies on).
        _cross = np.cross(v_vec, w_vec)
        if _cross[1] > 0:
            _dot *= -1.0

        self._error_buffer.append(_dot)
        if len(self._error_buffer) >= 2:
            _de = (self._error_buffer[-1] - self._error_buffer[-2]) / self._dt
            _ie = sum(self._error_buffer) * self._dt
        else:
            _de = 0.0
            _ie = 0.0

        k_p, k_d, k_i = PIDController.find_k_values(
            config=self.config, vehicle=self.agent.vehicle)
        lat_control = float(
            np.clip((k_p * _dot) + (k_d * _de) + (k_i * _ie),
                    self.steering_boundary[0], self.steering_boundary[1])
        )
        return lat_control, _dot, v_begin, v_vec, w_vec
c21e8b033501d16442cde0176b6da38803be7d71 | 1,353 | py | Python | data/user_input/project/loadProjectInput.py | open-pulse/OpenPulse | ef49cd1ff672821c4b57729c0ef9f4ff5a83eadf | [
"MIT"
] | 23 | 2020-01-14T12:49:11.000Z | 2021-11-10T05:19:29.000Z | data/user_input/project/loadProjectInput.py | open-pulse/OpenPulse | ef49cd1ff672821c4b57729c0ef9f4ff5a83eadf | [
"MIT"
] | 101 | 2020-01-23T19:29:00.000Z | 2022-03-15T17:56:23.000Z | data/user_input/project/loadProjectInput.py | open-pulse/OpenPulse | ef49cd1ff672821c4b57729c0ef9f4ff5a83eadf | [
"MIT"
] | 3 | 2020-01-14T12:49:26.000Z | 2022-01-13T02:06:53.000Z | from PyQt5.QtWidgets import QToolButton, QLineEdit, QDialogButtonBox, QFileDialog, QDialog, QMessageBox, QTabWidget, QProgressBar, QLabel, QListWidget
from pulse.project import Project
from PyQt5.QtGui import QIcon
from os.path import basename, expanduser, exists
from PyQt5 import uic
import os
import configparser
from shutil import copyfile
import numpy as np
from time import time
from PyQt5 import uic
class LoadProjectInput(QDialog):
    """Invisible helper dialog that prompts for (or receives) a project
    .ini path, loads the project, records it in the recent-projects list,
    and then closes itself."""

    def __init__(self, project, config, path, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.project = project
        self.config = config
        self.path = path
        self.userPath = expanduser('~')
        self.complete_project_path = ""
        self.complete = False

        if self.path is None:
            # No path given: ask the user to pick one.
            self.complete_project_path, _type = QFileDialog.getOpenFileName(
                None, 'Open file', self.userPath, 'OpenPulse Project (*.ini)')
        else:
            self.complete_project_path = self.path

        if self.complete_project_path != "":
            t0 = time()
            self.project.load_project(self.complete_project_path)
            self.config.writeRecentProject(self.project.get_project_name(),
                                           self.complete_project_path)
            self.complete = True
            self.project.time_to_load_or_create_project = time() - t0
        # NOTE(review): assuming the dialog always closes itself regardless
        # of whether a project was loaded — confirm against upstream intent.
        self.close()
6fd4f6c0052876af6a7c178eaf437031f6475aef | 4,252 | py | Python | src/amuse/test/suite/codes_tests/test_parallel.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 131 | 2015-06-04T09:06:57.000Z | 2022-02-01T12:11:29.000Z | src/amuse/test/suite/codes_tests/test_parallel.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 690 | 2015-10-17T12:18:08.000Z | 2022-03-31T16:15:58.000Z | src/amuse/test/suite/codes_tests/test_parallel.py | rieder/amuse | 3ac3b6b8f922643657279ddee5c8ab3fc0440d5e | [
"Apache-2.0"
] | 102 | 2015-01-22T10:00:29.000Z | 2022-02-09T13:29:43.000Z | from amuse.test.amusetest import TestWithMPI
import os
import sys
from amuse.community.hermite.interface import Hermite
from amuse.community.bhtree.interface import BHTree
import numpy
import threading
from amuse.units import nbody_system
from amuse.units import units
from amuse.datamodel import Particles
# Plotting is optional: remember whether matplotlib is importable so the
# tests can skip the figure-producing section when it is not installed.
try:
    from matplotlib import pyplot
    HAS_MATPLOTLIB = True
except ImportError:
    HAS_MATPLOTLIB = False
class TestAmuseInterface(TestWithMPI):
    """Checks that two gravity codes can be evolved concurrently from
    separate Python threads."""

    def new_system_sun_and_earth(self):
        """Build a two-particle Sun/Earth system in SI units."""
        result = Particles(2)

        sun = result[0]
        sun.mass = units.MSun(1.0)
        sun.position = units.m(numpy.array((0.0, 0.0, 0.0)))
        sun.velocity = units.ms(numpy.array((0.0, 0.0, 0.0)))
        sun.radius = units.RSun(1.0)

        earth = result[1]
        earth.mass = units.kg(5.9736e24)
        earth.radius = units.km(6371)
        earth.position = units.km(numpy.array((149.5e6, 0.0, 0.0)))
        earth.velocity = units.ms(numpy.array((0.0, 29800, 0.0)))
        return result

    def evolve_model_unit_day(self, instance, particles, day):
        """Evolve *instance* to *day* days in 5-day steps, saving a
        particle snapshot after each step."""
        delta_days = 5
        for day_number in range(1, day + delta_days, delta_days):
            instance.evolve_model(day_number | units.day)
            instance.particles.copy_values_of_all_attributes_to(particles)
            particles.savepoint()

    def test1(self):
        from amuse.rfi import channel
        channel.MpiChannel.ensure_mpi_initialized()
        is_threaded = channel.MpiChannel.is_threaded()
        is_multithreading_supported = channel.MpiChannel.is_multithreading_supported()
        self.assertEqual(is_threaded, is_multithreading_supported)

    def test2(self):
        convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 149.5e6 | units.km)

        bhtree = BHTree(convert_nbody)
        bhtree.initialize_code()
        bhtree.eps2_for_gravity = 0.001
        bhtree_particles = self.new_system_sun_and_earth()
        bhtree.particles.add_particles(bhtree_particles)

        # Running two codes from two threads requires MPI_THREAD_MULTIPLE.
        if bhtree.legacy_interface.channel_type == 'mpi':
            from mpi4py import MPI
            if not MPI.Query_thread() == MPI.THREAD_MULTIPLE:
                bhtree.stop()
                self.skip("can only test parallel with multiple thread support in mpi implementation")

        hermite = Hermite(convert_nbody)
        hermite.dt_dia = 5000
        hermite.commit_parameters()
        hermite_particles = self.new_system_sun_and_earth()
        hermite.particles.add_particles(hermite_particles)

        thread1 = threading.Thread(target=self.evolve_model_unit_day,
                                   args=(bhtree, bhtree_particles, 10))
        thread2 = threading.Thread(target=self.evolve_model_unit_day,
                                   args=(hermite, hermite_particles, 10))
        thread1.start()
        thread2.start()
        thread1.join()
        thread2.join()

        if HAS_MATPLOTLIB:
            figure = pyplot.figure()
            plot = figure.add_subplot(1, 1, 1)

            earth = bhtree_particles[1]
            x_points = earth.get_timeline_of_attribute("x")
            y_points = earth.get_timeline_of_attribute("y")
            x_points_in_AU = [t_and_x[1].value_in(units.AU) for t_and_x in x_points]
            y_points_in_AU = [t_and_y[1].value_in(units.AU) for t_and_y in y_points]
            plot.scatter(x_points_in_AU, y_points_in_AU, color="b", marker='o')

            earth = hermite_particles[1]
            x_points = earth.get_timeline_of_attribute("x")
            y_points = earth.get_timeline_of_attribute("y")
            x_points_in_AU = [t_and_x[1].value_in(units.AU) for t_and_x in x_points]
            y_points_in_AU = [t_and_y[1].value_in(units.AU) for t_and_y in y_points]
            plot.scatter(x_points_in_AU, y_points_in_AU, color="g", marker='o')

            plot.set_xlim(-1.5, 1.5)
            plot.set_ylim(-1.5, 1.5)

            test_results_path = self.get_path_to_results()
            output_file = os.path.join(test_results_path, "parallel-earth-sun.svg")
            figure.savefig(output_file)

        bhtree.stop()
        hermite.stop()
        bhtree.stop()
| 36.655172 | 112 | 0.624882 |
ff9d305e563ee2af30fba3fcbdf0dc1c2b7b7558 | 869 | py | Python | entry/sumo_config/sumo_eval_default_config.py | opendilab/DI-smartcross | 362c6c6dcfd2e1f59d3e7c955ffe2d9d1b13d8d2 | [
"Apache-2.0"
] | 49 | 2021-12-28T08:10:44.000Z | 2022-01-24T04:09:41.000Z | entry/sumo_config/sumo_eval_default_config.py | opendilab/DI-smartcross | 362c6c6dcfd2e1f59d3e7c955ffe2d9d1b13d8d2 | [
"Apache-2.0"
] | null | null | null | entry/sumo_config/sumo_eval_default_config.py | opendilab/DI-smartcross | 362c6c6dcfd2e1f59d3e7c955ffe2d9d1b13d8d2 | [
"Apache-2.0"
] | null | null | null | from easydict import EasyDict
# Default evaluation configuration for the SUMO environment.
sumo_eval_default_config = {
    'env': {
        'manager': {
            # Whether to use shared memory. Only effective if manager type
            # is 'subprocess'.
            'shared_memory': False,
            'context': 'spawn',
            'retry_type': 'renew',
        },
        'n_evaluator_episode': 1,
        'collector_env_num': 1,
        'evaluator_env_num': 1,
        'stop_value': 99999,
    },
    'policy': {
        'cuda': False,
    },
}

create_config = {
    'env_manager': {
        'type': 'subprocess',
    },
    'env': {
        # Must use the absolute path. All the following "import_names"
        # should obey this too.
        'import_names': ['smartcross.envs.sumo_env'],
        'type': 'sumo_env',
    },
    'policy': {},
}

# Wrap both dicts so attributes can be accessed with dot notation.
create_config = EasyDict(create_config)
sumo_eval_default_config = EasyDict(sumo_eval_default_config)
main_config = sumo_eval_default_config
| 27.15625 | 92 | 0.654776 |
bdc5463ca813a7e7162af9a7b29fa8e452919a67 | 22,098 | py | Python | noaa_sdk/noaa.py | windyrose/noaaAPI | 00e52e9cccce526070b1b2bb95b01e0643e0b0b2 | [
"MIT"
] | null | null | null | noaa_sdk/noaa.py | windyrose/noaaAPI | 00e52e9cccce526070b1b2bb95b01e0643e0b0b2 | [
"MIT"
] | null | null | null | noaa_sdk/noaa.py | windyrose/noaaAPI | 00e52e9cccce526070b1b2bb95b01e0643e0b0b2 | [
"MIT"
] | 1 | 2020-04-06T12:31:19.000Z | 2020-04-06T12:31:19.000Z | """
API Wrapper for NOAA API V3
===========================
For more detailed information about NOAA API,
visit: https://www.weather.gov/documentation/services-web-api
Geoencoding is made possible by Open Street Map (© OpenStreetMap contributors)
For copyright information, visit: https://www.openstreetmap.org/copyright
"""
import json
from urllib.parse import urlencode
from noaa_sdk.util import UTIL
from noaa_sdk.accept import ACCEPT
class OSM(UTIL):
    """
    Make request to Open Street Map nominatim Api.
    ==============================================
    © OpenStreetMap contributors

    Thin geocoding client used to translate between postal code / country
    pairs and latitude / longitude coordinates.
    """

    OSM_ENDPOINT = 'nominatim.openstreetmap.org'

    def __init__(self, show_uri=False):
        """Constructor."""
        self._user_agent = 'pypi noaa_sdk'
        self._accept = ACCEPT.JSON
        super().__init__(user_agent=self._user_agent,
                         accept=ACCEPT.JSON,
                         show_uri=show_uri)

    def get_lat_lon_by_postalcode_country(self, postalcode, country):
        """Geocode a postal code + 2-letter country code to (lat, lon).

        Args:
            postalcode (str): postal code.
            country (str): 2 letter country code.
        Returns:
            tuple: tuple of latitude and longitude.
        """
        res = self.make_get_request(
            '/search?postalcode={}&country={}&format=json'.format(
                postalcode, country),
            end_point=self.OSM_ENDPOINT)
        if len(res) == 0 or 'lat' not in res[0] or 'lon' not in res[0]:
            raise Exception(
                'Postalcode and Country: {}, {} does not exist.'.format(
                    postalcode, country))
        return float(res[0]['lat']), float(res[0]['lon'])

    def get_postalcode_country_by_lan_lon(self, lat, lon):
        """Reverse-geocode (lat, lon) to (postal code, country code).

        Args:
            lat (float): latitude.
            lon (float): longitude.
        Returns:
            tuple: tuple of postalcode and country code.
        """
        res = self.make_get_request(
            '/reverse?lat={}&lon={}&addressdetails=1&format=json'.format(
                lat, lon),
            end_point=self.OSM_ENDPOINT)
        if 'address' not in res:
            raise Exception('No address found.')
        if 'country_code' not in res['address']:
            raise Exception('No country code found.')
        if 'postcode' not in res['address']:
            raise Exception('No postal code found.')
        return res['address']['postcode'], res['address']['country_code']
class NOAA(UTIL):
"""Main class for getting data from NOAA."""
DEFAULT_END_POINT = 'api.weather.gov'
DEFAULT_USER_AGENT = 'Test (your@email.com)'
def __init__(self, user_agent=None, accept=None, show_uri=False):
    """Constructor.

    Args:
        user_agent (str[optional]): user agent specified in the header.
        accept (str[optional]): accept string specified in the header.
        show_uri (boolean[optional]): True for showing the
            actual url with query string being sent for requesting data.
    """
    # Fall back to the class defaults when either header value is omitted.
    super().__init__(
        user_agent=user_agent or self.DEFAULT_USER_AGENT,
        accept=accept or ACCEPT.GEOJSON,
        show_uri=show_uri)
    self._osm = OSM()
def get_forecasts(self, postal_code, country, hourly=True):
    """Get forecasts by postal code and country code.

    Uses Open Street Map geocoding to translate the postal code and
    2-letter country code into the lat/lon pair NOAA requires.

    Args:
        postal_code (str): postal code.
        country (str): 2 letter country code.
        hourly (boolean[optional]): True for getting hourly forecast.
    Returns:
        list: list of weather forecasts.
    """
    lat, lon = self._osm.get_lat_lon_by_postalcode_country(
        postal_code, country)
    res = self.points_forecast(lat, lon, hourly)

    # Surface NOAA service errors and defend against response-shape drift.
    if 'status' in res and res['status'] == 503 and 'detail' in res:
        raise Exception('Status: {}, NOAA API Error Response: {}'.format(
            res['status'], res['detail']))
    if 'properties' not in res:
        raise Exception(
            '"properties" attribute not found. Possible response json changes')
    if 'periods' not in res['properties']:
        raise Exception(
            '"periods" attribute not found. Possible response json changes')
    return res['properties']['periods']
def get_observations(
        self, postalcode, country, start=None, end=None, num_of_stations=1):
    """Get all nearest station observations by postalcode and country code.

    Uses Open Street Map geocoding to map the postal code and country
    code to coordinates, then delegates to get_observations_by_lat_lon.

    Args:
        postalcode (str): postal code.
        country (str): 2 letter country code.
        start (str[optional]): start date of observation
            (eg. '%Y-%m-%dT%H:%M:%SZ' | '%Y-%m-%d' | '%Y-%m-%d %H:%M:%S').
        end (str[optional]): end date of observation
            (eg. '%Y-%m-%dT%H:%M:%SZ' | '%Y-%m-%d' | '%Y-%m-%d %H:%M:%S').
        num_of_stations (int[optional]): get observations from the
            nearest x stations. (Put -1 of wants to get all stations.)
    Returns:
        generator: generator of dictionaries of observations with
        variables such as 'relativeHumidity', 'presentWeather',
        'windGust', 'dewpoint', 'temperature', 'windDirection',
        'visibility', 'barometricPressure', 'station', 'textDescription',
        'timestamp', 'windSpeed', 'elevation' and related fields.
    """
    lat, lon = self._osm.get_lat_lon_by_postalcode_country(
        postalcode, country)
    return self.get_observations_by_lat_lon(
        lat, lon, start, end, num_of_stations)
def get_observations_by_lat_lon(
        self, lat, lon, start=None, end=None, num_of_stations=1):
    "Same as get_observations() but uses Lat and Lon instead of Postalcode and Country"
    # Optional time-window filters forwarded to stations_observations.
    observation_params = {}
    if start:
        observation_params['start'] = start
    if end:
        observation_params['end'] = end

    points_res = self.points(
        '{},{}'.format(round(lat, 4), round(lon, 4)))
    if 'properties' not in points_res or 'observationStations' not in points_res['properties']:
        raise Exception('Error: No Observation Stations found.')

    # The points response links to a list of station URIs ordered by
    # distance from the requested coordinate.
    stations = self.make_get_request(
        uri=points_res['properties']['observationStations'],
        end_point=self.DEFAULT_END_POINT)['observationStations']

    for index, station_uri in enumerate(stations):
        if num_of_stations > 0 and num_of_stations <= index:
            break
        station_id = station_uri.split('/')[-1]
        response = self.stations_observations(
            station_id=station_id, **observation_params)
        observations = response
        if type(response) == dict:
            observations = response['features']
        for observation in observations:
            yield observation.get('properties')
def get_observations_by_postalcode_country(
        self, postalcode, country, start=None, end=None, num_of_stations=1):
    """Deprecated. Please use method get_observations."""
    # Thin alias kept for backwards compatibility with older callers.
    return self.get_observations(
        postalcode, country, start, end, num_of_stations)
def points(self, point, stations=False):
"""Metadata about a point.
This is the primary endpoint for forecast information for a location.
It contains linked data for the forecast, the hourly forecast,
observation and other information. It also shows stations nearest to a point
in order of distance.
Response in this method should not be modified.
In this way, we can keep track of changes made by NOAA through
functional tests @todo(paulokuong) later on.
Args:
point (str): lat,long.
stations (boolean): True for finding stations.
Returns:
json: json response from api.
"""
if stations:
return self.make_get_request(
"/points/{point}/stations".format(point=point),
end_point=self.DEFAULT_END_POINT)
return self.make_get_request(
"/points/{point}".format(point=point),
end_point=self.DEFAULT_END_POINT)
def points_forecast(self, lat, long, hourly=False):
"""Get observation data from a weather station.
Response in this method should not be modified.
In this way, we can keep track of changes made by NOAA through
functional tests @todo(paulokuong) later on.
Args:
lat (float): latitude of the weather station coordinate.
long (float): longitude of the weather station coordinate.
hourly (boolean[optional]): True for getting hourly forecast.
Returns:
json: json response from api.
"""
points = self.make_get_request(
"/points/{lat},{long}".format(
lat=lat, long=long), end_point=self.DEFAULT_END_POINT)
uri = points['properties']['forecast']
if hourly:
uri = points['properties']['forecastHourly']
return self.make_get_request(
uri=uri, end_point=self.DEFAULT_END_POINT)
def stations(self, **params):
"""Get list of US weather stations and their metadata.
Response in this method should not be modified.
In this way, we can keep track of changes made by NOAA through
functional tests @todo(paulokuong) later on.
Args:
station_id (str[optional]): station id.
state (str[optional]): 2 letters state code.
limit (int[optional]): limit of results.
Returns:
json: json response from api.
"""
if len(params) > 0:
if 'station_id' in params:
params['id'] = params['station_id']
del params['station_id']
return self.make_get_request(
"/stations?{query_string}".format(
query_string=urlencode(params)),
end_point=self.DEFAULT_END_POINT)
return self.make_get_request(
"/stations", end_point=self.DEFAULT_END_POINT)
def stations_observations(self, station_id, **params):
"""Get observation data from specific station.
Response in this method should not be modified.
In this way, we can keep track of changes made by NOAA through
functional tests @todo(paulokuong) later on.
(*Note: There is a delay on NOAA's side for "unpopular" stations which
causes start and end params not enable to query anything sometimes.)
Args:
station_id (str): station id.
start (str[optional]): start date of observation
(eg. '%Y-%m-%dT%H:%M:%SZ' | '%Y-%m-%d' | '%Y-%m-%d %H:%M:%S').
end (str[optional]): end date of observation
(eg. '%Y-%m-%dT%H:%M:%SZ' | '%Y-%m-%d' | '%Y-%m-%d %H:%M:%S').
limit (int[optional]): limit of results.
current (bool[optional]): True if needs current observations.
recordId (str[optional]): recordId, Record Id (ISO8601DateTime)
Returns:
json: json response from api.
"""
if not station_id:
raise Exception("'station_id' is required.")
if 'recordId' in params and 'current' in params:
raise Exception("Cannot have both 'current' and 'recordId'")
if 'start' in params:
start = params['start']
self.parse_param_timestamp(start)
if len(start) < 19:
start = '{}T00:00:00Z'.format(start[:10])
elif len(params['start']) < 20:
start = start.replace(' ', 'T')
start = '{}Z'.format(start)
params['start'] = start
if 'end' in params:
end = params['end']
self.parse_param_timestamp(end)
if len(end) < 19:
end = '{}T23:59:59Z'.format(end[:10])
elif len(params['end']) < 20:
end = end.replace(' ', 'T')
end = '{}Z'.format(end)
params['end'] = end
request_uri = "/stations/{stationId}/observations".format(
stationId=station_id)
if len(params) > 0:
if 'recordId' in params:
return self.make_get_request(
'{old_request_uri}/{recordId}'.format(
old_request_uri=request_uri,
recordId=params['recordId']),
end_point=self.DEFAULT_END_POINT)
if 'current' in params:
return self.make_get_request(
'{old_request_uri}/current'.format(
old_request_uri=request_uri),
end_point=self.DEFAULT_END_POINT)
if len(params) > 1:
request_uri = '{old_request_uri}?{query_string}'.format(
old_request_uri=request_uri,
query_string=urlencode(params))
observations = self.make_get_request(
request_uri, end_point=self.DEFAULT_END_POINT)
if 'features' not in observations:
raise Exception(observations)
return observations['features']
return self.make_get_request(
"/stations/{stationId}/observations".format(stationId=station_id),
end_point=self.DEFAULT_END_POINT)
def products(self, id):
"""Get data of a product.
Response in this method should not be modified.
In this way, we can keep track of changes made by NOAA through
functional tests @todo(paulokuong) later on.
Args:
id (str): product id.
Returns:
json: json response from api.
"""
return self.make_get_request(
"/products/{productId}".format(productId=id),
end_point=self.DEFAULT_END_POINT)
def products_types(self, **params):
"""Get a list of product types with an active product.
Response in this method should not be modified.
In this way, we can keep track of changes made by NOAA through
functional tests @todo(paulokuong) later on.
Args:
type_id (str): an id of a valid product type
locations (boolean[optional]): True to get a list of
locations that have issues products for a type.
location_id (str): location id.
Returns:
json: json response from api.
"""
if 'type_id' in params and 'locations' not in params:
return self.make_get_request(
"/products/types/{type_id}".format(type_id=params['type_id']),
end_point=self.DEFAULT_END_POINT)
elif 'locations' in params:
if 'type_id' not in params:
raise Exception('Error: Missing type id (type_id=None)')
if 'location_id' in params:
return self.make_get_request(
('/products/types/{type_id}/locations/'
'{location_id}').format(
type_id=params['type_id'],
location_id=params['location_id']),
end_point=self.DEFAULT_END_POINT)
else:
return self.make_get_request(
"/products/types/{type_id}/locations".format(
type_id=params['type_id']),
end_point=self.DEFAULT_END_POINT)
return self.make_get_request(
"/products/types",
end_point=self.DEFAULT_END_POINT)
def products_locations(self, **params):
"""A list of locations with active products.
Response in this method should not be modified.
In this way, we can keep track of changes made by NOAA through
functional tests @todo(paulokuong) later on.
Args:
location_id (str): location id.
Returns:
json: json response from api.
"""
if 'location_id' in params:
return self.make_get_request(
"/products/locations/{locationId}/types".format(
locationId=params['location_id']),
end_point=self.DEFAULT_END_POINT)
return self.make_get_request(
"/products/locations",
end_point=self.DEFAULT_END_POINT)
def offices(self, office_id):
"""Metadata about a Weather Office.
Response in this method should not be modified.
In this way, we can keep track of changes made by NOAA through
functional tests @todo(paulokuong) later on.
Args:
office_id (str): office id.
Returns:
json: json response from api.
"""
return self.make_get_request("/offices/{office_id}".format(
office_id=office_id), end_point=self.DEFAULT_END_POINT)
def zones(self, type, zone_id, forecast=False):
"""Metadata for a zone and forecast data for zone.
Response in this method should not be modified.
In this way, we can keep track of changes made by NOAA through
functional tests @todo(paulokuong) later on.
Args:
type (str): a valid zone type (list forthcoming).
zone_id (str): a zone id (list forthcoming).
forecast (bool): True to show forecast data of the zone.
Returns:
json: json response from api.
"""
if forecast:
return self.make_get_request(
"/zones/{type}/{zone_id}/forecast".format(
type=type, zone_id=zone_id),
end_point=self.DEFAULT_END_POINT)
return self.make_get_request("/zones/{type}/{zone_id}".format(
type=type, zone_id=zone_id), end_point=self.DEFAULT_END_POINT)
def alerts(self, **params):
"""A list of alerts that can be filtered by parameters.
If no parameters are provided, then all alerts are returned.
The ATOM format returns items in CAP-ATOM.
Response in this method should not be modified.
In this way, we can keep track of changes made by NOAA through
functional tests @todo(paulokuong) later on.
Args:
alert_id (str): alert id.
active (int): Active alerts (1 or 0).
start (str): Start time (ISO8601DateTime).
end (str): End time (ISO8601DateTime).
status (str): Event status (alert, update, cancel).
type (str): Event type (list forthcoming).
zone_type (str): Zone type (land or marine).
point (str): Point (latitude,longitude).
region (str): Region code (list forthcoming).
state (str): State/marine code (list forthcoming).
zone (str): Zone Id (forecast or county, list forthcoming).
urgency (str): Urgency (expected, immediate).
severity (str): Severity (minor, moderate, severe).
certainty (str): Certainty (likely, observed).
limit (int) Limit (an integer).
Returns:
json: json response from api.
"""
if 'alert_id' in params:
return self.make_get_request(
"/alerts/{alert_id}".format(alert_id=params['alert_id']),
end_point=self.DEFAULT_END_POINT)
return self.make_get_request(
"/alerts?{query_string}".format(query_string=urlencode(params)),
end_point=self.DEFAULT_END_POINT)
def active_alerts(self, count=False, **params):
"""Active alerts endpoints.
Response in this method should not be modified.
In this way, we can keep track of changes made by NOAA through
functional tests @todo(paulokuong) later on.
Args:
count (bool): True to hit /alerts/active/count.
zone_id (str): a valid zone, see list in counts endpoint.
area (str): a valid area, see list in counts endpoint.
region (str): a valid region, see list in counts endpoint
Returns:
json: json response from api.
"""
if count:
return self.make_get_request(
"/alerts/count",
end_point=self.DEFAULT_END_POINT)
if 'zone_id' in params:
return self.make_get_request(
"/alerts/active/zone/{zoneId}".format(
zoneId=params['zone_id']),
end_point=self.DEFAULT_END_POINT)
if 'area' in params:
return self.make_get_request(
"/alerts/active/area/{area}".format(
area=params['area']),
end_point=self.DEFAULT_END_POINT)
if 'region' in params:
return self.make_get_request(
"/alerts/active/region/{region}".format(
region=params['region']),
end_point=self.DEFAULT_END_POINT)
return self.make_get_request(
"/alerts/active",
end_point=self.DEFAULT_END_POINT)
| 40.178182 | 99 | 0.587429 |
d6c89b1b864a31c7b092b140324abcfb4edb3bb5 | 7,540 | py | Python | training/utils/uniio.py | iyush/manta | 723a2dc9e10464d87810b3c2104c81912e1e9fcc | [
"Apache-2.0"
] | 5 | 2021-05-11T21:19:54.000Z | 2021-06-04T11:23:38.000Z | training/utils/uniio.py | iyush/manta | 723a2dc9e10464d87810b3c2104c81912e1e9fcc | [
"Apache-2.0"
] | null | null | null | training/utils/uniio.py | iyush/manta | 723a2dc9e10464d87810b3c2104c81912e1e9fcc | [
"Apache-2.0"
] | null | null | null | #******************************************************************************
#
# MantaFlow fluid solver framework
# Copyright 2017 Nils Thuerey, Boris Bonev
#
# This program is free software, distributed under the terms of the
# Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Read mantaflow uni files into numpy arrays
#
#******************************************************************************
import gzip
import struct
import sys
import os
import shutil
from datetime import date
from collections import namedtuple
import numpy as np
# python 2 vs 3 switch
PY3K = sys.version_info >= (3, 0)  # True on Python 3; used below to decode bytes read from streams
# read content of grid
def RU_read_content(bytestream, header):
    """Decode the payload of a .uni grid file into a numpy array.

    Returns shape [T, Z, Y, X, C] for 4D grids (dimT > 1) and [Z, Y, X, C]
    otherwise, where C is 3 for vec3 grids (elementType 2) and 1 for int
    (elementType 0) or scalar (elementType 1) grids.
    """
    elem = header['elementType']
    bpe = header['bytesPerElement']
    # Sanity check: vec3 grids store 3 floats per cell, int/scalar grids one
    # 4-byte value per cell.
    assert (bpe == 12 and elem == 2) or (bpe == 4 and (elem == 0 or elem == 1))
    dtype = "int32" if elem == 0 else "float32"
    data = np.frombuffer(bytestream.read(), dtype=dtype)
    channels = 3 if elem == 2 else 1
    shape = [header['dimZ'], header['dimY'], header['dimX'], channels]
    if header['dimT'] > 1:
        shape.insert(0, header['dimT'])
    return data.reshape(*shape, order='C')
# read uni file header (v3)
def RU_read_header(bytestream):
    """Parse a .uni grid header (v3 magic 'MNT2' or v4 magic 'MNT3').

    Returns a dict with the v4 fields; v3 headers are upgraded in place
    (dimT is set to 0 and the info string truncated to 252 bytes).
    Unsupported or unknown magics print an error and exit.
    """
    V4_FIELDS = 'dimX, dimY, dimZ, gridType, elementType, bytesPerElement, info, dimT, timestamp'
    ID = bytestream.read(4)
    if sys.version_info >= (3, 0):  # bytes -> str under Python 3
        ID = ID.decode("utf-8")
    if ID == "MNT2":
        # v3 layout: no dimT field, 256-byte info string.
        fields = namedtuple('HeaderV3', 'dimX, dimY, dimZ, gridType, elementType, bytesPerElement, info, timestamp')
        header = fields._asdict(fields._make(struct.unpack('iiiiii256sQ', bytestream.read(288))))
        # Upgrade to the v4 layout used by the rest of this module.
        header['dimT'] = 0
        header['info'] = header['info'][0:252]
        head4 = namedtuple('HeaderV4', V4_FIELDS)(**header)
        header = head4._asdict()
    elif ID == "MNT3":
        # v4 layout: 252-byte info string followed by dimT.
        fields = namedtuple('HeaderV4', V4_FIELDS)
        header = fields._asdict(fields._make(struct.unpack('iiiiii252siQ', bytestream.read(288))))
    elif ID == "M4T2" or ID == "M4T3":
        print("read_header error - 4D grids not yet supported")
        exit(1)
    else:
        print("read_header error - unknown uni file header '%s' " % ID)
        exit(1)
    return header
# use this to read the .uni file. It will return the header as dictionary and the content as np-array
def readUni(filename):
    """Read a mantaflow .uni file.

    Returns a (header dict, numpy array) tuple; see RU_read_header and
    RU_read_content for the layouts.
    """
    with gzip.open(filename, 'rb') as stream:
        head = RU_read_header(stream)
        return head, RU_read_content(stream, head)
# use this to write a .uni file. The header has to be supplied in the same dictionary format as the output of readuni
def writeUni(filename, header, content):
    """Write a numpy array as a gzip-compressed v4 ('MNT3') .uni grid file.

    `header` must be a dict whose keys appear in HeaderV4 order:
    dimX, dimY, dimZ, gridType, elementType, bytesPerElement, info, dimT,
    timestamp (the same dict format produced by readUni).
    """
    with gzip.open(filename, 'wb') as stream:
        # v4 magic followed by the packed header struct.
        stream.write(b'MNT3')
        packed = struct.pack(
            'iiiiii252siQ',
            *namedtuple('HeaderV4', header.keys())(**header))
        stream.write(packed)
        # Grid payloads are always stored as single-precision floats.
        if content.dtype != "float32":
            content = np.asarray(content, dtype="float32")
        cells = header['dimX'] * header['dimY'] * header['dimZ']
        if header['elementType'] == 2:
            flat = content.reshape(cells * 3, order='C')  # vec3 grid
        else:
            flat = content.reshape(cells, order='C')      # int or scalar grid
        if sys.version_info >= (3, 0):
            stream.write(memoryview(flat))
        else:
            stream.write(np.getbuffer(flat))
# backup code to test folder
def backupFile(name, test_path):
    """Copy source file `name` into `test_path` (archives run scripts).

    `test_path` is used as a string prefix, so it should end with a path
    separator. A bare filename is resolved relative to the current directory.
    """
    base = os.path.basename(name)
    src_dir = os.path.dirname(name)
    if src_dir:
        code_path = src_dir + '/' + base
    else:
        code_path = "./" + base
    shutil.copy(code_path, test_path + base)
#******************************************************************************
# particle data
def RP_read_header(bytestream):
    """Parse a particle-file header (magic 'PD01') into a dict.

    Unknown magics print an error and exit.
    """
    ID = bytestream.read(4)
    if sys.version_info >= (3, 0):  # bytes -> str under Python 3
        ID = ID.decode("utf-8")
    if ID != "PD01":
        print("read_particle_header error - unknown uni file header '%s' " % ID)
        exit(1)
    fields = namedtuple('UniPartHeader', 'dim, dimX, dimY, dimZ, elementType, bytesPerElement, info, timestamp')
    return fields._asdict(fields._make(struct.unpack('iiiiii256sQ', bytestream.read(288))))
def RP_read_content(bytestream, header, data_type=None):
    """Decode a particle-file payload into a numpy array.

    data_type selects the payload kind: None -> BasicParticleSystem
    positions; 'float32' -> Real (4 bytes) or Vec3 (12 bytes); 'int32' -> Int.
    """
    bpe = header['bytesPerElement']
    assert bpe == 16 or bpe == 12 or bpe == 4
    if header['elementType'] == 0:
        # BasicParticleSystem: interleaved (x,y,z) float32 plus an int32
        # flag per particle; only the positions are returned.
        print('(BasicParticleSystem) ' )
        record = np.dtype([('f1', (np.float32, 3)), ('f2', (np.int32, 1))])
        return np.frombuffer(bytestream.read(), dtype=record)['f1']
    # ParticleDataImpl<T>, where T = {float32: Real(4) or Vec3(12); int32: Int(4)}
    print('(ParticleDataImpl<T={}{}>) '.format(data_type, 'x3' if (bpe == 12) else '') )
    width = 3 if (bpe == 12) else 1
    return np.reshape(np.frombuffer(bytestream.read(), dtype=data_type), (-1, width))
def readParticles(filename, data_type=None):
    """Read a mantaflow particle file.

    Returns a (header dict, numpy data) tuple; see RP_read_header and
    RP_read_content for details on `data_type`.
    """
    with gzip.open(filename, 'rb') as stream:
        header = RP_read_header(stream)
        return header, RP_read_content(stream, header, data_type)
return head, data
#******************************************************************************
# numpy array files
npBuf = {}  # filename -> list of arrays queued for the next .npz write
npCnt = {}  # filename -> index used for the next numbered output file
# FIXME , todo - add byte size limit per file at some point, to prevent them from getting too large

def writeNumpyBuf(filename, content):
    """Queue `content` under `filename`; once 11 arrays accumulate, flush
    them into a numbered compressed file '<filename>_NNNN.npz'."""
    global npBuf, npCnt
    if filename not in npBuf:
        npBuf[filename] = []
        npCnt[filename] = 0
    npBuf[filename].append(content)
    if len(npBuf[filename]) > 10:
        np.savez_compressed(filename + ("_%04d.npz" % (npCnt[filename])), *npBuf[filename])
        npCnt[filename] += 1
        npBuf[filename] = []
# write all remaining ones
def finalizeNumpyBufs():
    """Flush any partially filled buffers to disk and reset the buffer state."""
    global npBuf, npCnt
    for filename, arrays in npBuf.items():
        if arrays:
            np.savez_compressed(filename + ("_%04d.npz" % (npCnt[filename])), *arrays)
    # Reset both maps for the next run.
    npBuf = {}
    npCnt = {}
# write a single numpy array into an npz file
def writeNumpySingle(filename, content):
    """Write one array into a compressed .npz file (stored as 'arr_0')."""
    np.savez_compressed(filename, content)
def readNumpy(filename):
    """Load an .npz archive; returns the lazy NpzFile object."""
    return np.load(filename)
| 36.601942 | 171 | 0.670292 |
0637ac2aca62d5d13fab0b77688ecbe522624769 | 2,716 | py | Python | client/fase2/team04/arbol.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | client/fase2/team04/arbol.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | client/fase2/team04/arbol.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | from tkinter import Menu, Tk, Text, DISABLED, RAISED,Frame, FLAT, Button, Scrollbar, Canvas, END
from tkinter import ttk
import tkinter as tk
from tkinter import messagebox as MessageBox
import json
class Arbol(Frame):
    """Database navigator panel: a ttk.Treeview showing databases (folders)
    and their tables (files)."""

    def __init__(self, *args, **kwargs):
        """Build the treeview and apply the dark visual style."""
        Frame.__init__(self, *args, **kwargs)
        # Treeview styling only (colors/theme), no behavioral configuration.
        style = ttk.Style()
        style.theme_use("clam")
        style.configure("Treeview",
            background = "gray21",
            foreground = "white",
            fieldbackground = "silver",
            activebackground="gray59"
            )
        # Create the treeview widget; column #0 is the navigator header.
        self.treeview = ttk.Treeview(self)
        self.treeview.heading("#0", text="Navegador")
        self.treeview.pack(side="top", fill="both", expand=True)
        self.pack(side="top", fill="both", expand=True)

    def entregado(self, data):
        """Rebuild the tree from a JSON string of the form
        {database_name: {table_name: ..., ...}, ...}.

        Databases get a folder icon, tables a file icon. Any previous tree
        contents are cleared first.
        """
        print(data)
        persons = json.loads(data)
        # Load and shrink the icons; kept as instance attributes so Tk does
        # not garbage-collect the PhotoImage objects while displayed.
        self.file_image = tk.PhotoImage(file="resources/file.png")
        self.folder_image = tk.PhotoImage(file="resources/folder.png")
        self.file_image = self.file_image.subsample(35)
        self.folder_image = self.folder_image.subsample(38)
        # Clear any existing nodes before repopulating.
        for i in self.treeview.get_children():
            self.treeview.delete(i)
        item = self.treeview.insert("", END, text="Bases de datos")
        for key, value in persons.items():
            # One folder node per database, one file node per table.
            subitem = self.treeview.insert(item, END, text=key,image=self.folder_image)
            for key2, value2 in value.items():
                self.treeview.insert(subitem, END, text=key2,image=self.file_image)
                print (key2, value2)
2982129ff308be6d97970fe4d8f03904f2a5362e | 5,269 | py | Python | recipes/logr/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 1 | 2020-01-31T22:47:14.000Z | 2020-01-31T22:47:14.000Z | recipes/logr/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 3 | 2020-05-05T11:27:44.000Z | 2022-02-28T20:19:50.000Z | recipes/logr/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 1 | 2020-10-12T10:45:13.000Z | 2020-10-12T10:45:13.000Z | from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
class LogrConan(ConanFile):
    """Conan recipe for the header-only 'logr' logging frontend.

    The single 'backend' option selects which logging backend the headers
    are configured for (spdlog, glog, log4cplus, boostlog, or None) and
    drives both the requirements and the exported cpp_info components.
    """
    name = "logr"
    license = "BSD-3-Clause"
    homepage = "https://github.com/ngrodzitski/logr"
    url = "https://github.com/conan-io/conan-center-index"
    description = (
        "Logger frontend substitution for spdlog, glog, etc "
        "for server/desktop applications"
    )
    topics = ("logger", "development", "util", "utils")
    generators = "cmake"
    settings = "os", "compiler", "build_type", "arch"
    # Header-only: sources can be consumed in place, no per-config copies.
    no_copy_source = True
    options = {"backend": ["spdlog", "glog", "log4cplus", "boostlog", None]}
    default_options = {"backend": "spdlog"}
    # Cached CMake helper, lazily created by _configure_cmake().
    _cmake = None

    @property
    def _source_subfolder(self):
        # Conventional folder the upstream sources are renamed into.
        return "source_subfolder"

    @property
    def _build_subfolder(self):
        return "build_subfolder"

    def requirements(self):
        """Declare fmt plus the dependency matching the selected backend."""
        self.requires("fmt/8.0.1")
        if self.options.backend == "spdlog":
            self.requires("spdlog/1.9.2")
        elif self.options.backend == "glog":
            self.requires("glog/0.5.0")
        elif self.options.backend == "log4cplus":
            self.requires("log4cplus/2.0.5")
        elif self.options.backend == "boostlog":
            self.requires("boost/1.77.0")

    def configure(self):
        """Validate that the compiler supports the required C++17 standard.

        Unknown compilers only get a warning; known-too-old versions raise
        ConanInvalidConfiguration.
        """
        minimal_cpp_standard = "17"
        if self.settings.compiler.cppstd:
            tools.check_min_cppstd(self, minimal_cpp_standard)
        # Minimum compiler versions known to support C++17 well enough.
        minimal_version = {
            "gcc": "7",
            "clang": "7",
            "apple-clang": "10",
            "Visual Studio": "16",
        }
        compiler = str(self.settings.compiler)
        if compiler not in minimal_version:
            self.output.warn(
                (
                    "%s recipe lacks information about the %s compiler "
                    "standard version support"
                )
                % (self.name, compiler)
            )
            self.output.warn(
                "%s requires a compiler that supports at least C++%s"
                % (self.name, minimal_cpp_standard)
            )
            return
        version = tools.Version(self.settings.compiler.version)
        if version < minimal_version[compiler]:
            raise ConanInvalidConfiguration(
                "%s requires a compiler that supports at least C++%s"
                % (self.name, minimal_cpp_standard)
            )

    def _configure_cmake(self):
        """Create (once) and return the CMake helper with backend flags set.

        Tests/examples/benchmarks are disabled; only install targets run.
        """
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        self._cmake.definitions["LOGR_WITH_SPDLOG_BACKEND"] = (
            self.options.backend == "spdlog"
        )
        self._cmake.definitions["LOGR_WITH_GLOG_BACKEND"] = (
            self.options.backend == "glog"
        )
        self._cmake.definitions["LOGR_WITH_LOG4CPLUS_BACKEND"] = (
            self.options.backend == "log4cplus"
        )
        self._cmake.definitions["LOGR_WITH_BOOSTLOG_BACKEND"] = (
            self.options.backend == "boostlog"
        )
        self._cmake.definitions["LOGR_INSTALL"] = True
        self._cmake.definitions["LOGR_CONAN_PACKAGING"] = True
        self._cmake.definitions["LOGR_BUILD_TESTS"] = False
        self._cmake.definitions["LOGR_BUILD_EXAMPLES"] = False
        self._cmake.definitions["LOGR_BUILD_BENCHMARKS"] = False
        self._cmake.configure(source_folder=self._source_subfolder)
        return self._cmake

    def source(self):
        """Fetch the upstream sources and normalize the folder name."""
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = self.name + "-" + self.version
        os.rename(extracted_dir, self._source_subfolder)

    def package(self):
        """Install headers and license; drop the empty lib directory."""
        self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
        cmake = self._configure_cmake()
        cmake.install()
        tools.rmdir(os.path.join(self.package_folder, "lib"))

    def package_id(self):
        # Header-only package: the binary id must not depend on settings.
        self.info.settings.clear()

    def package_info(self):
        """Export one component per backend, each requiring logr_base + fmt."""
        self.cpp_info.names["cmake_find_package"] = "logr"
        self.cpp_info.names["cmake_find_package_multi"] = "logr"
        self.cpp_info.components["logr_base"].includedirs = ["include"]
        self.cpp_info.components["logr_base"].requires = ["fmt::fmt"]
        if self.options.backend == "spdlog":
            self.cpp_info.components["logr_spdlog"].includedirs = []
            self.cpp_info.components["logr_spdlog"].requires = [
                "logr_base",
                "spdlog::spdlog",
            ]
        elif self.options.backend == "glog":
            self.cpp_info.components["logr_glog"].includedirs = []
            self.cpp_info.components["logr_glog"].requires = [
                "logr_base",
                "glog::glog",
            ]
        elif self.options.backend == "log4cplus":
            self.cpp_info.components["logr_log4cplus"].includedirs = []
            self.cpp_info.components["logr_log4cplus"].requires = [
                "logr_base",
                "log4cplus::log4cplus",
            ]
        elif self.options.backend == "boostlog":
            self.cpp_info.components["logr_boostlog"].includedirs = []
            self.cpp_info.components["logr_boostlog"].requires = [
                "logr_base",
                "boost::log",
            ]
db2584da9519061d7a8ce5f0df26f5c9abb3fc71 | 170 | py | Python | models/__init__.py | cartologic/cartoview_cms | 3f21a092c90db3d4560d69c1c2a6c7843b23ea0e | [
"BSD-2-Clause"
] | null | null | null | models/__init__.py | cartologic/cartoview_cms | 3f21a092c90db3d4560d69c1c2a6c7843b23ea0e | [
"BSD-2-Clause"
] | null | null | null | models/__init__.py | cartologic/cartoview_cms | 3f21a092c90db3d4560d69c1c2a6c7843b23ea0e | [
"BSD-2-Clause"
] | null | null | null | from .forms.FormPage import FormPage
from .generic_module.GenericModule import GenericModule
from .generic_module.GenericPage import GenericPage
from .signals import *
| 24.285714 | 55 | 0.847059 |
ecc9f3b1e364dd13118439498ec3262d6f979930 | 1,994 | py | Python | ise_mac_check_app.py | andrewohanian/ISE_MAC_Checker | 4c9e6561cffd17684ce00bd48329e8df8a4138c5 | [
"MIT"
] | null | null | null | ise_mac_check_app.py | andrewohanian/ISE_MAC_Checker | 4c9e6561cffd17684ce00bd48329e8df8a4138c5 | [
"MIT"
] | null | null | null | ise_mac_check_app.py | andrewohanian/ISE_MAC_Checker | 4c9e6561cffd17684ce00bd48329e8df8a4138c5 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request
from ise_mac_search import get_endpoints, get_group_id, get_group, get_rejected_endpoints
import re
app = Flask(__name__)
#result=''
# ISE endpoint-group names whose members are treated as approved devices.
# NOTE(review): 'Video_Camers' looks misspelled — confirm it matches the
# actual ISE group name before correcting, since matching is done by name.
approved_groups = ['Phones', 'Printers', 'Video_Camers', 'IoT']
@app.route('/')
def index():
    """Render the landing page with the MAC-address lookup form."""
    return render_template('home.html')
@app.route('/send', methods=['POST'])
def send(result=''):
    """Validate the submitted MAC address and report its ISE status.

    Bug fix: the original signature `def send(result=result)` referenced a
    module-level `result` variable that is commented out, which raised a
    NameError as soon as the module was imported. The parameter now defaults
    to an empty string, preserving the route's behavior for Flask (which
    never passes it).

    Flow: format-check the MAC, then check the rejected list, then look the
    endpoint up and compare its group against `approved_groups`.
    """
    if request.method == 'POST':
        # Obtain the MAC submitted through the web form.
        mac = request.form['mac']
        # Check if MAC is formatted properly (colon-separated, hex pairs).
        if re.match(r"^([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})$", mac):
            # Rejected endpoints take precedence over group membership.
            rejected_endpoints = get_rejected_endpoints()
            if mac in rejected_endpoints:
                # Also fixes the 'currenty' typo in the user-facing message.
                return render_template('home.html', result=f'MAC {mac} is currently in a rejected state.', return_error=True)
            # If MAC is not rejected, check if it exists in an approved group.
            else:
                endpoint_link = get_endpoints(mac=mac)
                if endpoint_link:
                    group_id = get_group_id(endpoint_link)
                    group_name = get_group(group_id)
                    if group_name not in approved_groups:
                        return render_template('home.html', result=f'No match for [{mac}]. Please submit a ticket to add it to the database of approved devices.')
                    else:
                        print(f'Match found, {mac} exists in group: {group_name}')
                        return render_template('home.html', result=f'MAC Found: [{mac}] exists in group [{group_name}]')
                else:
                    print(f"No match for {mac}")
                    return render_template('home.html', result=f'No match for [{mac}]. Please submit a HEAT ticket to add it to the database of approved devices.')
        else:
            return render_template('home.html', result='Failed MAC formatting check. Please format MAC as 01:23:45:67:89:AB', return_error=True)
if __name__ == '__main__':
    # Bind on all interfaces so the form is reachable from the LAN.
    app.run(host='0.0.0.0', port=5000)
| 40.693878 | 155 | 0.642929 |
209bace101b1d7acae81e53769c4d64aae33f1a2 | 4,577 | py | Python | qa/rpc-tests/multi_rpc.py | Hinco01/Wolfcoin | d479d5e0aa6aef1a87379f9f8c19ca608dde5a2e | [
"MIT"
] | 6 | 2019-01-13T02:43:45.000Z | 2019-11-13T08:37:51.000Z | qa/rpc-tests/multi_rpc.py | Hinco01/Wolfcoin | d479d5e0aa6aef1a87379f9f8c19ca608dde5a2e | [
"MIT"
] | null | null | null | qa/rpc-tests/multi_rpc.py | Hinco01/Wolfcoin | d479d5e0aa6aef1a87379f9f8c19ca608dde5a2e | [
"MIT"
] | 3 | 2019-01-28T18:12:00.000Z | 2019-05-27T17:48:17.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
    """Tests the multi-user `rpcauth` config option.

    Two credential pairs (users 'rt' and 'rt2', generated via the
    share/rpcuser tool) are appended to wolfcoin.conf before the node
    starts; every username/password combination is then checked against
    the node's HTTP RPC port.
    """

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = False
        self.num_nodes = 1

    def setup_chain(self):
        super().setup_chain()
        # Append rpcauth entries to wolfcoin.conf before initialization.
        rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
        rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
        with open(os.path.join(self.options.tmpdir+"/node0", "wolfcoin.conf"), 'a', encoding='utf8') as f:
            f.write(rpcauth+"\n")
            f.write(rpcauth2+"\n")

    def setup_network(self):
        self.nodes = self.setup_nodes()

    def _check_auth(self, url, authpair, expect_rejected):
        """POST a getbestblockhash RPC using HTTP basic auth `authpair`
        ('user:password') and assert whether it is rejected with 401.

        Extracted helper: the original repeated this 7-line stanza six
        times in run_test.
        """
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status == 401, expect_rejected)
        conn.close()

    def run_test(self):
        ##################################################
        # Check correctness of the rpcauth config option #
        ##################################################
        url = urllib.parse.urlparse(self.nodes[0].url)

        # Passwords matching the rpcauth hashes written in setup_chain.
        password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="   # user 'rt'
        password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="  # user 'rt2'

        # Old (node-generated) authpair still works.
        self._check_auth(url, url.username + ':' + url.password, False)
        # First rpcauth pair works.
        self._check_auth(url, "rt:" + password, False)
        # Wrong login name with rt's password is rejected.
        self._check_auth(url, "rtwrong:" + password, True)
        # Wrong password for rt is rejected.
        self._check_auth(url, "rt:" + password + "wrong", True)
        # Second rpcauth pair works.
        self._check_auth(url, "rt2:" + password2, False)
        # Wrong password for rt2 is rejected.
        self._check_auth(url, "rt2:" + password2 + "wrong", True)
if __name__ == '__main__':
    # Run the RPC-auth functional test when executed as a script.
    # (Fixed non-idiomatic spacing: "HTTPBasicsTest ().main ()".)
    HTTPBasicsTest().main()
| 37.826446 | 129 | 0.645401 |
1fbca7076e81d9f2aa8ee86e83068cac998bf609 | 3,419 | py | Python | melodic/lib/python2.7/dist-packages/rqt_bag/message_listener_thread.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | null | null | null | melodic/lib/python2.7/dist-packages/rqt_bag/message_listener_thread.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | 1 | 2021-07-08T10:26:06.000Z | 2021-07-08T10:31:11.000Z | melodic/lib/python2.7/dist-packages/rqt_bag/message_listener_thread.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | null | null | null | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import threading
from python_qt_binding.QtCore import QCoreApplication, QEvent
from python_qt_binding.QtCore import qWarning
class ListenerEvent(QEvent):
    """Custom Qt event (type QEvent.User) carrying one bag message payload.

    Posted to a listener object via QCoreApplication.postEvent so that the
    message is delivered on the Qt event loop rather than the worker thread.
    """

    def __init__(self, data):
        # data: the bag message payload to hand to the listener.
        super(ListenerEvent, self).__init__(QEvent.User)
        self.data = data
class MessageListenerThread(threading.Thread):
    """
    Waits for new messages loaded on the given topic, then calls the message listener.
    One thread per listener, topic pair.
    """

    def __init__(self, timeline, topic, listener):
        # timeline: object exposing _messages (topic -> data) and
        #           _messages_cvs (topic -> condition variable)
        # topic:    topic name this thread watches
        # listener: Qt object that receives a ListenerEvent per new message
        threading.Thread.__init__(self)
        self.timeline = timeline
        self.topic = topic
        self.listener = listener
        # Last message data forwarded; used to detect that a *new* message
        # has arrived (see the wait loop in run()).
        self.bag_msg_data = None
        self._stop_flag = False
        # Daemon thread so it never blocks process shutdown.
        self.setDaemon(True)
        self.start()

    def run(self):
        """
        Thread body. loops and notifies the listener of new messages
        """
        while not self._stop_flag:
            # Wait for a new message
            cv = self.timeline._messages_cvs[self.topic]
            with cv:
                # Sleep until the topic has a message that differs from the
                # one we last delivered; re-check after every notify (guards
                # against spurious wakeups and the stop() wakeup).
                while (self.topic not in self.timeline._messages) or (self.bag_msg_data == self.timeline._messages[self.topic]):
                    cv.wait()
                    if self._stop_flag:
                        return
                bag_msg_data = self.timeline._messages[self.topic]
            # View the message
            self.bag_msg_data = bag_msg_data
            try:
                # Deliver on the Qt event loop, not this worker thread.
                event = ListenerEvent(bag_msg_data)
                QCoreApplication.postEvent(self.listener, event)
            except Exception as ex:
                qWarning('Error notifying listener %s: %s' % (type(self.listener), str(ex)))

    def stop(self):
        # Set the flag first, then wake the waiter so run() can observe it.
        self._stop_flag = True
        cv = self.timeline._messages_cvs[self.topic]
        with cv:
            cv.notify_all()
| 37.988889 | 128 | 0.684411 |
5fbcf79f16ad484e6b9741754fc11c4fa60bc759 | 2,942 | py | Python | lib/galaxy/work/context.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | 47 | 2015-10-21T23:30:30.000Z | 2022-03-09T06:51:32.000Z | lib/galaxy/work/context.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | 20 | 2015-09-30T18:56:40.000Z | 2019-04-12T19:32:59.000Z | lib/galaxy/work/context.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | 35 | 2015-10-30T13:09:40.000Z | 2021-05-03T23:17:46.000Z | from typing import Optional
from galaxy.managers.context import (
ProvidesHistoryContext,
)
from galaxy.model import History
class WorkRequestContext(ProvidesHistoryContext):
    """ Stripped down implementation of Galaxy web transaction god object for
    work request handling outside of web threads - uses mix-ins shared with
    GalaxyWebTransaction to provide app, user, and history context convenience
    methods - but nothing related to HTTP handling, mako views, etc....

    Things that only need app shouldn't be consuming trans - but there is a
    need for actions potentially tied to users and histories and hopefully
    this can define that stripped down interface providing access to user and
    history information - but not dealing with web request and response
    objects.
    """

    def __init__(self, app, user=None, history=None, workflow_building_mode=False, qualified_url_builder=None):
        self._app = app
        self.__user = user
        # Cache for get_current_user_roles(); computed lazily on first use.
        self.__user_current_roles = None
        self.__history = history
        self._qualified_url_builder = qualified_url_builder
        self.workflow_building_mode = workflow_building_mode

    @property
    def app(self):
        """The Galaxy application object."""
        return self._app

    @property
    def qualified_url_builder(self):
        return self._qualified_url_builder

    def get_history(self, create=False):
        # `create` is accepted for interface compatibility but ignored; a
        # work request context never creates a history.
        return self.__history

    @property
    def history(self):
        return self.get_history()

    def get_user(self):
        """Return the current user if logged in or None."""
        return self.__user

    def get_current_user_roles(self):
        # Lazily compute and memoize the (potentially expensive) role lookup.
        if self.__user_current_roles is None:
            self.__user_current_roles = super().get_current_user_roles()
        return self.__user_current_roles

    def set_user(self, user):
        """Set the current user."""
        # The user is fixed at construction time for a work request.
        raise NotImplementedError("Cannot change users from a work request context.")

    user = property(get_user, set_user)
class SessionRequestContext(WorkRequestContext):
    """Like WorkRequestContext, but provides access to galaxy session and session."""

    def __init__(self, **kwargs):
        # Pop the session before delegating; WorkRequestContext.__init__
        # does not accept a galaxy_session keyword.
        self.galaxy_session = kwargs.pop('galaxy_session', None)
        super().__init__(**kwargs)

    def get_galaxy_session(self):
        """Return the galaxy session bound at construction (may be None)."""
        return self.galaxy_session
def proxy_work_context_for_history(trans: ProvidesHistoryContext, history: Optional[History] = None, workflow_building_mode=False):
    """Create a WorkContext for supplied context with potentially different history.

    This provides semi-structured access to a transaction/work context with a supplied target
    history that is different from the user's current history (which also might change during
    the request).
    """
    target_history = history or trans.history
    return WorkRequestContext(
        app=trans.app,
        user=trans.user,
        history=target_history,
        workflow_building_mode=workflow_building_mode,
        qualified_url_builder=trans.qualified_url_builder,
    )
| 37.240506 | 193 | 0.733175 |
5d22f0e04dc9613c9b614b4b0396c1891975caea | 358 | py | Python | tests/test_custom_data.py | xashru/tner | f61f7a377ad20f4bb3dd4c920b32151acdd29ea1 | [
"MIT"
] | 142 | 2020-12-10T22:32:33.000Z | 2022-03-16T14:55:03.000Z | tests/test_custom_data.py | xashru/tner | f61f7a377ad20f4bb3dd4c920b32151acdd29ea1 | [
"MIT"
] | 8 | 2021-03-08T08:54:00.000Z | 2022-01-24T13:59:48.000Z | tests/test_custom_data.py | xashru/tner | f61f7a377ad20f4bb3dd4c920b32151acdd29ea1 | [
"MIT"
] | 14 | 2021-02-15T11:28:19.000Z | 2022-01-19T17:44:51.000Z | from tner import TrainTransformersNER
trainer = TrainTransformersNER(
checkpoint_dir='./ckpt',
dataset='/Users/asahi/Desktop/data_seyyaw',
transformers_model='xlm-roberta-base',
random_seed=1234,
lr=1e-5,
total_step=10,
warmup_step=10,
batch_size=1,
max_seq_length=128)
trainer.train()
| 27.538462 | 51 | 0.645251 |
6ef418578c42186a66c362a6ef7b2c3dc98082d3 | 485 | py | Python | output/models/nist_data/list_pkg/unsigned_byte/schema_instance/nistschema_sv_iv_list_unsigned_byte_length_5_xsd/nistschema_sv_iv_list_unsigned_byte_length_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/unsigned_byte/schema_instance/nistschema_sv_iv_list_unsigned_byte_length_5_xsd/nistschema_sv_iv_list_unsigned_byte_length_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/unsigned_byte/schema_instance/nistschema_sv_iv_list_unsigned_byte_length_5_xsd/nistschema_sv_iv_list_unsigned_byte_length_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-unsignedByte-length-5-NS"
@dataclass
class NistschemaSvIvListUnsignedByteLength5:
class Meta:
name = "NISTSchema-SV-IV-list-unsignedByte-length-5"
namespace = "NISTSchema-SV-IV-list-unsignedByte-length-5-NS"
value: List[int] = field(
default_factory=list,
metadata={
"length": 10,
"tokens": True,
}
)
| 24.25 | 68 | 0.65567 |
0556bc2c7a9dde5b947749acee148cca00931cdb | 15,009 | py | Python | homeassistant/components/stream/__init__.py | PandaBaer92/core | 4b30c9631f0f0a1ad59005f316b3f03975d2accd | [
"Apache-2.0"
] | 2 | 2019-05-25T15:03:29.000Z | 2019-06-30T08:32:24.000Z | homeassistant/components/stream/__init__.py | PandaBaer92/core | 4b30c9631f0f0a1ad59005f316b3f03975d2accd | [
"Apache-2.0"
] | 83 | 2020-07-31T21:32:45.000Z | 2022-03-31T10:00:54.000Z | homeassistant/components/stream/__init__.py | PandaBaer92/core | 4b30c9631f0f0a1ad59005f316b3f03975d2accd | [
"Apache-2.0"
] | 4 | 2020-10-29T22:56:04.000Z | 2020-12-12T21:51:40.000Z | """Provide functionality to stream video source.
Components use create_stream with a stream source (e.g. an rtsp url) to create
a new Stream object. Stream manages:
- Background work to fetch and decode a stream
- Desired output formats
- Home Assistant URLs for viewing a stream
- Access tokens for URLs for viewing a stream
A Stream consists of a background worker, and one or more output formats each
with their own idle timeout managed by the stream component. When an output
format is no longer in use, the stream component will expire it. When there
are no active output formats, the background worker is shut down and access
tokens are expired. Alternatively, a Stream can be configured with keepalive
to always keep workers active.
"""
from __future__ import annotations
from collections.abc import Callable, Mapping
import logging
import re
import secrets
import threading
import time
from types import MappingProxyType
from typing import cast
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .const import (
ATTR_ENDPOINTS,
ATTR_SETTINGS,
ATTR_STREAMS,
CONF_LL_HLS,
CONF_PART_DURATION,
CONF_SEGMENT_DURATION,
DOMAIN,
HLS_PROVIDER,
MAX_SEGMENTS,
OUTPUT_IDLE_TIMEOUT,
RECORDER_PROVIDER,
SEGMENT_DURATION_ADJUSTER,
STREAM_RESTART_INCREMENT,
STREAM_RESTART_RESET_TIME,
TARGET_SEGMENT_DURATION_NON_LL_HLS,
)
from .core import PROVIDERS, IdleTimer, StreamOutput, StreamSettings
from .hls import HlsStreamOutput, async_setup_hls
_LOGGER = logging.getLogger(__name__)
# Patterns that match embedded credentials in stream source urls, paired
# with the replacement text that masks them.
STREAM_SOURCE_REDACT_PATTERN = [
    (re.compile(r"//.*:.*@"), "//****:****@"),
    (re.compile(r"\?auth=.*"), "?auth=****"),
]


def redact_credentials(data: str) -> str:
    """Return *data* with any embedded credentials masked out."""
    redacted = data
    for pattern, replacement in STREAM_SOURCE_REDACT_PATTERN:
        redacted = pattern.sub(replacement, redacted)
    return redacted
def create_stream(
    hass: HomeAssistant,
    stream_source: str,
    options: dict[str, str],
    stream_label: str | None = None,
) -> Stream:
    """Create a stream with the specified identifier based on the source url.

    The stream_source is typically an rtsp url (though any url accepted by ffmpeg is fine) and
    options are passed into pyav / ffmpeg as options.

    The stream_label is a string used as an additional message in logging.

    Raises HomeAssistantError if the stream integration is not set up.
    """
    if DOMAIN not in hass.config.components:
        raise HomeAssistantError("Stream integration is not set up.")

    # For RTSP streams, prefer TCP. Caller-supplied options are spread last
    # so they can override these defaults.
    if isinstance(stream_source, str) and stream_source[:7] == "rtsp://":
        options = {
            "rtsp_flags": "prefer_tcp",
            "stimeout": "5000000",
            **options,
        }

    stream = Stream(hass, stream_source, options=options, stream_label=stream_label)
    # Register globally so shutdown/bookkeeping can reach every stream.
    hass.data[DOMAIN][ATTR_STREAMS].append(stream)
    return stream
# Integration-level configuration: optional LL-HLS (low latency HLS) toggle
# plus segment/part duration tuning, each validated against a safe range.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_LL_HLS, default=False): cv.boolean,
                vol.Optional(CONF_SEGMENT_DURATION, default=6): vol.All(
                    cv.positive_float, vol.Range(min=2, max=10)
                ),
                vol.Optional(CONF_PART_DURATION, default=1): vol.All(
                    cv.positive_float, vol.Range(min=0.2, max=1.5)
                ),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def filter_libav_logging() -> None:
    """Filter libav logging to only log when the stream logger is at DEBUG."""
    debug_enabled = logging.getLogger(__name__).isEnabledFor(logging.DEBUG)

    def _pass_when_debug(record: logging.LogRecord) -> bool:
        return debug_enabled

    libav_namespaces = (
        "libav.mp4",
        "libav.h264",
        "libav.hevc",
        "libav.rtsp",
        "libav.tcp",
        "libav.tls",
        "libav.mpegts",
        "libav.NULL",
    )
    for namespace in libav_namespaces:
        logging.getLogger(namespace).addFilter(_pass_when_debug)

    # Set log level to error for libav.mp4
    logging.getLogger("libav.mp4").setLevel(logging.ERROR)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up stream."""
    # Drop libav log messages if stream logging is above DEBUG
    filter_libav_logging()

    # Keep import here so that we can import stream integration without installing reqs
    # pylint: disable=import-outside-toplevel
    from .recorder import async_setup_recorder

    hass.data[DOMAIN] = {}
    hass.data[DOMAIN][ATTR_ENDPOINTS] = {}
    hass.data[DOMAIN][ATTR_STREAMS] = []
    # Translate the validated YAML options into runtime StreamSettings;
    # LL-HLS derives its timing knobs from the configured durations.
    if (conf := config.get(DOMAIN)) and conf[CONF_LL_HLS]:
        assert isinstance(conf[CONF_SEGMENT_DURATION], float)
        assert isinstance(conf[CONF_PART_DURATION], float)
        hass.data[DOMAIN][ATTR_SETTINGS] = StreamSettings(
            ll_hls=True,
            min_segment_duration=conf[CONF_SEGMENT_DURATION]
            - SEGMENT_DURATION_ADJUSTER,
            part_target_duration=conf[CONF_PART_DURATION],
            hls_advance_part_limit=max(int(3 / conf[CONF_PART_DURATION]), 3),
            hls_part_timeout=2 * conf[CONF_PART_DURATION],
        )
    else:
        hass.data[DOMAIN][ATTR_SETTINGS] = StreamSettings(
            ll_hls=False,
            min_segment_duration=TARGET_SEGMENT_DURATION_NON_LL_HLS
            - SEGMENT_DURATION_ADJUSTER,
            part_target_duration=TARGET_SEGMENT_DURATION_NON_LL_HLS,
            hls_advance_part_limit=3,
            hls_part_timeout=TARGET_SEGMENT_DURATION_NON_LL_HLS,
        )

    # Setup HLS
    hls_endpoint = async_setup_hls(hass)
    hass.data[DOMAIN][ATTR_ENDPOINTS][HLS_PROVIDER] = hls_endpoint

    # Setup Recorder
    async_setup_recorder(hass)

    @callback
    def shutdown(event: Event) -> None:
        """Stop all stream workers."""
        for stream in hass.data[DOMAIN][ATTR_STREAMS]:
            # Disable keepalive so stop() actually tears the worker down.
            stream.keepalive = False
            stream.stop()
        _LOGGER.info("Stopped stream workers")

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)

    return True
class Stream:
    """Represents a single stream.

    Owns a background worker thread that decodes the source and one or more
    output providers (HLS, recorder). The worker is restarted automatically
    while keepalive is set; otherwise it shuts down when all outputs expire.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        source: str,
        options: dict[str, str],
        stream_label: str | None = None,
    ) -> None:
        """Initialize a stream."""
        self.hass = hass
        self.source = source
        self.options = options
        self._stream_label = stream_label
        # When True the worker is restarted on failure instead of stopping.
        self.keepalive = False
        # Token embedded in output URLs; reset when all outputs go idle.
        self.access_token: str | None = None
        self._thread: threading.Thread | None = None
        self._thread_quit = threading.Event()
        self._outputs: dict[str, StreamOutput] = {}
        self._fast_restart_once = False
        self._available: bool = True
        self._update_callback: Callable[[], None] | None = None
        # Per-stream logger when a label is given, module logger otherwise.
        self._logger = (
            logging.getLogger(f"{__package__}.stream.{stream_label}")
            if stream_label
            else _LOGGER
        )

    def endpoint_url(self, fmt: str) -> str:
        """Start the stream and returns a url for the output format.

        Raises ValueError when no provider for `fmt` has been added.
        """
        if fmt not in self._outputs:
            raise ValueError(f"Stream is not configured for format '{fmt}'")
        if not self.access_token:
            self.access_token = secrets.token_hex()
        endpoint_fmt: str = self.hass.data[DOMAIN][ATTR_ENDPOINTS][fmt]
        return endpoint_fmt.format(self.access_token)

    def outputs(self) -> Mapping[str, StreamOutput]:
        """Return a copy of the stream outputs."""
        # A copy is returned so the caller can iterate through the outputs
        # without concern about self._outputs being modified from another thread.
        return MappingProxyType(self._outputs.copy())

    def add_provider(
        self, fmt: str, timeout: int = OUTPUT_IDLE_TIMEOUT
    ) -> StreamOutput:
        """Add provider output stream.

        Idempotent: returns the existing provider for `fmt` when present.
        The provider is removed again by its idle timer unless keepalive is
        set (the recorder is always removed when idle).
        """
        if not self._outputs.get(fmt):

            @callback
            def idle_callback() -> None:
                if (
                    not self.keepalive or fmt == RECORDER_PROVIDER
                ) and fmt in self._outputs:
                    self.remove_provider(self._outputs[fmt])
                self.check_idle()

            provider = PROVIDERS[fmt](
                self.hass, IdleTimer(self.hass, timeout, idle_callback)
            )
            self._outputs[fmt] = provider
        return self._outputs[fmt]

    def remove_provider(self, provider: StreamOutput) -> None:
        """Remove provider output stream."""
        if provider.name in self._outputs:
            self._outputs[provider.name].cleanup()
            del self._outputs[provider.name]

        # No consumers left: tear down the worker (unless keepalive).
        if not self._outputs:
            self.stop()

    def check_idle(self) -> None:
        """Reset access token if all providers are idle."""
        if all(p.idle for p in self._outputs.values()):
            self.access_token = None

    @property
    def available(self) -> bool:
        """Return False if the stream is started and known to be unavailable."""
        return self._available

    def set_update_callback(self, update_callback: Callable[[], None]) -> None:
        """Set callback to run when state changes."""
        self._update_callback = update_callback

    @callback
    def _async_update_state(self, available: bool) -> None:
        """Set state and Run callback to notify state has been updated."""
        self._available = available
        if self._update_callback:
            self._update_callback()

    def start(self) -> None:
        """Start a stream.

        No-op when the worker thread is already running.
        """
        if self._thread is None or not self._thread.is_alive():
            if self._thread is not None:
                # The thread must have crashed/exited. Join to clean up the
                # previous thread.
                self._thread.join(timeout=0)
            self._thread_quit.clear()
            self._thread = threading.Thread(
                name="stream_worker",
                target=self._run_worker,
            )
            self._thread.start()
            self._logger.info(
                "Started stream: %s", redact_credentials(str(self.source))
            )

    def update_source(self, new_source: str) -> None:
        """Restart the stream with a new stream source."""
        self._logger.debug("Updating stream source %s", new_source)
        self.source = new_source
        # Signal the worker loop to exit once and restart immediately with
        # the new source (see _run_worker's fast-restart handling).
        self._fast_restart_once = True
        self._thread_quit.set()

    def _run_worker(self) -> None:
        """Handle consuming streams and restart keepalive streams."""
        # Keep import here so that we can import stream integration without installing reqs
        # pylint: disable=import-outside-toplevel
        from .worker import StreamState, StreamWorkerError, stream_worker

        stream_state = StreamState(self.hass, self.outputs)
        wait_timeout = 0
        while not self._thread_quit.wait(timeout=wait_timeout):
            start_time = time.time()
            self.hass.add_job(self._async_update_state, True)
            try:
                stream_worker(
                    self.source,
                    self.options,
                    stream_state,
                    self._thread_quit,
                )
            except StreamWorkerError as err:
                self._logger.error("Error from stream worker: %s", str(err))

            self._available = False
            stream_state.discontinuity()
            if not self.keepalive or self._thread_quit.is_set():
                if self._fast_restart_once:
                    # The stream source is updated, restart without any delay.
                    self._fast_restart_once = False
                    self._thread_quit.clear()
                    continue
                break

            self.hass.add_job(self._async_update_state, False)
            # To avoid excessive restarts, wait before restarting
            # As the required recovery time may be different for different setups, start
            # with trying a short wait_timeout and increase it on each reconnection attempt.
            # Reset the wait_timeout after the worker has been up for several minutes
            if time.time() - start_time > STREAM_RESTART_RESET_TIME:
                wait_timeout = 0
            wait_timeout += STREAM_RESTART_INCREMENT
            self._logger.debug(
                "Restarting stream worker in %d seconds: %s",
                wait_timeout,
                self.source,
            )
        self._worker_finished()

    def _worker_finished(self) -> None:
        """Schedule cleanup of all outputs."""

        @callback
        def remove_outputs() -> None:
            for provider in self.outputs().values():
                self.remove_provider(provider)

        # Provider removal must happen on the event loop, not this thread.
        self.hass.loop.call_soon_threadsafe(remove_outputs)

    def stop(self) -> None:
        """Remove outputs and access token."""
        self._outputs = {}
        self.access_token = None

        if not self.keepalive:
            self._stop()

    def _stop(self) -> None:
        """Stop worker thread."""
        if self._thread is not None:
            self._thread_quit.set()
            self._thread.join()
            self._thread = None
            self._logger.info(
                "Stopped stream: %s", redact_credentials(str(self.source))
            )

    async def async_record(
        self, video_path: str, duration: int = 30, lookback: int = 5
    ) -> None:
        """Make a .mp4 recording from a provided stream.

        Raises HomeAssistantError when the path is not writable per the HA
        allowlist or when a recording is already in progress.
        """
        # Keep import here so that we can import stream integration without installing reqs
        # pylint: disable=import-outside-toplevel
        from .recorder import RecorderOutput

        # Check for file access
        if not self.hass.config.is_allowed_path(video_path):
            raise HomeAssistantError(f"Can't write {video_path}, no access to path!")

        # Add recorder
        if recorder := self.outputs().get(RECORDER_PROVIDER):
            assert isinstance(recorder, RecorderOutput)
            raise HomeAssistantError(
                f"Stream already recording to {recorder.video_path}!"
            )
        recorder = cast(
            RecorderOutput, self.add_provider(RECORDER_PROVIDER, timeout=duration)
        )
        recorder.video_path = video_path

        self.start()
        self._logger.debug("Started a stream recording of %s seconds", duration)

        # Take advantage of lookback
        hls: HlsStreamOutput = cast(HlsStreamOutput, self.outputs().get(HLS_PROVIDER))
        if lookback > 0 and hls:
            num_segments = min(int(lookback // hls.target_duration), MAX_SEGMENTS)
            # Wait for latest segment, then add the lookback
            await hls.recv()
            recorder.prepend(list(hls.get_segments())[-num_segments:])
| 35.566351 | 94 | 0.632754 |
d178004572d7706d76d407f4c4f986faa5244a47 | 4,311 | py | Python | AdelaiDet/detectron2/tests/data/test_dataset.py | km1562/AdelaiDet2 | 293cd6410631d36145f9ae4eb06a63520c66b92d | [
"Apache-2.0"
] | null | null | null | AdelaiDet/detectron2/tests/data/test_dataset.py | km1562/AdelaiDet2 | 293cd6410631d36145f9ae4eb06a63520c66b92d | [
"Apache-2.0"
] | null | null | null | AdelaiDet/detectron2/tests/data/test_dataset.py | km1562/AdelaiDet2 | 293cd6410631d36145f9ae4eb06a63520c66b92d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
import os
import pickle
import sys
import unittest
from functools import partial
import torch
from iopath.common.file_io import LazyPath
from detectron2 import model_zoo
from detectron2.config import instantiate
from detectron2.data import (
DatasetFromList,
MapDataset,
ToIterableDataset,
build_batch_data_loader,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.data.samplers import InferenceSampler, TrainingSampler
def _a_slow_func(x):
return "path/{}".format(x)
class TestDatasetFromList(unittest.TestCase):
    """Tests for DatasetFromList, focusing on lazily-evaluated file paths."""

    # Failing for py3.6, likely due to pickle
    @unittest.skipIf(sys.version_info.minor <= 6, "Not supported in Python 3.6")
    def test_using_lazy_path(self):
        # LazyPath defers the path computation until os.fspath() is called;
        # verify DatasetFromList preserves the LazyPath objects untouched.
        dataset = []
        for i in range(10):
            dataset.append({"file_name": LazyPath(partial(_a_slow_func, i))})
        dataset = DatasetFromList(dataset)
        for i in range(10):
            path = dataset[i]["file_name"]
            self.assertTrue(isinstance(path, LazyPath))
            self.assertEqual(os.fspath(path), _a_slow_func(i))
class TestMapDataset(unittest.TestCase):
    """Tests for MapDataset over both map-style and iterable datasets."""

    @staticmethod
    def map_func(x):
        # Returns None for x == 2; MapDataset appears to treat None as
        # "skip this sample" (see the assertions below) — confirm against
        # the MapDataset implementation.
        if x == 2:
            return None
        return x * 2

    def test_map_style(self):
        ds = DatasetFromList([1, 2, 3])
        ds = MapDataset(ds, TestMapDataset.map_func)
        self.assertEqual(ds[0], 2)
        self.assertEqual(ds[2], 6)
        # Index 1 maps to None, so MapDataset substitutes another sample.
        self.assertIn(ds[1], [2, 6])

    def test_iter_style(self):
        class DS(torch.utils.data.IterableDataset):
            def __iter__(self):
                yield from [1, 2, 3]

        ds = DS()
        ds = MapDataset(ds, TestMapDataset.map_func)
        self.assertIsInstance(ds, torch.utils.data.IterableDataset)

        # In iterable mode, None results are dropped from the stream.
        data = list(iter(ds))
        self.assertEqual(data, [2, 6])

    def test_pickleability(self):
        ds = DatasetFromList([1, 2, 3])
        # Lambdas are normally unpicklable; MapDataset must handle this.
        ds = MapDataset(ds, lambda x: x * 2)
        ds = pickle.loads(pickle.dumps(ds))
        self.assertEqual(ds[0], 2)
@unittest.skipIf(os.environ.get("CI"), "Skipped OSS testing due to COCO datas requirement.")
class TestDataLoader(unittest.TestCase):
    """Tests for build_detection_{train,test}_loader and batch loaders."""

    def _get_kwargs(self):
        # get kwargs of build_detection_train_loader
        # NOTE(review): "common/datas/coco.py" differs from upstream
        # detectron2's "common/data/coco.py" — confirm this fork renamed
        # the config directory intentionally.
        cfg = model_zoo.get_config("common/datas/coco.py").dataloader.train
        cfg.dataset.names = "coco_2017_val_100"
        cfg.pop("_target_")
        kwargs = {k: instantiate(v) for k, v in cfg.items()}
        return kwargs

    def test_build_dataloader_train(self):
        kwargs = self._get_kwargs()
        dl = build_detection_train_loader(**kwargs)
        next(iter(dl))

    def test_build_iterable_dataloader_train(self):
        kwargs = self._get_kwargs()
        ds = DatasetFromList(kwargs.pop("dataset"))
        ds = ToIterableDataset(ds, TrainingSampler(len(ds)))
        dl = build_detection_train_loader(dataset=ds, **kwargs)
        next(iter(dl))

    def _check_is_range(self, data_loader, N):
        # check that data_loader produces range(N)
        data = list(iter(data_loader))
        data = [x for batch in data for x in batch]  # flatten the batches
        self.assertEqual(len(data), N)
        self.assertEqual(set(data), set(range(N)))

    def test_build_batch_dataloader_inference(self):
        # Test that build_batch_data_loader can be used for inference
        N = 96
        ds = DatasetFromList(list(range(N)))
        sampler = InferenceSampler(len(ds))
        dl = build_batch_data_loader(ds, sampler, 8, num_workers=3)
        self._check_is_range(dl, N)

    def test_build_dataloader_inference(self):
        N = 50
        ds = DatasetFromList(list(range(N)))
        sampler = InferenceSampler(len(ds))
        dl = build_detection_test_loader(
            dataset=ds, sampler=sampler, mapper=lambda x: x, num_workers=3
        )
        self._check_is_range(dl, N)

    def test_build_iterable_dataloader_inference(self):
        # Test that build_detection_test_loader supports iterable dataset
        N = 50
        ds = DatasetFromList(list(range(N)))
        ds = ToIterableDataset(ds, InferenceSampler(len(ds)))
        dl = build_detection_test_loader(dataset=ds, mapper=lambda x: x, num_workers=3)
        self._check_is_range(dl, N)
79a76d4826a56b03940d6e6a931903a8695c9db6 | 4,284 | py | Python | ironic/api/controllers/v1/versions.py | NaohiroTamura/ironic | 1fcb6c52a22c9c025dbf27931720ce2eda08704f | [
"Apache-2.0"
] | null | null | null | ironic/api/controllers/v1/versions.py | NaohiroTamura/ironic | 1fcb6c52a22c9c025dbf27931720ce2eda08704f | [
"Apache-2.0"
] | null | null | null | ironic/api/controllers/v1/versions.py | NaohiroTamura/ironic | 1fcb6c52a22c9c025dbf27931720ce2eda08704f | [
"Apache-2.0"
] | 1 | 2022-03-25T14:26:10.000Z | 2022-03-25T14:26:10.000Z | # Copyright (c) 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is the version 1 API
BASE_VERSION = 1
# Here goes a short log of changes in every version.
# Refer to doc/source/dev/webapi-version-history.rst for a detailed explanation
# of what each version contains.
#
# v1.0: corresponds to Juno API, not supported since Kilo
# v1.1: API at the point in time when versioning support was added,
# covers the following commits from Kilo cycle:
# 827db7fe: Add Node.maintenance_reason
# 68eed82b: Add API endpoint to set/unset the node maintenance mode
# bc973889: Add sync and async support for passthru methods
# e03f443b: Vendor endpoints to support different HTTP methods
# e69e5309: Make vendor methods discoverable via the Ironic API
# edf532db: Add logic to store the config drive passed by Nova
# v1.2: Renamed NOSTATE ("None") to AVAILABLE ("available")
# v1.3: Add node.driver_internal_info
# v1.4: Add MANAGEABLE state
# v1.5: Add logical node names
# v1.6: Add INSPECT* states
# v1.7: Add node.clean_step
# v1.8: Add ability to return a subset of resource fields
# v1.9: Add ability to filter nodes by provision state
# v1.10: Logical node names support RFC 3986 unreserved characters
# v1.11: Nodes appear in ENROLL state by default
# v1.12: Add support for RAID
# v1.13: Add 'abort' verb to CLEANWAIT
# v1.14: Make the following endpoints discoverable via API:
# 1. '/v1/nodes/<uuid>/states'
# 2. '/v1/drivers/<driver-name>/properties'
# v1.15: Add ability to do manual cleaning of nodes
# v1.16: Add ability to filter nodes by driver.
# v1.17: Add 'adopt' verb for ADOPTING active nodes.
# v1.18: Add port.internal_info.
# v1.19: Add port.local_link_connection and port.pxe_enabled.
# v1.20: Add node.network_interface
# v1.21: Add node.resource_class
# v1.22: Ramdisk lookup and heartbeat endpoints.
# v1.23: Add portgroup support.
# v1.24: Add subcontrollers: node.portgroup, portgroup.ports.
# Add port.portgroup_uuid field.
# v1.25: Add possibility to unset chassis_uuid from node.
# v1.26: Add portgroup.mode and portgroup.properties.
# v1.27: Add soft reboot, soft power off and timeout.
# v1.28: Add vifs subcontroller to node
# v1.29: Add inject nmi.
# v1.30: Add dynamic driver interactions.
# v1.31: Add dynamic interfaces fields to node.
# One named constant per API minor revision; see the change log comment
# above for what each revision introduced.
MINOR_0_JUNO = 0
MINOR_1_INITIAL_VERSION = 1
MINOR_2_AVAILABLE_STATE = 2
MINOR_3_DRIVER_INTERNAL_INFO = 3
MINOR_4_MANAGEABLE_STATE = 4
MINOR_5_NODE_NAME = 5
MINOR_6_INSPECT_STATE = 6
MINOR_7_NODE_CLEAN = 7
MINOR_8_FETCHING_SUBSET_OF_FIELDS = 8
MINOR_9_PROVISION_STATE_FILTER = 9
MINOR_10_UNRESTRICTED_NODE_NAME = 10
MINOR_11_ENROLL_STATE = 11
MINOR_12_RAID_CONFIG = 12
MINOR_13_ABORT_VERB = 13
MINOR_14_LINKS_NODESTATES_DRIVERPROPERTIES = 14
MINOR_15_MANUAL_CLEAN = 15
MINOR_16_DRIVER_FILTER = 16
MINOR_17_ADOPT_VERB = 17
MINOR_18_PORT_INTERNAL_INFO = 18
MINOR_19_PORT_ADVANCED_NET_FIELDS = 19
MINOR_20_NETWORK_INTERFACE = 20
MINOR_21_RESOURCE_CLASS = 21
MINOR_22_LOOKUP_HEARTBEAT = 22
MINOR_23_PORTGROUPS = 23
MINOR_24_PORTGROUPS_SUBCONTROLLERS = 24
MINOR_25_UNSET_CHASSIS_UUID = 25
MINOR_26_PORTGROUP_MODE_PROPERTIES = 26
MINOR_27_SOFT_POWER_OFF = 27
MINOR_28_VIFS_SUBCONTROLLER = 28
MINOR_29_INJECT_NMI = 29
MINOR_30_DYNAMIC_DRIVERS = 30
MINOR_31_DYNAMIC_INTERFACES = 31

# When adding another version, update MINOR_MAX_VERSION and also update
# doc/source/dev/webapi-version-history.rst with a detailed explanation of
# what the version has changed.
MINOR_MAX_VERSION = MINOR_31_DYNAMIC_INTERFACES

# String representations of the minor and maximum versions
MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION)
MAX_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_MAX_VERSION)
ebadb5c9dc46dce78e60a3d12982e18ffe4a0475 | 3,655 | py | Python | tests/test_functional.py | j127/fca | c39e6dde48aefca0ab1d280cc9af03e2bbe9edee | [
"BSD-3-Clause"
] | 1 | 2015-11-06T06:08:08.000Z | 2015-11-06T06:08:08.000Z | tests/test_functional.py | j127/fca | c39e6dde48aefca0ab1d280cc9af03e2bbe9edee | [
"BSD-3-Clause"
] | null | null | null | tests/test_functional.py | j127/fca | c39e6dde48aefca0ab1d280cc9af03e2bbe9edee | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
import pytest
from flask import url_for
from fca.user.models import User
from .factories import UserFactory
class TestLoggingIn:
    """Functional tests for the login/logout flow via the navbar login form."""

    def _submit_login(self, testapp, username, password):
        """Load the homepage, fill in the navbar login form, and submit it.

        Returns the raw submit response (redirect not followed) so callers
        can either ``.follow()`` it on success or inspect the error page.
        """
        res = testapp.get("/")
        form = res.forms['loginForm']
        form['username'] = username
        form['password'] = password
        return form.submit()

    def test_can_log_in_returns_200(self, user, testapp):
        # A valid login redirects; following the redirect lands on a 200 page.
        res = self._submit_login(testapp, user.username, 'myprecious').follow()
        assert res.status_code == 200

    def test_sees_alert_on_log_out(self, user, testapp):
        self._submit_login(testapp, user.username, 'myprecious').follow()
        res = testapp.get(url_for('public.logout')).follow()
        # The logout page shows a confirmation alert.
        assert 'You are logged out.' in res

    def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
        # Wrong password: the form re-renders with a validation error.
        res = self._submit_login(testapp, user.username, 'wrong')
        assert "Invalid password" in res

    def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
        # Unknown username: the form re-renders with a validation error.
        res = self._submit_login(testapp, 'unknown', 'myprecious')
        assert "Unknown user" in res
class TestRegistering:
    """Functional tests for the account registration form."""

    def _register(self, testapp, username, email, password, confirm):
        """Open the registration page directly, fill the form, and submit it.

        Returns the raw submit response so callers can inspect validation
        errors or follow the success redirect.
        """
        res = testapp.get(url_for("public.register"))
        form = res.forms["registerForm"]
        form['username'] = username
        form['email'] = email
        form['password'] = password
        form['confirm'] = confirm
        return form.submit()

    def test_can_register(self, user, testapp):
        old_count = len(User.query.all())
        # Navigate from the homepage via the "Create account" link to also
        # exercise the navigation path, not just the form endpoint.
        res = testapp.get("/")
        res = res.click("Create account")
        form = res.forms["registerForm"]
        form['username'] = 'foobar'
        form['email'] = 'foo@bar.com'
        form['password'] = 'secret'
        form['confirm'] = 'secret'
        res = form.submit().follow()
        assert res.status_code == 200
        # A new user was created
        assert len(User.query.all()) == old_count + 1

    def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
        # Mismatched password/confirm: the form re-renders with an error.
        res = self._register(testapp, 'foobar', 'foo@bar.com', 'secret', 'secrets')
        assert "Passwords must match" in res

    def test_sees_error_message_if_user_already_registered(self, user, testapp):
        user = UserFactory(active=True)  # A registered user
        user.save()
        # Re-using an existing username must be rejected.
        res = self._register(testapp, user.username, 'foo@bar.com', 'secret', 'secret')
        assert "Username already registered" in res
69c854cd9a369bb787879909f89e1ac0da5bfacf | 24,911 | py | Python | sunpy/coordinates/frames.py | eteq/sunpy | 706bb1dc7bbe77bc54791dbc1d86af9aaaf5af43 | [
"BSD-2-Clause"
] | null | null | null | sunpy/coordinates/frames.py | eteq/sunpy | 706bb1dc7bbe77bc54791dbc1d86af9aaaf5af43 | [
"BSD-2-Clause"
] | null | null | null | sunpy/coordinates/frames.py | eteq/sunpy | 706bb1dc7bbe77bc54791dbc1d86af9aaaf5af43 | [
"BSD-2-Clause"
] | null | null | null | """
Common solar physics coordinate systems.
This submodule implements various solar physics coordinate frames for use with
the `astropy.coordinates` module.
"""
import numpy as np
import astropy.units as u
from astropy.coordinates import Attribute, ConvertError
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping
from astropy.coordinates.representation import (
CartesianDifferential,
CartesianRepresentation,
CylindricalRepresentation,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.time import Time
from sunpy.sun.constants import radius as _RSUN
from sunpy.time.time import _variables_for_parse_time_docstring
from sunpy.util.decorators import add_common_docstring
from .frameattributes import ObserverCoordinateAttribute, TimeFrameAttributeSunPy
# The J2000.0 epoch in Terrestrial Time; used below as the default mean
# equinox for GeocentricEarthEquatorial.
_J2000 = Time('J2000.0', scale='tt')
# Public API of this module: the coordinate frame classes defined below.
__all__ = ['SunPyBaseCoordinateFrame', 'BaseHeliographic',
           'HeliographicStonyhurst', 'HeliographicCarrington',
           'Heliocentric', 'Helioprojective',
           'HeliocentricEarthEcliptic', 'GeocentricSolarEcliptic',
           'HeliocentricInertial', 'GeocentricEarthEquatorial']
def _frame_parameters():
    """
    Return the formatting dictionary used with `add_common_docstring` to
    populate the docstrings of the frame classes defined in this module.

    Each value is a reStructuredText fragment describing one or more
    parameters that are shared by multiple frames.
    """
    ret = {}
    # Each text block is missing the first indent because it already exists in the frame docstring
    ret['data'] = ("data : `~astropy.coordinates.BaseRepresentation` or ``None``\n"
                   "        A representation object or ``None`` to have no data\n"
                   "        (or use the coordinate component arguments, see below).")
    ret['common'] = (f"obstime : {_variables_for_parse_time_docstring()['parse_time_types']}\n"
                     "        The time of the observation.  This is used to determine the\n"
                     "        position of solar-system bodies (e.g., the Sun and the Earth) as\n"
                     "        needed to define the origin and orientation of the frame.\n"
                     "    representation_type : `~astropy.coordinates.BaseRepresentation`, str, optional\n"
                     "        A representation class or string name of a representation class.\n"
                     "        This may change the valid coordinate component arguments from the\n"
                     "        defaults (see above).  For example, passing\n"
                     "        ``representation_type='cartesian'`` will make the frame expect\n"
                     "        Cartesian coordinate component arguments (typically, ``x``, ``y``,\n"
                     "        and ``z``).\n"
                     "    copy : bool, optional\n"
                     "        If `True` (default), make copies of the input coordinate arrays.")
    ret['lonlat'] = ("lon : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\n"
                     "        The longitude coordinate for this object (``lat`` must also be\n"
                     "        given and ``data`` must be ``None``).\n"
                     "        Not needed if ``data`` is given.\n"
                     "    lat : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\n"
                     "        The latitude coordinate for this object (``lon`` must also be\n"
                     "        given and ``data`` must be ``None``).\n"
                     "        Not needed if ``data`` is given.")
    ret['radius'] = ("radius : `~astropy.units.Quantity`, optional\n"
                     "        The radial distance coordinate from Sun center for this object.\n"
                     "        Defaults to the radius of the Sun. Not needed if ``data`` is given.")
    ret['distance_sun'] = ("distance : `~astropy.units.Quantity`, optional\n"
                           "        The distance coordinate from Sun center for this object.\n"
                           "        Not needed if ``data`` is given.")
    ret['distance_earth'] = ("distance : `~astropy.units.Quantity`, optional\n"
                             "        The distance coordinate from Earth center for this object.\n"
                             "        Not needed if ``data`` is given.")
    ret['xyz'] = ("x : `~astropy.units.Quantity`, optional\n"
                  "        X-axis coordinate for this object. Not needed if ``data`` is given.\n"
                  "    y : `~astropy.units.Quantity`, optional\n"
                  "        Y-axis coordinate for this object. Not needed if ``data`` is given.\n"
                  "    z : `~astropy.units.Quantity`, optional\n"
                  "        Z-axis coordinate for this object. Not needed if ``data`` is given.")
    ret['observer'] = ("observer : `~sunpy.coordinates.frames.HeliographicStonyhurst`, str\n"
                       "        The location of the observer. If a string is provided,\n"
                       "        it must be a solar system body that can be parsed by\n"
                       "        `~sunpy.coordinates.ephemeris.get_body_heliographic_stonyhurst`\n"
                       "        at the time ``obstime``. Defaults to Earth center.")
    ret['equinox'] = (f"equinox : {_variables_for_parse_time_docstring()['parse_time_types']}\n"
                      "        The date for the mean vernal equinox.\n"
                      "        Defaults to the J2000.0 equinox.")
    return ret
class SunPyBaseCoordinateFrame(BaseCoordinateFrame):
    """
    Base class for sunpy coordinate frames.
    This class is not intended to be used directly and has no transformations defined.
    * Defines the frame attribute ``obstime`` for observation time.
    * Defines a default longitude wrap angle of 180 degrees, which can be overridden via the class
      variable ``_wrap_angle``.
    * Injects a nice way of representing the object which the coordinate represents.
    """
    # Observation time; used to locate solar-system bodies when defining frames.
    obstime = TimeFrameAttributeSunPy()
    default_representation = SphericalRepresentation
    default_differential = SphericalDifferential
    # Express angular rates in arcsec/s and the radial rate in km/s by default.
    frame_specific_representation_info = {
        SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),
                                RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),
                                RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],
    }
    # Default longitude wrap angle; subclasses may override (e.g. Carrington uses 360 deg).
    _wrap_angle = 180*u.deg
    def __init__(self, *args, **kwargs):
        # Optional human-readable name (e.g. a solar-system body); used by __str__.
        self.object_name = None
        # If wrap_longitude=False is passed in, do not impose a specific wrap angle for the frame
        if not kwargs.pop('wrap_longitude', True):
            self._wrap_angle = None
        super().__init__(*args, **kwargs)
        # If obstime is specified, treat the default observer (Earth) as explicitly set
        # NOTE(review): this base class does not itself declare an ``observer`` frame
        # attribute; this presumably relies on `is_frame_attr_default` returning False
        # for frames without one — confirm against the astropy version in use.
        if self.obstime is not None and self.is_frame_attr_default('observer'):
            self._attr_names_with_defaults.remove('observer')
        return
    def represent_as(self, base, s='base', in_frame_units=False):
        """
        If a frame wrap angle is set, use that wrap angle for any spherical representations.
        """
        data = super().represent_as(base, s, in_frame_units=in_frame_units)
        # Only (unit-)spherical representations have a longitude to wrap.
        if self._wrap_angle is not None and \
           isinstance(data, (UnitSphericalRepresentation, SphericalRepresentation)):
            data.lon.wrap_angle = self._wrap_angle
        return data
    def __str__(self):
        """
        We override this here so that when you print a SkyCoord it shows the
        observer as the string and not the whole massive coordinate.
        """
        if getattr(self, "object_name", None):
            return f"<{self.__class__.__name__} Coordinate for '{self.object_name}'>"
        else:
            return super().__str__()
class BaseHeliographic(SunPyBaseCoordinateFrame):
    """
    Base class for HeliographicCarrington (HGC) and HeliographicStonyhurst (HGS) frames.

    This class is not intended to be used directly and has no transformations defined.
    """
    # Spherical components are (lon, lat, radius) in degrees/degrees/any length
    # unit; rates are arcsec/s for the angles and km/s for the radius.
    frame_specific_representation_info = {
        SphericalRepresentation: [RepresentationMapping('lon', 'lon', u.deg),
                                  RepresentationMapping('lat', 'lat', u.deg),
                                  RepresentationMapping('distance', 'radius', None)],
        SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),
                                RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),
                                RepresentationMapping('d_distance', 'd_radius', u.km/u.s)],
    }

    def __init__(self, *args, **kwargs):
        # NOTE: a previous revision called ``kwargs.get('representation_type', None)``
        # here and discarded the result; ``dict.get`` has no side effects, so that
        # statement was a no-op and has been removed.
        super().__init__(*args, **kwargs)

        # If the coordinate was supplied as 2D (a dimensionless unit vector),
        # promote it to 3D by scaling it to the solar radius.
        if (self._data is not None and self._data.norm().unit is u.one
                and u.allclose(self._data.norm(), 1*u.one)):
            self._data *= _RSUN.to(u.km)
@add_common_docstring(**_frame_parameters())
class HeliographicStonyhurst(BaseHeliographic):
    """
    A coordinate or frame in the Stonyhurst Heliographic (HGS) system.
    - The origin is the center of the Sun.
    - The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
    - The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the projection of
      the Sun-Earth line onto the Sun's equatorial plane.
    This system is also known as the Heliocentric Earth Equatorial (HEEQ) system when
    represented using Cartesian components.
    A new instance can be created using the following signatures
    (note that if supplied, ``obstime`` and ``representation_type`` must be
    keyword arguments)::
        HeliographicStonyhurst(lon, lat, obstime=obstime)
        HeliographicStonyhurst(lon, lat, radius, obstime=obstime)
        HeliographicStonyhurst(x, y, z, representation_type='cartesian', obstime=obstime)
    Parameters
    ----------
    {data}
    {lonlat}
    {radius}
    {common}
    Examples
    --------
    >>> from astropy.coordinates import SkyCoord
    >>> import sunpy.coordinates
    >>> import astropy.units as u
    >>> sc = SkyCoord(1*u.deg, 1*u.deg, 2*u.km,
    ...               frame="heliographic_stonyhurst",
    ...               obstime="2010/01/01T00:00:45")
    >>> sc
    <SkyCoord (HeliographicStonyhurst: obstime=2010-01-01T00:00:45.000): (lon, lat, radius) in (deg, deg, km)
        (1., 1., 2.)>
    >>> sc.frame
    <HeliographicStonyhurst Coordinate (obstime=2010-01-01T00:00:45.000): (lon, lat, radius) in (deg, deg, km)
        (1., 1., 2.)>
    >>> sc = SkyCoord(HeliographicStonyhurst(-10*u.deg, 2*u.deg))
    >>> sc
    <SkyCoord (HeliographicStonyhurst: obstime=None): (lon, lat, radius) in (deg, deg, km)
        (-10., 2., 695700.)>
    >>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),
    ...               obstime="2011/01/05T00:00:50",
    ...               frame="heliographic_stonyhurst")
    >>> sc
    <SkyCoord (HeliographicStonyhurst: obstime=2011-01-05T00:00:50.000): (lon, lat, radius) in (deg, deg, km)
        (90., 2.54480438, 45.04442252)>
    Notes
    -----
    This frame will always be converted to a 3D frame where the radius defaults to
    ``rsun``.
    """
    # Frame identifier string (used, e.g., as frame="heliographic_stonyhurst"
    # in SkyCoord, per the examples above).
    name = "heliographic_stonyhurst"
@add_common_docstring(**_frame_parameters())
class HeliographicCarrington(BaseHeliographic):
    """
    A coordinate or frame in the Carrington Heliographic (HGC) system.
    - The origin is the center of the Sun.
    - The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
    - The X-axis and Y-axis rotate with a period of 25.38 days.
    This system differs from Stonyhurst Heliographic (HGS) in its definition of longitude. This
    longitude is an "apparent" longitude because it takes into account the time it takes for light
    to travel from the Sun's surface to the observer. Thus, the observer needs to be specified to
    be able to transform to any other coordinate frame.
    A new instance can be created using the following signatures
    (note that if supplied, ``obstime`` and ``observer`` must be a keyword argument)::
        HeliographicCarrington(lon, lat, obstime=obstime, observer=observer)
        HeliographicCarrington(lon, lat, radius, obstime=obstime, observer=observer)
    Parameters
    ----------
    {data}
    {lonlat}
    {radius}
    {observer}
    {common}
    Examples
    --------
    >>> from astropy.coordinates import SkyCoord
    >>> import sunpy.coordinates
    >>> import astropy.units as u
    >>> sc = SkyCoord(1*u.deg, 2*u.deg, 3*u.km,
    ...               frame="heliographic_carrington",
    ...               observer="earth",
    ...               obstime="2010/01/01T00:00:30")
    >>> sc
    <SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:30.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (lon, lat, radius) in (deg, deg, km)
        (1., 2., 3.)>
    >>> sc = SkyCoord([1,2,3]*u.deg, [4,5,6]*u.deg, [5,6,7]*u.km,
    ...               obstime="2010/01/01T00:00:45", frame="heliographic_carrington")
    >>> sc
    <SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:45.000, observer=None): (lon, lat, radius) in (deg, deg, km)
        [(1., 4., 5.), (2., 5., 6.), (3., 6., 7.)]>
    >>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),
    ...               obstime="2011/01/05T00:00:50",
    ...               frame="heliographic_carrington")
    >>> sc
    <SkyCoord (HeliographicCarrington: obstime=2011-01-05T00:00:50.000, observer=None): (lon, lat, radius) in (deg, deg, km)
        (90., 2.54480438, 45.04442252)>
    """
    name = "heliographic_carrington"
    # Carrington longitude is conventionally expressed over a full rotation, so
    # override the 180-degree wrap inherited from SunPyBaseCoordinateFrame.
    _wrap_angle = 360*u.deg
    # Needed to account for light travel time (see the class docstring).
    observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
@add_common_docstring(**_frame_parameters())
class Heliocentric(SunPyBaseCoordinateFrame):
    """
    A coordinate or frame in the Heliocentric system, which is observer-based.
    - The origin is the center of the Sun.
    - The Z-axis is aligned with the Sun-observer line.
    - The Y-axis is aligned with the component of the vector to the Sun's north pole that is
      perpendicular to the Z-axis.
    This frame defaults to a Cartesian component representation, which is known as Heliocentric
    Cartesian (HCC). This frame can also be represented using cylindrical components, where
    ``rho`` is the impact parameter and ``psi`` is the position angle.
    ``psi`` is measured relative to the west limb, rather than solar north, so is shifted
    by 90 degrees compared to the convention of the Heliocentric Radial (HCR) system.
    A new instance can be created using the following signatures
    (note that if supplied, ``obstime``, ``observer``, and ``representation_type`` must be
    keyword arguments)::
        Heliocentric(x, y, z, obstime=obstime, observer=observer)
        Heliocentric(rho, psi, z, representation_type='cylindrical', obstime=obstime, observer=observer)
    Parameters
    ----------
    {data}
    {xyz}
    {observer}
    {common}
    Examples
    --------
    >>> from astropy.coordinates import SkyCoord, CartesianRepresentation
    >>> import sunpy.coordinates
    >>> import astropy.units as u
    >>> sc = SkyCoord(CartesianRepresentation(10*u.km, 1*u.km, 2*u.km),
    ...               obstime="2011/01/05T00:00:50", observer="earth", frame="heliocentric")
    >>> sc
    <SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km
        (10., 1., 2.)>
    >>> sc = SkyCoord([1,2]*u.km, [3,4]*u.m, [5,6]*u.cm,
    ...               obstime="2011/01/01T00:00:54", observer="earth", frame="heliocentric")
    >>> sc
    <SkyCoord (Heliocentric: obstime=2011-01-01T00:00:54.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in (km, m, cm)
        [(1., 3., 5.), (2., 4., 6.)]>
    >>> sc = SkyCoord(CylindricalRepresentation(10*u.km, 60*u.deg, 10*u.km),
    ...               obstime="2011/01/05T00:00:50", observer="earth", frame="heliocentric")
    >>> sc
    <SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km
        (5., 8.66025404, 10.)>
    """
    # HCC is conventionally expressed in Cartesian components.
    default_representation = CartesianRepresentation
    default_differential = CartesianDifferential
    # In the cylindrical representation, rename astropy's ``phi`` component to ``psi``.
    frame_specific_representation_info = {
        CylindricalRepresentation: [RepresentationMapping('phi', 'psi', u.deg)]
    }
    observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
@add_common_docstring(**_frame_parameters())
class Helioprojective(SunPyBaseCoordinateFrame):
    """
    A coordinate or frame in the Helioprojective Cartesian (HPC) system, which is observer-based.
    - The origin is the location of the observer.
    - ``Tx`` (aka "theta_x") is the angle relative to the plane containing the Sun-observer line
      and the Sun's rotation axis, with positive values in the direction of the Sun's west limb.
    - ``Ty`` (aka "theta_y") is the angle relative to the Sun's equatorial plane, with positive
      values in the direction of the Sun's north pole.
    - ``distance`` is the Sun-observer distance.
    This system is frequently used in a projective form without ``distance`` specified.  For
    observations looking very close to the center of the Sun, where the small-angle approximation
    is appropriate, ``Tx`` and ``Ty`` can be approximated as Cartesian components.
    A new instance can be created using the following signatures
    (note that if supplied, ``obstime`` and ``observer`` must be keyword arguments)::
        Helioprojective(Tx, Ty, obstime=obstime, observer=observer)
        Helioprojective(Tx, Ty, distance, obstime=obstime, observer=observer)
    Parameters
    ----------
    {data}
    Tx : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
        The theta_x coordinate for this object. Not needed if ``data`` is given.
    Ty : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
        The theta_y coordinate for this object. Not needed if ``data`` is given.
    distance : `~astropy.units.Quantity`
        The distance coordinate from the observer for this object.
        Not needed if ``data`` is given.
    {observer}
    rsun : `~astropy.units.Quantity`
        The physical (length) radius of the Sun. Used to calculate the position
        of the limb for calculating distance from the observer to the
        coordinate. Defaults to the solar radius.
    {common}
    Examples
    --------
    >>> from astropy.coordinates import SkyCoord
    >>> import sunpy.coordinates
    >>> import astropy.units as u
    >>> sc = SkyCoord(0*u.deg, 0*u.deg, 5*u.km,
    ...               obstime="2010/01/01T00:00:00", observer="earth", frame="helioprojective")
    >>> sc
    <SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, km)
        (0., 0., 5.)>
    >>> sc = SkyCoord(0*u.deg, 0*u.deg,
    ...               obstime="2010/01/01T00:00:00", observer="earth", frame="helioprojective")
    >>> sc
    <SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty) in arcsec
        (0., 0.)>
    >>> sc = SkyCoord(CartesianRepresentation(1*u.AU, 1e5*u.km, -2e5*u.km),
    ...               obstime="2011/01/05T00:00:50", observer="earth", frame="helioprojective")
    >>> sc
    <SkyCoord (Helioprojective: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
        (137.87948623, -275.75878762, 1.00000112)>
    """
    # Spherical components are renamed to the conventional (Tx, Ty, distance).
    frame_specific_representation_info = {
        SphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),
                                  RepresentationMapping('lat', 'Ty', u.arcsec),
                                  RepresentationMapping('distance', 'distance', None)],
        SphericalDifferential: [RepresentationMapping('d_lon', 'd_Tx', u.arcsec/u.s),
                                RepresentationMapping('d_lat', 'd_Ty', u.arcsec/u.s),
                                RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],
        UnitSphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),
                                      RepresentationMapping('lat', 'Ty', u.arcsec)],
    }
    rsun = Attribute(default=_RSUN.to(u.km))
    observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
    def make_3d(self):
        """
        This method calculates the third coordinate of the Helioprojective
        frame. It assumes that the coordinate point is on the surface of the Sun.
        If a point in the frame is off limb then NaN will be returned.
        Returns
        -------
        new_frame : `~sunpy.coordinates.frames.Helioprojective`
            A new frame instance with all the attributes of the original but
            now with a third coordinate.
        """
        # Skip if we already are 3D
        distance = self.spherical.distance
        if not (distance.unit is u.one and u.allclose(distance, 1*u.one)):
            return self
        if not isinstance(self.observer, BaseCoordinateFrame):
            raise ConvertError("Cannot calculate distance to the Sun "
                               f"for observer '{self.observer}' "
                               "without `obstime` being specified.")
        rep = self.represent_as(UnitSphericalRepresentation)
        lat, lon = rep.lat, rep.lon
        # Angle between the line of sight and the Sun-center direction.
        alpha = np.arccos(np.cos(lat) * np.cos(lon)).to(lat.unit)
        # Law of cosines gives d**2 + b*d + c = 0, where d is the
        # observer-to-point distance and the point lies on a sphere of
        # radius ``rsun`` centered on the Sun.
        c = self.observer.radius**2 - self.rsun**2
        b = -2 * self.observer.radius * np.cos(alpha)
        # Ignore the invalid-value warnings from taking sqrt of a negative
        # discriminant (off-limb points), which yields NaN as documented above.
        with np.errstate(invalid='ignore'):
            # Take the smaller quadratic root: the intersection on the near
            # side of the Sun.
            d = ((-1*b) - np.sqrt(b**2 - 4*c)) / 2
        return self.realize_frame(SphericalRepresentation(lon=lon,
                                                          lat=lat,
                                                          distance=d))
@add_common_docstring(**_frame_parameters())
class HeliocentricEarthEcliptic(SunPyBaseCoordinateFrame):
    """
    A coordinate or frame in the Heliocentric Earth Ecliptic (HEE) system.
    - The origin is the center of the Sun.
    - The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Sun-Earth line.
    - The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis
      of the mean ecliptic pole at the observation time.
    Parameters
    ----------
    {data}
    {lonlat}
    {distance_sun}
    {common}
    """
    # No frame attributes beyond ``obstime`` and the spherical representation
    # defaults inherited from SunPyBaseCoordinateFrame.
@add_common_docstring(**_frame_parameters())
class GeocentricSolarEcliptic(SunPyBaseCoordinateFrame):
    """
    A coordinate or frame in the Geocentric Solar Ecliptic (GSE) system.
    - The origin is the center of the Earth.
    - The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Earth-Sun line.
    - The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis
      of the mean ecliptic pole at the observation time.
    Parameters
    ----------
    {data}
    {lonlat}
    {distance_earth}
    {common}
    Notes
    -----
    Aberration due to Earth motion is not included.
    """
    # No frame attributes beyond ``obstime`` and the spherical representation
    # defaults inherited from SunPyBaseCoordinateFrame.
@add_common_docstring(**_frame_parameters())
class HeliocentricInertial(SunPyBaseCoordinateFrame):
    """
    A coordinate or frame in the Heliocentric Inertial (HCI) system.
    - The origin is the center of the Sun.
    - The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
    - The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the solar ascending
      node on the ecliptic (mean J2000.0).
    Parameters
    ----------
    {data}
    {lonlat}
    {distance_sun}
    {common}
    Notes
    -----
    The solar ascending node on the ecliptic lies on the intersection of the solar equatorial
    plane with the ecliptic plane, not on the intersection of the celestial equatorial plane with
    the ecliptic plane.
    """
    # No frame attributes beyond ``obstime`` and the spherical representation
    # defaults inherited from SunPyBaseCoordinateFrame.
@add_common_docstring(**_frame_parameters())
class GeocentricEarthEquatorial(SunPyBaseCoordinateFrame):
    """
    A coordinate or frame in the Geocentric Earth Equatorial (GEI) system.
    - The origin is the center of the Earth.
    - The Z-axis (+90 degrees latitude) is aligned with the Earth's north pole.
    - The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the mean (not true)
      vernal equinox.
    Parameters
    ----------
    {data}
    {lonlat}
    {distance_earth}
    {equinox}
    {common}
    Notes
    -----
    Aberration due to Earth motion is not included.
    """
    # Mean vernal equinox defining the X-axis direction; defaults to J2000.0.
    equinox = TimeFrameAttributeSunPy(default=_J2000)
| 43.857394 | 184 | 0.631167 |
b926a242216b316fb746439dd60d61b0892262f9 | 7,635 | py | Python | maize/plotters/madmax.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 14 | 2021-07-21T19:45:05.000Z | 2022-02-09T04:29:51.000Z | maize/plotters/madmax.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 9 | 2021-07-24T09:30:46.000Z | 2021-12-05T19:51:29.000Z | maize/plotters/madmax.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 5 | 2021-10-04T17:33:47.000Z | 2022-03-15T08:37:51.000Z | import asyncio
import traceback
import os
import logging
import sys
from pathlib import Path
from typing import Any, Dict, Optional
from maize.plotting.create_plots import resolve_plot_keys
from maize.plotters.plotters_util import run_plotter, run_command
# Module-level logger for this plotter wrapper.
log = logging.getLogger(__name__)
# Directory name (under the plotters root) where the madMAx sources are cloned.
MADMAX_PLOTTER_DIR = "madmax-plotter"
def is_madmax_supported() -> bool:
    """Return whether the madMAx plotter can be used on the current platform."""
    platform = sys.platform
    supported_exact = ("darwin", "win32", "cygwin")
    return platform.startswith("linux") or platform in supported_exact
def get_madmax_install_path(plotters_root_path: Path) -> Path:
    """Return the directory into which the madMAx plotter sources are cloned."""
    install_dir = plotters_root_path / MADMAX_PLOTTER_DIR
    return install_dir
def get_madmax_package_path() -> Path:
    """Return the bundled ``madmax`` directory next to the Python executable."""
    executable_dir = os.path.dirname(sys.executable)
    return Path(executable_dir) / "madmax"
def get_madmax_executable_path_for_ksize(plotters_root_path: Path, ksize: int = 32) -> Path:
    """
    Return the path to the ``chia_plot`` binary suited to ``ksize``.

    k-sizes above 32 use the separate ``chia_plot_k34`` build, and Windows
    platforms get an ``.exe`` suffix.  If the bundled package directory does
    not exist, fall back to the from-source build directory under the
    install path.
    """
    executable_name = "chia_plot" if ksize <= 32 else "chia_plot_k34"
    if sys.platform in ("win32", "cygwin"):
        executable_name += ".exe"
    binary_dir: Path = get_madmax_package_path()
    if not binary_dir.exists():
        binary_dir = get_madmax_install_path(plotters_root_path) / "build"
    return binary_dir / executable_name
def get_madmax_install_info(plotters_root_path: Path) -> Optional[Dict[str, Any]]:
    """
    Return a dict describing the madMAx plotter installation state.

    Keys:
      - ``display_name``: human-readable plotter name (always present)
      - ``installed``: whether a working executable was found (always present)
      - ``version``: the executable's reported version (only when installed)
      - ``can_install``: whether installation is supported on this platform
        (only when not installed)
    """
    info: Dict[str, Any] = {"display_name": "madMAx Plotter"}
    installed: bool = False
    supported: bool = is_madmax_supported()

    if get_madmax_executable_path_for_ksize(plotters_root_path).exists():
        # Bug fix: ``version`` must be initialized before the try block.
        # Previously, if ``run_command`` raised, ``version`` was never bound
        # and the ``if version is not None`` check below raised NameError.
        version: Optional[str] = None
        try:
            proc = run_command(
                [os.fspath(get_madmax_executable_path_for_ksize(plotters_root_path)), "--version"],
                "Failed to call madmax with --version option",
                capture_output=True,
                text=True,
            )
            version = proc.stdout.strip()
        except Exception as e:
            print(f"Failed to determine madmax version: {e}")
        if version is not None:
            installed = True
            info["version"] = version
        else:
            installed = False

    info["installed"] = installed
    if installed is False:
        info["can_install"] = supported
    return info
def install_madmax(plotters_root_path: Path):
    """
    Clone and build the madMAx plotter under ``plotters_root_path``.

    Installs build dependencies via apt (Linux) or brew (macOS), clones the
    chia-plotter-madmax repository with its submodules, and runs its
    ``make_devel.sh`` build script.

    Raises
    ------
    RuntimeError
        If the current platform is not supported.
    """
    if is_madmax_supported():
        print("Installing dependencies.")
        if sys.platform.startswith("linux"):
            run_command(
                [
                    "sudo",
                    "apt",
                    "install",
                    "-y",
                    "libsodium-dev",
                    "cmake",
                    "g++",
                    "git",
                    "build-essential",
                ],
                "Could not install dependencies",
            )
        if sys.platform.startswith("darwin"):
            run_command(
                [
                    "brew",
                    "install",
                    "libsodium",
                    "cmake",
                    "git",
                    "autoconf",
                    "automake",
                    "libtool",
                    "wget",
                ],
                "Could not install dependencies",
            )
        # Fail early with a clear message if git is not available at all.
        run_command(["git", "--version"], "Error checking Git version.")
        print("Cloning git repository.")
        run_command(
            [
                "git",
                "clone",
                "https://github.com/Chia-Network/chia-plotter-madmax.git",
                MADMAX_PLOTTER_DIR,
            ],
            "Could not clone madmax git repository",
            cwd=os.fspath(plotters_root_path),
        )
        print("Installing git submodules.")
        madmax_path: str = os.fspath(get_madmax_install_path(plotters_root_path))
        run_command(
            [
                "git",
                "submodule",
                "update",
                "--init",
                "--recursive",
            ],
            "Could not initialize git submodules",
            cwd=madmax_path,
        )
        print("Running install script.")
        run_command(["./make_devel.sh"], "Error while running install script", cwd=madmax_path)
    else:
        raise RuntimeError("Platform not supported yet for madmax plotter.")
# Maps a distinctive prefix of each madMAx log line (the [P1]/[P2]/[P3-2]
# phase/table completion messages) to the cumulative fraction of the plot
# that is done once that line appears.  Passed to run_plotter by plot_madmax
# below so the plotter's log output can drive progress reporting.
progress = {
    "[P1] Table 1 took": 0.01,
    "[P1] Table 2 took": 0.06,
    "[P1] Table 3 took": 0.12,
    "[P1] Table 4 took": 0.2,
    "[P1] Table 5 took": 0.28,
    "[P1] Table 6 took": 0.36,
    "[P1] Table 7 took": 0.42,
    "[P2] Table 7 rewrite took": 0.43,
    "[P2] Table 6 rewrite took": 0.48,
    "[P2] Table 5 rewrite took": 0.51,
    "[P2] Table 4 rewrite took": 0.55,
    "[P2] Table 3 rewrite took": 0.58,
    "[P2] Table 2 rewrite took": 0.61,
    "[P3-2] Table 2 took": 0.66,
    "[P3-2] Table 3 took": 0.73,
    "[P3-2] Table 4 took": 0.79,
    "[P3-2] Table 5 took": 0.85,
    "[P3-2] Table 6 took": 0.92,
    "[P3-2] Table 7 took": 0.98,
}
def dir_with_trailing_slash(dir: str) -> str:
    """
    Return *dir* guaranteed to end with the OS path separator.

    Bug fix: the previous index-based check (``dir[-1]``) raised IndexError
    for an empty string; ``str.endswith`` is equivalent for non-empty input
    and simply appends the separator when *dir* is empty.
    """
    return dir if dir.endswith(os.path.sep) else dir + os.path.sep
def plot_madmax(args, maize_root_path: Path, plotters_root_path: Path):
    """
    Build the ``chia_plot`` command line from the parsed CLI ``args`` and run it.

    Installs (clones and builds) the madMAx plotter first if the executable for
    the requested k-size is not present.  ``args`` is assumed to carry the
    attributes referenced below (size, count, threads, buckets, tmpdir, key
    material, etc.) — presumably an argparse/click namespace from the CLI.
    """
    if sys.platform not in ["win32", "cygwin"]:
        import resource
        # madMAx has a ulimit -n requirement > 296:
        # "Cannot open at least 296 files, please raise maximum open file limit in OS."
        resource.setrlimit(resource.RLIMIT_NOFILE, (512, 512))
    if not os.path.exists(get_madmax_executable_path_for_ksize(plotters_root_path, args.size)):
        print("Installing madmax plotter.")
        try:
            install_madmax(plotters_root_path)
        except Exception as e:
            print(f"Exception while installing madmax plotter: {e}")
            return
    # Resolve farmer/pool keys (or a pool contract address) into the key
    # objects the plotter needs; empty CLI values are passed through as None.
    plot_keys = asyncio.get_event_loop().run_until_complete(
        resolve_plot_keys(
            None if args.farmerkey == b"" else args.farmerkey.hex(),
            None,
            None if args.pool_key == b"" else args.pool_key.hex(),
            None if args.contract == "" else args.contract,
            maize_root_path,
            log,
            args.connect_to_daemon,
        )
    )
    # Assemble the chia_plot argument vector, flag by flag.
    call_args = []
    call_args.append(os.fspath(get_madmax_executable_path_for_ksize(plotters_root_path, args.size)))
    call_args.append("-f")
    call_args.append(bytes(plot_keys.farmer_public_key).hex())
    if plot_keys.pool_public_key is not None:
        call_args.append("-p")
        call_args.append(bytes(plot_keys.pool_public_key).hex())
    call_args.append("-t")
    # The plotter expects directory arguments to end with the path separator.
    call_args.append(dir_with_trailing_slash(args.tmpdir))
    call_args.append("-2")
    call_args.append(dir_with_trailing_slash(args.tmpdir2))
    call_args.append("-d")
    call_args.append(dir_with_trailing_slash(args.finaldir))
    if plot_keys.pool_contract_address is not None:
        call_args.append("-c")
        call_args.append(plot_keys.pool_contract_address)
    call_args.append("-n")
    call_args.append(str(args.count))
    call_args.append("-r")
    call_args.append(str(args.threads))
    call_args.append("-u")
    call_args.append(str(args.buckets))
    call_args.append("-v")
    call_args.append(str(args.buckets3))
    call_args.append("-w")
    call_args.append(str(int(args.waitforcopy)))
    call_args.append("-K")
    call_args.append(str(args.rmulti2))
    # Only pass -k explicitly for non-default k-sizes (32 is the plotter default).
    if args.size != 32:
        call_args.append("-k")
        call_args.append(str(args.size))
    try:
        # NOTE(review): asyncio.get_event_loop() is used twice in this function;
        # it is deprecated outside a running loop on newer Pythons — confirm the
        # supported interpreter range before modernizing.
        loop = asyncio.get_event_loop()
        loop.run_until_complete(run_plotter(call_args, progress))
    except Exception as e:
        print(f"Exception while plotting: {type(e)} {e}")
        print(f"Traceback: {traceback.format_exc()}")
01aa855e4227a19c721e2bd3bba87ef82e518ebd | 2,778 | py | Python | examples/extensions/sudoku.py | QiuWJX/cvxpy | fd1c225b0cdf541618e292cae1a4c7ea25ddc934 | [
"ECL-2.0",
"Apache-2.0"
] | 3,285 | 2015-01-03T04:02:29.000Z | 2021-04-19T14:51:29.000Z | examples/extensions/sudoku.py | QiuWJX/cvxpy | fd1c225b0cdf541618e292cae1a4c7ea25ddc934 | [
"ECL-2.0",
"Apache-2.0"
] | 1,138 | 2015-01-01T19:40:14.000Z | 2021-04-18T23:37:31.000Z | examples/extensions/sudoku.py | phschiele/cvxpy | a43aed7447b87f6d0fbc6f71ae5c7b84183f3369 | [
"ECL-2.0",
"Apache-2.0"
] | 765 | 2015-01-02T19:29:39.000Z | 2021-04-20T00:50:43.000Z | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cProfile
import pstats
import cvxopt
import numpy as np
from ncvx.boolean import Boolean
from cvxpy import Minimize, Problem, square
n = 9
# 9x9 sudoku grid
# numbers[d][i, j] == 1 means cell (i, j) holds digit d (digits run 0..8);
# one Boolean indicator matrix per digit.
numbers = [Boolean(n,n), Boolean(n,n), Boolean(n,n),
           Boolean(n,n), Boolean(n,n), Boolean(n,n),
           Boolean(n,n), Boolean(n,n), Boolean(n,n)]
# TODO: 9*[Boolean(9,9)] doesn't work....
# Reference solved grid used to seed the clues and to check the result.
# NOTE: cvxopt.matrix interprets each inner list as a COLUMN, so this is the
# transpose of the grid as written here.
solution = cvxopt.matrix([
    [0, 5, 2, 3, 7, 1, 8, 6, 4],
    [6, 3, 7, 8, 0, 4, 5, 2, 1],
    [1, 4, 8, 5, 2 ,6, 3, 0, 7],
    [4, 7, 1, 2, 3, 0, 6, 5, 8],
    [3, 6, 5, 1, 4, 8, 0, 7, 2],
    [8, 2, 0, 6, 5, 7, 4, 1, 3],
    [5, 1, 6, 7, 8, 3, 2, 4, 0],
    [7, 0, 3, 4, 6, 2, 1, 8, 5],
    [2, 8, 4, 0, 1, 5, 7, 3, 6]
])
# partial grid
# (row, col) positions whose digit is revealed as a clue, taken from ``solution``.
known =[(0,6), (0,7), (1,4), (1,5), (1,8), (2,0), (2,2), (2,7), (2,8),
        (3,0), (3,1), (4,0), (4,2), (4,4), (4,6), (4,8), (5,7), (5,8),
        (6,0), (6,1), (6,6), (6,8), (7,0), (7,3), (7,4), (8,1), (8,2)]
def row(x, r):
    """Yield the entries of row *r* of ``x``, left to right.

    ``x`` must expose a ``size`` attribute ``(m, n)`` and 2-D indexing
    ``x[i, j]`` (e.g. a cvxpy/ncvx expression).
    """
    _, n = x.size
    # Index the target row directly: the original scanned all m*n cells and
    # filtered with ``if i == r`` (accidental O(m*n) instead of O(n)).
    for j in range(n):
        yield x[r, j]
def col(x, c):
    """Yield the entries of column *c* of ``x``, top to bottom.

    ``x`` must expose a ``size`` attribute ``(m, n)`` and 2-D indexing
    ``x[i, j]`` (e.g. a cvxpy/ncvx expression).
    """
    m, _ = x.size
    # Walk the target column directly: the original scanned all m*n cells and
    # filtered with ``if j == c`` (accidental O(m*n) instead of O(m)).
    for i in range(m):
        yield x[i, c]
def block(x, b):
    """Yield the entries of 3x3 sub-block *b* of a 9x9 grid ``x``, row-major.

    Blocks are numbered row-major: block 0 covers rows 0-2 / cols 0-2,
    block 1 rows 0-2 / cols 3-5, ..., block 8 rows 6-8 / cols 6-8.
    """
    row0 = 3 * (b // 3)  # first row of the block
    col0 = 3 * (b % 3)   # first column of the block
    # Visit only the 9 cells of the block: the original scanned the whole
    # grid and filtered every cell (accidental O(m*n) instead of O(1)).
    # The row-major visiting order of the original is preserved.
    for i in range(row0, row0 + 3):
        for j in range(col0, col0 + 3):
            yield x[i, j]
# Profile the constraint construction and the branch-and-bound solve.
pr = cProfile.Profile()
pr.enable()
# create the sudoku constraints
# Elementwise: the 9 indicator matrices sum to 1 in every cell, i.e. each
# cell holds exactly one digit.
constraints = [sum(numbers) == 1]
for i in range(n):
    for num in range(n):
        # Each digit ``num`` appears exactly once in row i, column i and block i.
        constraints.append(sum(row(numbers[num], i)) == 1)
        constraints.append(sum(col(numbers[num], i)) == 1)
        constraints.append(sum(block(numbers[num], i)) == 1)
# Pin the clue cells: cell k must hold the digit given by the reference grid.
constraints.extend(numbers[solution[k]][k] == 1 for k in known)
# attempt to solve
# The objective is essentially arbitrary -- any feasible point is a valid
# sudoku solution; branch and bound only needs something to minimize.
p = Problem(Minimize(sum(map(square, [num[0,0] for num in numbers]))), constraints)
p.solve(method="branch and bound")
pr.disable()
ps = pstats.Stats(pr)
ps.sort_stats('tottime').print_stats(.5)
# Reconstruct the digit grid from the indicator matrices and compare with the
# reference; a total difference of 0 means the puzzle was solved correctly.
A = np.zeros((n, n))
for i, num in enumerate(numbers):
    A += i * num.value
print(np.sum(A - solution))
| 27.78 | 83 | 0.556875 |
31bcc78b85c803b5d08fea1bdfd024e85bf1720c | 7,625 | py | Python | threeML/plugins/UnbinnedPoissonLike.py | jasonfan1997/threeML | 21b1c76ad3423f745b9f56413d93ee01d1d5855f | [
"BSD-3-Clause"
] | 42 | 2019-01-10T06:11:38.000Z | 2022-03-21T20:08:37.000Z | threeML/plugins/UnbinnedPoissonLike.py | jasonfan1997/threeML | 21b1c76ad3423f745b9f56413d93ee01d1d5855f | [
"BSD-3-Clause"
] | 187 | 2019-01-15T13:21:16.000Z | 2022-03-31T12:15:55.000Z | threeML/plugins/UnbinnedPoissonLike.py | jasonfan1997/threeML | 21b1c76ad3423f745b9f56413d93ee01d1d5855f | [
"BSD-3-Clause"
] | 32 | 2019-01-14T10:24:56.000Z | 2022-03-10T10:09:59.000Z | import types
from collections.abc import Iterable
from typing import Optional, Tuple, Union
import astromodels
import numba as nb
import numpy as np
from threeML.io.logging import setup_logger
from threeML.plugin_prototype import PluginPrototype
__instrument_name = "n.a."
# Module-level logger for this plugin.
log = setup_logger(__name__)
# Smallest positive normal float64; used below to protect log() evaluations.
_tiny = np.float64(np.finfo(1.).tiny)
class EventObservation(object):
    """Bundle an event list with its exposure and observation window(s).

    ``start``/``stop`` may be scalars (a single interval) or matching
    sequences (multiple disjoint intervals).
    """

    def __init__(
        self,
        events: np.ndarray,
        exposure: float,
        start: Union[float, np.ndarray],
        stop: Union[float, np.ndarray],
    ):
        self._events = np.array(events)
        self._exposure: float = exposure

        is_multi = isinstance(start, Iterable) or isinstance(stop, Iterable)
        if is_multi:
            # Both bounds must be sequences of equal length, with every
            # interval well ordered.
            assert isinstance(start, Iterable)
            assert isinstance(stop, Iterable)
            assert len(start) == len(stop)
            for i, lo in enumerate(start):
                assert lo < stop[i]
            self._start = start
            self._stop = stop
        else:
            assert start < stop
            self._start = float(start)
            self._stop = float(stop)
        self._is_multi_interval: bool = is_multi

        self._n_events: int = len(self._events)

        log.debug(f"created event observation with")
        log.debug(f"{self._start} {self._stop}")

    @property
    def events(self) -> np.ndarray:
        """The observed event values."""
        return self._events

    @property
    def n_events(self) -> int:
        """Number of events in the list."""
        return self._n_events

    @property
    def exposure(self) -> float:
        """Exposure associated with the event list."""
        return self._exposure

    @property
    def start(self) -> Union[float, np.ndarray]:
        """Start of the observation interval(s)."""
        return self._start

    @property
    def stop(self) -> Union[float, np.ndarray]:
        """End of the observation interval(s)."""
        return self._stop

    @property
    def is_multi_interval(self) -> bool:
        """True when start/stop are sequences of intervals."""
        return self._is_multi_interval
class UnbinnedPoissonLike(PluginPrototype):
    """Generic likelihood plugin for unbinned Poisson (event-list) data.

    Evaluates the standard unbinned extended likelihood:
    log L = -(expected counts) + sum_i log(rate(x_i) * exposure).
    It is very slow for many events.
    """

    def __init__(
        self,
        name: str,
        observation: EventObservation,
        source_name: Optional[str] = None,
    ) -> None:
        """
        This is a generic likelihood for unbinned Poisson data.
        It is very slow for many events.

        :param name: the plugin name
        :param observation: an EventObservation container
        :param source_name: optional source name; restricts the model to that source
        """
        assert isinstance(observation, EventObservation)

        self._observation: EventObservation = observation
        self._source_name: Optional[str] = source_name
        self._n_events: int = self._observation.n_events

        super(UnbinnedPoissonLike, self).__init__(
            name=name, nuisance_parameters={})

    def set_model(self, model: astromodels.Model) -> None:
        """
        Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.
        """
        self._like_model: astromodels.Model = model

        # We assume there are no extended sources, since we cannot handle them here
        assert self._like_model.get_number_of_extended_sources() == 0, (
            "SpectrumLike plugins do not support " "extended sources"
        )

        # check if we set a source name that the source is in the model
        if self._source_name is not None:
            assert self._source_name in self._like_model.sources, (
                "Source %s is not contained in "
                "the likelihood model" % self._source_name
            )

        differential, integral = self._get_diff_and_integral(self._like_model)

        self._integral_model = integral
        self._model = differential

    def _get_diff_and_integral(
        self, likelihood_model: astromodels.Model
    ) -> Tuple[types.FunctionType, types.FunctionType]:
        """Build the differential flux callable and its Simpson-rule integral.

        :param likelihood_model: the model to evaluate
        :return: ``(differential, integral)`` where ``differential(energies)``
            evaluates the (summed) point-source flux and ``integral(e1, e2)``
            approximates its integral over [e1, e2] with one Simpson step.
        """
        # NOTE(review): ``self._tag`` is never assigned in this class; it is
        # presumably provided by PluginPrototype -- confirm.
        if self._source_name is None:

            n_point_sources = likelihood_model.get_number_of_point_sources()

            # Make a function which will stack all point sources (OGIP do not
            # support spatial dimension)
            def differential(energies):
                fluxes = likelihood_model.get_point_source_fluxes(
                    0, energies, tag=self._tag
                )
                # If we have only one point source, this will never be executed
                for i in range(1, n_point_sources):
                    fluxes += likelihood_model.get_point_source_fluxes(
                        i, energies, tag=self._tag
                    )
                return fluxes

        else:
            # This dataset refers to a specific source. We already verified in
            # set_model() that self._source_name exists in the model, so a
            # missing key cannot occur here (the old try/except around this
            # ``def`` was dead code: defining a closure cannot raise KeyError).
            #
            # BUGFIX: this closure used to be named ``differential_flux`` while
            # ``integral`` below and the return statement referenced
            # ``differential``, raising NameError whenever source_name was set.
            def differential(energies):
                return likelihood_model.sources[self._source_name](
                    energies, tag=self._tag
                )

        # New way with simpson rule.
        # Make sure to not calculate the model twice for the same energies
        def integral(e1, e2):
            # Simpson's rule over the single interval [e1, e2]
            return (
                (e2 - e1)
                / 6.0
                * (
                    differential(e1)
                    + 4 * differential((e2 + e1) / 2.0)
                    + differential(e2)
                )
            )

        return differential, integral

    def get_log_like(self) -> float:
        """
        Return the value of the log-likelihood with the current values for the
        parameters
        """
        # Expected number of counts: integral of the rate over the live interval(s).
        n_expected_counts: float = 0.

        if self._observation.is_multi_interval:

            for start, stop in zip(self._observation.start, self._observation.stop):

                n_expected_counts += self._integral_model(start, stop)

        else:

            n_expected_counts += self._integral_model(
                self._observation.start, self._observation.stop
            )

        # Differential rate evaluated at each event, scaled by the exposure.
        M = self._model(self._observation.events) * self._observation.exposure

        # Clamp unphysical negative rates to zero; the protected log below
        # handles zeros smoothly.
        negative_mask = M < 0

        if negative_mask.sum() > 0:
            M[negative_mask] = 0.0

        # use numba to sum the events
        sum_logM = _evaluate_logM_sum(M, self._n_events)

        minus_log_like = -n_expected_counts + sum_logM

        return minus_log_like

    def inner_fit(self) -> float:
        """
        This is used for the profile likelihood. Keeping fixed all parameters in the
        LikelihoodModel, this method minimize the logLike over the remaining nuisance
        parameters, i.e., the parameters belonging only to the model for this
        particular detector. If there are no nuisance parameters, simply return the
        logLike value.
        """
        return self.get_log_like()

    def get_number_of_data_points(self) -> int:
        """Number of events in the observation."""
        return self._n_events
@nb.njit(fastmath=True)
def _evaluate_logM_sum(M, size):
    """Sum of log(M) with protection for negative or tiny entries.

    Entries at or below ~2*tiny are replaced by a smooth linear
    extrapolation of the logarithm (better than a sharp cutoff).
    """
    safe_mask = M > 2.0 * _tiny
    unsafe_mask = np.logical_not(safe_mask)
    if unsafe_mask.sum() > 0:
        logM = np.zeros(size)
        # linear continuation of log below the threshold
        logM[unsafe_mask] = (np.abs(M[unsafe_mask]) / _tiny) + np.log(_tiny) - 1
        logM[safe_mask] = np.log(M[safe_mask])
    else:
        logM = np.log(M)
    return logM.sum()
| 28.033088 | 104 | 0.603279 |
04c1ee153b25193cafbe23fce6ccec739edff0de | 3,104 | py | Python | codes/03.FEM_laplacian/HEALPix/16_check_reordering_mask.py | MartMilani/PDM | cca07a8485c6933361536286279ae6c7e14d7fa1 | [
"MIT"
] | null | null | null | codes/03.FEM_laplacian/HEALPix/16_check_reordering_mask.py | MartMilani/PDM | cca07a8485c6933361536286279ae6c7e14d7fa1 | [
"MIT"
] | null | null | null | codes/03.FEM_laplacian/HEALPix/16_check_reordering_mask.py | MartMilani/PDM | cca07a8485c6933361536286279ae6c7e14d7fa1 | [
"MIT"
] | null | null | null |
from __future__ import print_function
import matplotlib
matplotlib.use('tkAgg')
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from dolfin import *
import scipy
import numpy as np
import healpy as hp
from deepsphere import utils
# Test for PETSc and SLEPc
if not has_linear_algebra_backend("PETSc"):
print("DOLFIN has not been configured with PETSc. Exiting.")
exit()
if not has_slepc():
print("DOLFIN has not been configured with SLEPc. Exiting.")
exit()
spectral_content = dict()
nsides = [8]
for nside in nsides:
lmax = 3 * nside - 1
N = np.cumsum(np.arange(1, 2*lmax+2, 2))[-1]
# Define mesh, function space
mesh = Mesh("09_meshes/HEALPix_{}.xml".format(nside))
global_normal = Expression(("x[0]", "x[1]", "x[2]"), degree=1)
mesh.init_cell_orientations(global_normal)
V = FunctionSpace(mesh, "Lagrange", 1)
# Define basis and bilinear form
u = TrialFunction(V)
v = TestFunction(V)
a = dot(grad(u), grad(v))*dx
b = dot(u, v)*dx
# Assemble stiffness form
A = PETScMatrix()
B = PETScMatrix()
assemble(a, tensor=A)
assemble(b, tensor=B)
# Create eigensolver
eigensolver = SLEPcEigenSolver(A, B)
eigensolver.parameters['spectrum'] = 'target real'
eigensolver.parameters['tolerance'] = 1.e-3
eigensolver.parameters['maximum_iterations'] = 100
# Compute all eigenvalues of A x = \lambda x
print("Computing eigenvalues. This can take a minute.")
eigensolver.solve(N)
print('Done. Extracting results...')
eig_vectors = np.ndarray((12*nside**2, N), dtype='float')
eig_values = np.ndarray(N, dtype='float')
ne = 16
for i in range(ne):
# Extract largest (first) eigenpair
r, c, rx, cx = eigensolver.get_eigenpair(i)
# ----- keeping the dof ordering -----
eig_vectors[:, i] = np.asarray(rx)
eig_values[i] = r
for ind in range(ne):
hp.mollview(eig_vectors[:, ind],
title='Eigenvector {}'.format(ind),
nest=False,
sub=(ne//4, 4, ind+1),
max=np.max(np.abs(eig_vectors[:, :ne])),
min=-np.max(np.abs(eig_vectors[:, :ne])),
cbar=False,
rot=(0,0,0))
with utils.HiddenPrints():
hp.graticule();
plt.show()
# ---------- reordering ----------
reordered_mask = np.load('15_reordering_masks/reordering_mask_{}.npy'.format(nside))
eig_vectors = eig_vectors[reordered_mask]
# --------------------------------
ne = 16
for ind in range(ne):
hp.mollview(eig_vectors[:, ind],
title='Eigenvector {}'.format(ind),
nest=False,
sub=(ne//4, 4, ind+1),
max=np.max(np.abs(eig_vectors[:, :ne])),
min=-np.max(np.abs(eig_vectors[:, :ne])),
cbar=False,
rot=(0,0,0))
with utils.HiddenPrints():
hp.graticule();
plt.show()
| 29.561905 | 88 | 0.566688 |
ed718e4fc06a2f6b0ca5e9ea5a7dbb93f266d733 | 5,862 | py | Python | Python27/Tools/Scripts/patchcheck.py | Jeff-Tian/mybnb | 1a42890a1d2f1344d5465f8be10c42df01964f5a | [
"Apache-2.0"
] | 1 | 2019-05-17T08:58:11.000Z | 2019-05-17T08:58:11.000Z | Python27/Tools/Scripts/patchcheck.py | Jeff-Tian/mybnb | 1a42890a1d2f1344d5465f8be10c42df01964f5a | [
"Apache-2.0"
] | 2 | 2016-12-12T05:54:58.000Z | 2016-12-12T05:55:44.000Z | Python27/Tools/Scripts/patchcheck.py | Jeff-Tian/mybnb | 1a42890a1d2f1344d5465f8be10c42df01964f5a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import re
import sys
import shutil
import os.path
import subprocess
import sysconfig
import reindent
import untabify
SRCDIR = sysconfig.get_config_var('srcdir')
def n_files_str(count):
    """Return 'N file(s)' with the proper plurality on 'file'."""
    plural = "" if count == 1 else "s"
    return "{0} file{1}".format(count, plural)
def status(message, modal=False, info=None):
"""Decorator to output status info to stdout."""
def decorated_fxn(fxn):
def call_fxn(*args, **kwargs):
sys.stdout.write(message + ' ... ')
sys.stdout.flush()
result = fxn(*args, **kwargs)
if not modal and not info:
print "done"
elif info:
print info(result)
else:
print "yes" if result else "NO"
return result
return call_fxn
return decorated_fxn
def mq_patches_applied():
    """Check if there are any applied MQ patches."""
    st = subprocess.Popen(['hg', 'qapplied'],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    try:
        bstdout, _ = st.communicate()
        # Applied patches exist iff hg exited cleanly AND printed something.
        return st.returncode == 0 and bstdout
    finally:
        st.stdout.close()
        st.stderr.close()
@status("Getting the list of files that have been added/changed",
        info=lambda x: n_files_str(len(x)))
def changed_files():
    """Get the list of changed or added files from the VCS."""
    if os.path.isdir(os.path.join(SRCDIR, '.hg')):
        # Mercurial checkout: list added/modified files; when MQ patches are
        # applied, diff against qparent so the patch contents are included.
        vcs = 'hg'
        cmd = 'hg status --added --modified --no-status'
        if mq_patches_applied():
            cmd += ' --rev qparent'
    elif os.path.isdir('.svn'):
        vcs = 'svn'
        cmd = 'svn status --quiet --non-interactive --ignore-externals'
    else:
        sys.exit('need a checkout to get modified files')
    st = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    try:
        st.wait()
        if vcs == 'hg':
            # hg prints one bare path per line.
            return [x.decode().rstrip() for x in st.stdout]
        else:
            # svn: keep only Added/Modified entries; the path is the last
            # whitespace-separated token on the line.
            output = (x.decode().rstrip().rsplit(None, 1)[-1]
                      for x in st.stdout if x[0] in 'AM')
            return set(path for path in output if os.path.isfile(path))
    finally:
        st.stdout.close()
def report_modified_files(file_paths):
    """Format *file_paths* as an 'N file(s):' header plus one line per path."""
    count = len(file_paths)
    if not file_paths:
        return n_files_str(count)
    lines = ["{}:".format(n_files_str(count))]
    lines.extend(" {}".format(path) for path in file_paths)
    return "\n".join(lines)
@status("Fixing whitespace", info=report_modified_files)
def normalize_whitespace(file_paths):
    """Make sure that the whitespace for .py files have been normalized."""
    reindent.makebackup = False # No need to create backups.
    fixed = []
    for path in (x for x in file_paths if x.endswith('.py')):
        # NOTE(review): reindent.check appears to fix the file in place and
        # return True when a change was made -- confirm against Tools/scripts.
        if reindent.check(os.path.join(SRCDIR, path)):
            fixed.append(path)
    return fixed
@status("Fixing C file whitespace", info=report_modified_files)
def normalize_c_whitespace(file_paths):
    """Fix (and report) C files containing hard tabs, expanding them to 8 spaces."""
    fixed = []
    for path in file_paths:
        abspath = os.path.join(SRCDIR, path)
        with open(abspath, 'r') as f:
            # Skip files that contain no tab characters at all.
            if '\t' not in f.read():
                continue
        untabify.process(abspath, 8, verbose=False)
        fixed.append(path)
    return fixed
# Trailing whitespace immediately before a (possibly CRLF) line ending;
# the sub() below keeps the captured newline and drops the whitespace.
ws_re = re.compile(br'\s+(\r?\n)$')
@status("Fixing docs whitespace", info=report_modified_files)
def normalize_docs_whitespace(file_paths):
    """Strip trailing whitespace in doc files, backing up the originals as .bak."""
    fixed = []
    for path in file_paths:
        abspath = os.path.join(SRCDIR, path)
        try:
            with open(abspath, 'rb') as f:
                lines = f.readlines()
            new_lines = [ws_re.sub(br'\1', line) for line in lines]
            # Only rewrite (and back up) files that actually changed.
            if new_lines != lines:
                shutil.copyfile(abspath, abspath + '.bak')
                with open(abspath, 'wb') as f:
                    f.writelines(new_lines)
                fixed.append(path)
        except Exception as err:
            # Best-effort: report the failure and continue with the other files.
            print 'Cannot fix %s: %s' % (path, err)
    return fixed
@status("Docs modified", modal=True)
def docs_modified(file_paths):
    """Report if any file in the Doc directory has been changed."""
    # A non-empty selection of Doc files means the docs were touched.
    return len(file_paths) != 0
@status("Misc/ACKS updated", modal=True)
def credit_given(file_paths):
    """Check if Misc/ACKS has been changed."""
    acks_path = os.path.join('Misc', 'ACKS')
    return acks_path in file_paths
@status("Misc/NEWS updated", modal=True)
def reported_news(file_paths):
    """Check if Misc/NEWS has been changed."""
    news_path = os.path.join('Misc', 'NEWS')
    return news_path in file_paths
def main():
    """Run all patch checks against the files reported changed by the VCS."""
    file_paths = changed_files()
    # Partition the changed files by the kind of check they need.
    python_files = [fn for fn in file_paths if fn.endswith('.py')]
    c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
    doc_files = [fn for fn in file_paths if fn.startswith('Doc') and
                 fn.endswith(('.rst', '.inc'))]
    misc_files = {os.path.join('Misc', 'ACKS'), os.path.join('Misc', 'NEWS')}\
                 & set(file_paths)
    # PEP 8 whitespace rules enforcement.
    normalize_whitespace(python_files)
    # C rules enforcement.
    normalize_c_whitespace(c_files)
    # Doc whitespace enforcement.
    normalize_docs_whitespace(doc_files)
    # Docs updated.
    docs_modified(doc_files)
    # Misc/ACKS changed.
    credit_given(misc_files)
    # Misc/NEWS changed.
    reported_news(misc_files)
    # Test suite run and passed.
    if python_files or c_files:
        end = " and check for refleaks?" if c_files else "?"
        print
        print "Did you run the test suite" + end
if __name__ == '__main__':
    main()
| 31.516129 | 79 | 0.582907 |
1be91587d98931bf770ac3016789dc904ee655c5 | 2,926 | py | Python | trailblazer/store/models.py | jemten/trailblazer | dce3fe6ffd19e23c94fb6e223e4778a1c93960c9 | [
"MIT"
] | null | null | null | trailblazer/store/models.py | jemten/trailblazer | dce3fe6ffd19e23c94fb6e223e4778a1c93960c9 | [
"MIT"
] | null | null | null | trailblazer/store/models.py | jemten/trailblazer | dce3fe6ffd19e23c94fb6e223e4778a1c93960c9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import alchy
from sqlalchemy import Column, ForeignKey, orm, types, UniqueConstraint
from trailblazer.mip import sacct
from trailblazer.constants import TEMP_STATUSES
# Lifecycle states an Analysis record can be in.
STATUS_OPTIONS = ('pending', 'running', 'completed', 'failed', 'error', 'canceled')
# Valid job statuses: SLURM sacct state categories, lower-cased.
JOB_STATUS_OPTIONS = [category.lower() for category in sacct.CATEGORIES]
PRIORITY_OPTIONS = ('low', 'normal', 'high')
# Analysis types: whole-exome, whole-genome and RNA sequencing.
TYPES = ('wes', 'wgs', 'rna')
# Declarative base class shared by all ORM models in this module.
Model = alchy.make_declarative_base(Base=alchy.ModelBase)
class Info(Model):
    """Keep track of meta data."""
    __tablename__ = 'info'
    id = Column(types.Integer, primary_key=True)
    created_at = Column(types.DateTime, default=datetime.datetime.now)
    # Not updated automatically (no ``onupdate``); application code must set it.
    updated_at = Column(types.DateTime)
class User(Model):
    """Application user; ``google_id`` ties the account to a Google identity."""
    __tablename__ = 'user'
    id = Column(types.Integer, primary_key=True)
    # External Google account id; unique per user.
    google_id = Column(types.String(128), unique=True)
    email = Column(types.String(128), unique=True)
    name = Column(types.String(128))
    avatar = Column(types.Text)
    created_at = Column(types.DateTime, default=datetime.datetime.now)
    # Analyses started by this user (exposed on Analysis as ``.user``).
    runs = orm.relationship('Analysis', backref='user')
    @property
    def first_name(self) -> str:
        """First part of name."""
        # NOTE(review): assumes space-separated names; a one-word name is
        # returned unchanged.
        return self.name.split(' ')[0]
class Analysis(Model):
    """Analysis record."""
    __tablename__ = 'analysis'
    # At most one analysis per (family, started_at, status) combination.
    __table_args__ = (UniqueConstraint('family', 'started_at', 'status',
                                       name='_uc_family_start_status'),)
    id = Column(types.Integer, primary_key=True)
    # Identifier of the family/case the analysis belongs to.
    family = Column(types.String(128), nullable=False)
    version = Column(types.String(32))
    # When this record was logged (defaults to row creation time).
    logged_at = Column(types.DateTime, default=datetime.datetime.now)
    started_at = Column(types.DateTime)
    completed_at = Column(types.DateTime)
    status = Column(types.Enum(*STATUS_OPTIONS))
    priority = Column(types.Enum(*PRIORITY_OPTIONS))
    out_dir = Column(types.Text)
    config_path = Column(types.Text)
    comment = Column(types.Text)
    # Soft-delete and UI-visibility flags.
    is_deleted = Column(types.Boolean, default=False)
    is_visible = Column(types.Boolean, default=True)
    # One of TYPES: 'wes', 'wgs' or 'rna'.
    type = Column(types.Enum(*TYPES))
    user_id = Column(ForeignKey(User.id))
    progress = Column(types.Float, default=0.)
    # Jobs linked to this analysis. NOTE(review): despite the name, the
    # relationship itself does not filter on job status.
    failed_jobs = orm.relationship('Job', backref='analysis')
    @property
    def is_temp(self):
        """Check if the log is for a temporary status: running/pending."""
        return self.status in TEMP_STATUSES
class Job(Model):
    """Represent a step in the pipeline."""
    __tablename__ = 'job'
    id = Column(types.Integer, primary_key=True)
    # Parent analysis; rows are removed when the analysis row is deleted.
    analysis_id = Column(ForeignKey(Analysis.id, ondelete='CASCADE'), nullable=False)
    # Scheduler-assigned SLURM job id.
    slurm_id = Column(types.Integer)
    name = Column(types.String(64))
    context = Column(types.String(64))
    started_at = Column(types.DateTime)
    # Runtime duration; units not recorded here -- presumably seconds, confirm.
    elapsed = Column(types.Integer)
    status = Column(types.Enum(*JOB_STATUS_OPTIONS))
| 30.479167 | 85 | 0.685919 |
228979e17efef97d77a95d2a452d57631eb3b6ea | 1,253 | py | Python | generate_changelog/_attr_docs.py | coordt/generate-changelog | ecbeb444a8cc5c3f35af3b29127cd347bda5ece9 | [
"MIT"
] | 1 | 2022-03-07T09:37:54.000Z | 2022-03-07T09:37:54.000Z | generate_changelog/_attr_docs.py | coordt/generate-changelog | ecbeb444a8cc5c3f35af3b29127cd347bda5ece9 | [
"MIT"
] | null | null | null | generate_changelog/_attr_docs.py | coordt/generate-changelog | ecbeb444a8cc5c3f35af3b29127cd347bda5ece9 | [
"MIT"
] | null | null | null | """Get the attribute documentation for a class."""
import ast
import inspect
from .utilities import pairs
def attribute_docstrings(obj: type) -> dict:
    """Return the docstrings for all attributes of the object.

    Parses the class source and pairs every simple (annotated) assignment
    with the string literal that immediately follows it, if any.
    """
    tree = ast.parse(inspect.getsource(obj))
    if len(tree.body) != 1 or not isinstance(tree.body[0], ast.ClassDef):
        raise TypeError("Unexpected object type.")
    class_node: ast.ClassDef = tree.body[0]
    docstrings = {}
    for stmt, follower in pairs(list(class_node.body)):
        # Extract the assigned name when ``stmt`` is a simple assignment.
        if isinstance(stmt, ast.AnnAssign) and isinstance(stmt.target, ast.Name) and stmt.simple:
            name = stmt.target.id
        elif (
            isinstance(stmt, ast.Assign)
            and len(stmt.targets) == 1
            and isinstance(stmt.targets[0], ast.Name)
        ):
            name = stmt.targets[0].id
        else:
            continue
        if not isinstance(follower, ast.Expr):
            continue
        value = follower.value
        if isinstance(value, ast.Constant) and isinstance(value.value, str):
            docstrings[name] = inspect.cleandoc(value.value).strip()
        elif isinstance(value, ast.Str):  # pragma: no cover
            # Python <= 3.7
            docstrings[name] = inspect.cleandoc(value.s).strip()
    return docstrings
| 34.805556 | 108 | 0.632083 |
1347323828fa40f6b881ba008186ac75d77cb7bb | 9,254 | py | Python | hubspot/cms/site_search/models/error.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 1 | 2020-11-12T08:46:32.000Z | 2020-11-12T08:46:32.000Z | hubspot/cms/site_search/models/error.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | hubspot/cms/site_search/models/error.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
CMS Site Search
Use these endpoints for searching content on your HubSpot hosted CMS website(s). # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.cms.site_search.configuration import Configuration
class Error(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        "message": "str",
        "correlation_id": "str",
        "category": "str",
        "sub_category": "str",
        "errors": "list[ErrorDetail]",
        "context": "dict(str, list[str])",
        "links": "dict(str, str)",
    }
    attribute_map = {
        "message": "message",
        "correlation_id": "correlationId",
        "category": "category",
        "sub_category": "subCategory",
        "errors": "errors",
        "context": "context",
        "links": "links",
    }
    def __init__(
        self,
        message=None,
        correlation_id=None,
        category=None,
        sub_category=None,
        errors=None,
        context=None,
        links=None,
        local_vars_configuration=None,
    ): # noqa: E501
        """Error - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._message = None
        self._correlation_id = None
        self._category = None
        self._sub_category = None
        self._errors = None
        self._context = None
        self._links = None
        self.discriminator = None
        # message/correlation_id/category are required: their setters raise
        # when client-side validation is enabled and None is passed.
        self.message = message
        self.correlation_id = correlation_id
        self.category = category
        if sub_category is not None:
            self.sub_category = sub_category
        if errors is not None:
            self.errors = errors
        if context is not None:
            self.context = context
        if links is not None:
            self.links = links
    @property
    def message(self):
        """Gets the message of this Error.  # noqa: E501
        A human readable message describing the error along with remediation steps where appropriate  # noqa: E501
        :return: The message of this Error.  # noqa: E501
        :rtype: str
        """
        return self._message
    @message.setter
    def message(self, message):
        """Sets the message of this Error.
        A human readable message describing the error along with remediation steps where appropriate  # noqa: E501
        :param message: The message of this Error.  # noqa: E501
        :type: str
        """
        if (
            self.local_vars_configuration.client_side_validation and message is None
        ): # noqa: E501
            raise ValueError(
                "Invalid value for `message`, must not be `None`"
            ) # noqa: E501
        self._message = message
    @property
    def correlation_id(self):
        """Gets the correlation_id of this Error.  # noqa: E501
        A unique identifier for the request. Include this value with any error reports or support tickets  # noqa: E501
        :return: The correlation_id of this Error.  # noqa: E501
        :rtype: str
        """
        return self._correlation_id
    @correlation_id.setter
    def correlation_id(self, correlation_id):
        """Sets the correlation_id of this Error.
        A unique identifier for the request. Include this value with any error reports or support tickets  # noqa: E501
        :param correlation_id: The correlation_id of this Error.  # noqa: E501
        :type: str
        """
        if (
            self.local_vars_configuration.client_side_validation
            and correlation_id is None
        ): # noqa: E501
            raise ValueError(
                "Invalid value for `correlation_id`, must not be `None`"
            ) # noqa: E501
        self._correlation_id = correlation_id
    @property
    def category(self):
        """Gets the category of this Error.  # noqa: E501
        The error category  # noqa: E501
        :return: The category of this Error.  # noqa: E501
        :rtype: str
        """
        return self._category
    @category.setter
    def category(self, category):
        """Sets the category of this Error.
        The error category  # noqa: E501
        :param category: The category of this Error.  # noqa: E501
        :type: str
        """
        if (
            self.local_vars_configuration.client_side_validation and category is None
        ): # noqa: E501
            raise ValueError(
                "Invalid value for `category`, must not be `None`"
            ) # noqa: E501
        self._category = category
    @property
    def sub_category(self):
        """Gets the sub_category of this Error.  # noqa: E501
        A specific category that contains more specific detail about the error  # noqa: E501
        :return: The sub_category of this Error.  # noqa: E501
        :rtype: str
        """
        return self._sub_category
    @sub_category.setter
    def sub_category(self, sub_category):
        """Sets the sub_category of this Error.
        A specific category that contains more specific detail about the error  # noqa: E501
        :param sub_category: The sub_category of this Error.  # noqa: E501
        :type: str
        """
        self._sub_category = sub_category
    @property
    def errors(self):
        """Gets the errors of this Error.  # noqa: E501
        further information about the error  # noqa: E501
        :return: The errors of this Error.  # noqa: E501
        :rtype: list[ErrorDetail]
        """
        return self._errors
    @errors.setter
    def errors(self, errors):
        """Sets the errors of this Error.
        further information about the error  # noqa: E501
        :param errors: The errors of this Error.  # noqa: E501
        :type: list[ErrorDetail]
        """
        self._errors = errors
    @property
    def context(self):
        """Gets the context of this Error.  # noqa: E501
        Context about the error condition  # noqa: E501
        :return: The context of this Error.  # noqa: E501
        :rtype: dict(str, list[str])
        """
        return self._context
    @context.setter
    def context(self, context):
        """Sets the context of this Error.
        Context about the error condition  # noqa: E501
        :param context: The context of this Error.  # noqa: E501
        :type: dict(str, list[str])
        """
        self._context = context
    @property
    def links(self):
        """Gets the links of this Error.  # noqa: E501
        A map of link names to associated URIs containing documentation about the error or recommended remediation steps  # noqa: E501
        :return: The links of this Error.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._links
    @links.setter
    def links(self, links):
        """Sets the links of this Error.
        A map of link names to associated URIs containing documentation about the error or recommended remediation steps  # noqa: E501
        :param links: The links of this Error.  # noqa: E501
        :type: dict(str, str)
        """
        self._links = links
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models and containers of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
                )
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict")
                        else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Error):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, Error):
            return True
        return self.to_dict() != other.to_dict()
79e778274d0d1f5b4376e9f14070a2bf5c691c9d | 2,790 | py | Python | framework/test_data.py | manimanasamylavarapu/OpenRank | 0bfd721a69e6056d24f18b3bf1b0bf73c1d3317c | [
"MIT"
] | 3 | 2020-05-08T04:34:26.000Z | 2022-03-06T15:01:04.000Z | framework/test_data.py | manimanasamylavarapu/OpenRank | 0bfd721a69e6056d24f18b3bf1b0bf73c1d3317c | [
"MIT"
] | null | null | null | framework/test_data.py | manimanasamylavarapu/OpenRank | 0bfd721a69e6056d24f18b3bf1b0bf73c1d3317c | [
"MIT"
] | 2 | 2020-11-22T21:28:03.000Z | 2020-12-09T07:07:09.000Z | from framework.Classes import Testcase
# Two ready-made test cases exercising stdin handling in submitted programs.
tc1 = Testcase()
tc1.id = "1"
tc1.input = '23\n34'
tc1.timeout = 1
tc2 = Testcase()
tc2.id = "2"
tc2.input = """21 34"""
tc2.timeout = 1
# --- Java source snippets (normal run, no input, runtime error, compile error) ---
java_source_code_with_input = """
import java.util.*;
class Solution{
public static void main(String... args) {
Scanner scan = new Scanner(System.in);
int sum = scan.nextInt() + scan.nextInt();
System.out.println(sum);
}
}
"""
java_source_code_with_no_input = """
class Solution{
public static void main(String... args) {
System.out.println("Hello World");
}
}
"""
java_source_code_with_exception = """
class Solution{
public static void main(String... args) {
throw new RuntimeException();
}
}
"""
java_source_code_with_compile_error = """
class Solution{
public static void main(String... args) {
int a
}
}
"""
# --- Python sources (3.x and 2.x variants of add-two-numbers) ---
python3_source_code_add_two_numbers = """
# s = input()
# print(s)
# numbers = s.split()
number1 = input()
number2 = input()
sum = int(number1) + int(number2)
print(sum)
"""
python2_source_code_add_two_numbers = """
number1 = raw_input()
number2 = raw_input()
sum = int(number1) + int(number2)
print sum
"""
# --- C sources (working version and compile-error version) ---
c_source_code_add_two_numbers = """
#include<stdio.h>
int main() {
int a, b, sum;
scanf("%d %d", &a, &b);
sum = a + b;
printf("%d", sum);
return(0);
}
"""
c_source_code_add_two_numbers_compile_error = """
#include<stdio.h>
int main() {
int a, b, sum;
scanf("%d %d", &a, &b);
sum = a b;
printf("%d", sum);
return(0);
}
"""
# --- C++ source ---
cpp_source_code_add_two_numbers = """
#include <iostream>
using namespace std;
int main()
{
int firstNumber, secondNumber, sumOfTwoNumbers;
cin >> firstNumber >> secondNumber;
// sum of two numbers in stored in variable sumOfTwoNumbers
sumOfTwoNumbers = firstNumber + secondNumber;
// Prints sum
cout << sumOfTwoNumbers;
return 0;
}"""
# --- C# source ---
cs_source_code_add_two_numbers = """
using System;
namespace OpenRank
{
class Program
{
static void Main(string[] args)
{
int x;
int y;
int result;
x = Convert.ToInt32(Console.ReadLine());
y = Convert.ToInt32(Console.ReadLine());
result = x + y;
Console.Write(""+result);
}
}
}"""
| 19.787234 | 66 | 0.506093 |
973d4ffd343f7273f1caef4ab4ae73a1b69dac01 | 3,574 | py | Python | doctr/models/detection/core.py | thentgesMindee/doctr | f97e92ba1b7bcb785a60f2cf549f13f88e510609 | [
"Apache-2.0"
] | null | null | null | doctr/models/detection/core.py | thentgesMindee/doctr | f97e92ba1b7bcb785a60f2cf549f13f88e510609 | [
"Apache-2.0"
] | null | null | null | doctr/models/detection/core.py | thentgesMindee/doctr | f97e92ba1b7bcb785a60f2cf549f13f88e510609 | [
"Apache-2.0"
] | 1 | 2022-01-27T09:25:05.000Z | 2022-01-27T09:25:05.000Z | # Copyright (C) 2021, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from typing import List, Tuple
import cv2
import numpy as np
from doctr.utils.geometry import rotate_image
from doctr.utils.repr import NestedObject
from .._utils import get_bitmap_angle
__all__ = ['DetectionPostProcessor']
class DetectionPostProcessor(NestedObject):
    """Abstract class to postprocess the raw output of a text-detection model.
    Args:
        box_thresh (float): minimal objectness score to keep a box
        bin_thresh (float): threshold used to binarize the probability map
        rotated_bbox (bool): if True, boxes are rotated and scored over a
            filled polygon mask rather than an axis-aligned rectangle
    """
    def __init__(
        self,
        box_thresh: float = 0.5,
        bin_thresh: float = 0.5,
        rotated_bbox: bool = False
    ) -> None:
        self.box_thresh = box_thresh
        self.bin_thresh = bin_thresh
        self.rotated_bbox = rotated_bbox
    def extra_repr(self) -> str:
        # Only box_thresh is surfaced in the repr of subclasses.
        return f"box_thresh={self.box_thresh}"
    @staticmethod
    def box_score(
        pred: np.ndarray,
        points: np.ndarray,
        rotated_bbox: bool = False
    ) -> float:
        """Compute the confidence score for a polygon : mean of the p values on the polygon
        Args:
            pred (np.ndarray): p map returned by the model; first two axes are (H, W)
            points (np.ndarray): polygon vertices as (x, y) pairs, one per row
            rotated_bbox (bool): if True, average over the filled polygon mask;
                otherwise average over the axis-aligned bounding rectangle
        Returns:
            polygon objectness (mean probability over the covered area)
        """
        h, w = pred.shape[:2]
        if not rotated_bbox:
            # Axis-aligned case: clip the box's bounding rectangle to the map.
            xmin = np.clip(np.floor(points[:, 0].min()).astype(np.int32), 0, w - 1)
            xmax = np.clip(np.ceil(points[:, 0].max()).astype(np.int32), 0, w - 1)
            ymin = np.clip(np.floor(points[:, 1].min()).astype(np.int32), 0, h - 1)
            ymax = np.clip(np.ceil(points[:, 1].max()).astype(np.int32), 0, h - 1)
            return pred[ymin:ymax + 1, xmin:xmax + 1].mean()
        else:
            # Rotated case: rasterize the polygon into a binary mask and
            # average pred over the masked, nonzero area.
            mask = np.zeros((h, w), np.int32)
            cv2.fillPoly(mask, [points.astype(np.int32)], 1.0)
            product = pred * mask
            # NOTE(review): if the polygon covers no nonzero prediction,
            # np.count_nonzero(product) is 0 and this divides by zero —
            # confirm upstream guarantees the polygon overlaps the map.
            return np.sum(product) / np.count_nonzero(product)
    def bitmap_to_boxes(
        self,
        pred: np.ndarray,
        bitmap: np.ndarray,
    ) -> np.ndarray:
        # Subclasses implement the actual box extraction from the binary map.
        raise NotImplementedError
    def __call__(
        self,
        proba_map: np.ndarray,
    ) -> Tuple[List[np.ndarray], List[float]]:
        """Performs postprocessing for a list of model outputs
        Args:
            proba_map: probability map of shape (N, H, W)
        returns:
            list of N tensors (for each input sample), with each tensor of shape (*, 5) or (*, 6),
            and a list of N angles (page orientations).
        """
        # Binarize the probability map with the configured threshold.
        bitmap = (proba_map > self.bin_thresh).astype(proba_map.dtype)
        boxes_batch, angles_batch = [], []
        # Kernel for opening, empirical law for ksize
        k_size = 1 + int(proba_map[0].shape[0] / 512)
        kernel = np.ones((k_size, k_size), np.uint8)
        for p_, bitmap_ in zip(proba_map, bitmap):
            # Perform opening (erosion + dilatation)
            bitmap_ = cv2.morphologyEx(bitmap_.astype(np.float32), cv2.MORPH_OPEN, kernel)
            # Rotate bitmap and proba_map
            angle = get_bitmap_angle(bitmap_)
            angles_batch.append(angle)
            bitmap_, p_ = rotate_image(bitmap_, -angle, False), rotate_image(p_, -angle, False)
            # Delegate box extraction to the subclass on the deskewed maps.
            boxes = self.bitmap_to_boxes(pred=p_, bitmap=bitmap_)
            boxes_batch.append(boxes)
        return boxes_batch, angles_batch
| 32.490909 | 98 | 0.605204 |
5e471765638ffb9f1a8e3cfcf7330649f0dc1baf | 684 | py | Python | products/migrations/0004_auto_20160914_2031.py | mneeko/Pharmacy | d42338b858f5310767e97e8e5db5e4587de9e202 | [
"MIT"
] | null | null | null | products/migrations/0004_auto_20160914_2031.py | mneeko/Pharmacy | d42338b858f5310767e97e8e5db5e4587de9e202 | [
"MIT"
] | null | null | null | products/migrations/0004_auto_20160914_2031.py | mneeko/Pharmacy | d42338b858f5310767e97e8e5db5e4587de9e202 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-14 17:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Product.title unique on its own and drop the slug field."""

    dependencies = [
        ('products', '0003_auto_20160914_2026'),
    ]

    operations = [
        # Titles become the unique identifier once slug is gone.
        migrations.AlterField(
            model_name='product',
            name='title',
            field=models.CharField(max_length=120, unique=True),
        ),
        # Clear the composite uniqueness constraint before removing slug.
        migrations.AlterUniqueTogether(
            name='product',
            unique_together=set(),
        ),
        migrations.RemoveField(
            model_name='product',
            name='slug',
        ),
    ]
| 23.586207 | 64 | 0.576023 |
ca67b6c62695314029b7e5e302641953847b9ac6 | 12,894 | py | Python | tests/fast_tests/test_util.py | weizi-li/flow | 958b64ece8af6db715e6fb3b6042035b05b93bc2 | [
"MIT"
] | 1 | 2020-02-21T03:35:37.000Z | 2020-02-21T03:35:37.000Z | tests/fast_tests/test_util.py | weizi-li/flow | 958b64ece8af6db715e6fb3b6042035b05b93bc2 | [
"MIT"
] | null | null | null | tests/fast_tests/test_util.py | weizi-li/flow | 958b64ece8af6db715e6fb3b6042035b05b93bc2 | [
"MIT"
] | 1 | 2019-10-10T21:16:25.000Z | 2019-10-10T21:16:25.000Z | import unittest
import csv
import os
import json
import collections
from flow.core.params import VehicleParams
from flow.core.params import TrafficLightParams
from flow.controllers import IDMController, ContinuousRouter, RLController
from flow.core.params import SumoParams, EnvParams, NetParams, InitialConfig, \
InFlows, SumoCarFollowingParams
from flow.core.util import emission_to_csv
from flow.utils.flow_warnings import deprecated_attribute
from flow.utils.registry import make_create_env
from flow.utils.rllib import FlowParamsEncoder, get_flow_params
# NOTE(review): presumably consumed by flow to switch into test mode
# (e.g. skipping GUI/simulation features) — confirm against flow's config.
os.environ["TEST_FLAG"] = "True"
class TestEmissionToCSV(unittest.TestCase):
    """Exercise emission_to_csv on a small bundled emission file.

    Verifies that the generated CSV carries the expected column names and
    the expected number of data rows.
    """

    def test_emission_to_csv(self):
        # directory containing this test module
        base_dir = os.path.realpath(__file__).rsplit("/", 1)[0]

        # convert the small emission XML fixture into a csv next to it
        emission_to_csv(base_dir + "/test_files/test-emission.xml")

        # read the generated csv back: header row first, then one dict per row
        csv_path = base_dir + "/test_files/test-emission.csv"
        with open(csv_path, "r") as handle:
            rows = csv.reader(handle)
            header = next(rows)
            records = [{key: line[pos] for pos, key in enumerate(header)}
                       for line in rows]

        expected_header = [
            'time', 'CO', 'y', 'CO2', 'electricity', 'type', 'id', 'eclass',
            'waiting', 'NOx', 'fuel', 'HC', 'x', 'route', 'relative_position',
            'noise', 'angle', 'PMx', 'speed', 'edge_id', 'lane_number']
        self.assertCountEqual(header, expected_header)

        # rl vehicles are missing their final (reset) values, which is fine
        self.assertEqual(len(records), 104)
class TestWarnings(unittest.TestCase):
    """Tests warning functions located in flow.utils.warnings"""

    def test_deprecated_attribute(self):
        class Foo(object):
            """Throwaway class whose name should appear in the warning."""
            pass

        old_name = "bar_deprecated"
        new_name = "bar_new"

        expected_message = (
            "The attribute bar_deprecated in Foo is deprecated, use bar_new "
            "instead.")

        # the helper should emit a PendingDeprecationWarning with that text
        self.assertWarnsRegex(
            PendingDeprecationWarning, expected_message,
            deprecated_attribute, Foo(), old_name, new_name)
class TestRegistry(unittest.TestCase):
    """Tests the methods located in flow/utils/registry.py"""
    def test_make_create_env(self):
        """Tests that the make_create_env methods generates an environment with
        the expected flow parameters."""
        # use a flow_params dict derived from flow/benchmarks/figureeight0.py
        vehicles = VehicleParams()
        # 13 human-driven vehicles with an IDM controller and continuous routing
        vehicles.add(
            veh_id="human",
            acceleration_controller=(IDMController, {
                "noise": 0.2
            }),
            routing_controller=(ContinuousRouter, {}),
            car_following_params=SumoCarFollowingParams(
                speed_mode="obey_safe_speed",
            ),
            num_vehicles=13)
        # a single RL-controlled vehicle
        vehicles.add(
            veh_id="rl",
            acceleration_controller=(RLController, {}),
            routing_controller=(ContinuousRouter, {}),
            car_following_params=SumoCarFollowingParams(
                speed_mode="obey_safe_speed",
            ),
            num_vehicles=1)
        flow_params = dict(
            exp_tag="figure_eight_0",
            env_name="AccelEnv",
            network="FigureEightNetwork",
            simulator='traci',
            sim=SumoParams(
                sim_step=0.1,
                render=False,
            ),
            env=EnvParams(
                horizon=1500,
                additional_params={
                    "target_velocity": 20,
                    "max_accel": 3,
                    "max_decel": 3,
                    "sort_vehicles": False
                },
            ),
            net=NetParams(
                additional_params={
                    "radius_ring": 30,
                    "lanes": 1,
                    "speed_limit": 30,
                    "resolution": 40,
                },
            ),
            veh=vehicles,
            initial=InitialConfig(),
            tls=TrafficLightParams(),
        )
        # some random version number for testing
        v = 23434
        # call make_create_env
        create_env, env_name = make_create_env(params=flow_params, version=v)
        # check that the name is correct
        self.assertEqual(env_name, '{}-v{}'.format(flow_params["env_name"], v))
        # create the gym environment
        env = create_env()
        # Note that we expect the port number in sim_params to change, and
        # that this feature is in fact needed to avoid race conditions
        flow_params["sim"].port = env.sim_params.port
        # check that each of the parameter match
        self.assertEqual(env.env_params.__dict__,
                         flow_params["env"].__dict__)
        self.assertEqual(env.sim_params.__dict__,
                         flow_params["sim"].__dict__)
        self.assertEqual(env.network.traffic_lights.__dict__,
                         flow_params["tls"].__dict__)
        self.assertEqual(env.net_params.__dict__,
                         flow_params["net"].__dict__)
        self.assertEqual(env.initial_config.__dict__,
                         flow_params["initial"].__dict__)
        self.assertEqual(env.__class__.__name__, flow_params["env_name"])
        self.assertEqual(env.network.__class__.__name__,
                         flow_params["network"])
class TestRllib(unittest.TestCase):
    """Tests the methods located in flow/utils/rllib.py"""
    def test_encoder_and_get_flow_params(self):
        """Tests both FlowParamsEncoder and get_flow_params.
        FlowParamsEncoder is used to serialize the data from a flow_params dict
        for replay by the visualizer later. Then, the get_flow_params method is
        used to try and read the parameters from the config file, and is
        checked to match expected results.
        """
        # use a flow_params dict derived from flow/benchmarks/merge0.py
        vehicles = VehicleParams()
        vehicles.add(
            veh_id="human",
            acceleration_controller=(IDMController, {}),
            car_following_params=SumoCarFollowingParams(
                speed_mode="obey_safe_speed",
            ),
            # for testing coverage purposes, we add a routing controller
            routing_controller=(ContinuousRouter, {}),
            num_vehicles=5)
        vehicles.add(
            veh_id="rl",
            acceleration_controller=(RLController, {}),
            car_following_params=SumoCarFollowingParams(
                speed_mode="obey_safe_speed",
            ),
            num_vehicles=0)
        # inflows of human and RL vehicles on the highway, plus humans on the
        # merge edge
        inflow = InFlows()
        inflow.add(
            veh_type="human",
            edge="inflow_highway",
            vehs_per_hour=1800,
            departLane="free",
            departSpeed=10)
        inflow.add(
            veh_type="rl",
            edge="inflow_highway",
            vehs_per_hour=200,
            departLane="free",
            departSpeed=10)
        inflow.add(
            veh_type="human",
            edge="inflow_merge",
            vehs_per_hour=100,
            departLane="free",
            departSpeed=7.5)
        flow_params = dict(
            exp_tag="merge_0",
            env_name="MergePOEnv",
            network="MergeNetwork",
            sim=SumoParams(
                restart_instance=True,
                sim_step=0.5,
                render=False,
            ),
            env=EnvParams(
                horizon=750,
                sims_per_step=2,
                warmup_steps=0,
                additional_params={
                    "max_accel": 1.5,
                    "max_decel": 1.5,
                    "target_velocity": 20,
                    "num_rl": 5,
                },
            ),
            net=NetParams(
                inflows=inflow,
                additional_params={
                    "merge_length": 100,
                    "pre_merge_length": 500,
                    "post_merge_length": 100,
                    "merge_lanes": 1,
                    "highway_lanes": 1,
                    "speed_limit": 30,
                },
            ),
            veh=vehicles,
            initial=InitialConfig(),
            tls=TrafficLightParams(),
        )
        # create an config dict with space for the flow_params dict
        config = {"env_config": {}}
        # save the flow params for replay
        flow_json = json.dumps(
            flow_params, cls=FlowParamsEncoder, sort_keys=True, indent=4)
        config['env_config']['flow_params'] = flow_json
        # dump the config so we can fetch it
        json_out_file = 'params.json'
        with open(os.path.expanduser(json_out_file), 'w+') as outfile:
            json.dump(
                config,
                outfile,
                cls=FlowParamsEncoder,
                sort_keys=True,
                indent=4)
        # fetch values using utility function `get_flow_params`
        imported_flow_params = get_flow_params(config)
        # delete the created file
        os.remove(os.path.expanduser('params.json'))
        # test that this inflows are correct
        self.assertTrue(imported_flow_params["net"].inflows.__dict__ ==
                        flow_params["net"].inflows.__dict__)
        # inflows are compared above; null them out so the net comparison
        # below does not re-compare (or fail on) the inflow objects
        imported_flow_params["net"].inflows = None
        flow_params["net"].inflows = None
        # make sure the rest of the imported flow_params match the originals
        self.assertTrue(imported_flow_params["env"].__dict__ == flow_params[
            "env"].__dict__)
        self.assertTrue(imported_flow_params["initial"].__dict__ ==
                        flow_params["initial"].__dict__)
        self.assertTrue(imported_flow_params["tls"].__dict__ == flow_params[
            "tls"].__dict__)
        self.assertTrue(imported_flow_params["sim"].__dict__ == flow_params[
            "sim"].__dict__)
        self.assertTrue(imported_flow_params["net"].__dict__ == flow_params[
            "net"].__dict__)
        self.assertTrue(
            imported_flow_params["exp_tag"] == flow_params["exp_tag"])
        self.assertTrue(
            imported_flow_params["env_name"] == flow_params["env_name"])
        self.assertTrue(
            imported_flow_params["network"] == flow_params["network"])
        def search_dicts(obj1, obj2):
            """Searches through dictionaries as well as lists of dictionaries
            recursively to determine if any two components are mismatched."""
            for key in obj1.keys():
                # if an next element is a list, either compare the two lists,
                # or if the lists contain dictionaries themselves, look at each
                # dictionary component recursively to check for mismatches
                if isinstance(obj1[key], list):
                    if len(obj1[key]) > 0:
                        if isinstance(obj1[key][0], dict):
                            for i in range(len(obj1[key])):
                                if not search_dicts(obj1[key][i],
                                                    obj2[key][i]):
                                    return False
                        elif obj1[key] != obj2[key]:
                            return False
                # if the next element is a dict, run through it recursively to
                # determine if the separate elements of the dict match
                if isinstance(obj1[key], (dict, collections.OrderedDict)):
                    if not search_dicts(obj1[key], obj2[key]):
                        return False
                # if it is neither a list or a dictionary, compare to determine
                # if the two elements match
                elif obj1[key] != obj2[key]:
                    # if the two elements that are being compared are objects,
                    # make sure that they are the same type
                    if not isinstance(obj1[key], type(obj2[key])):
                        return False
            return True
        # make sure that the Vehicles class that was imported matches the
        # original one
        self.assertTrue(search_dicts(imported_flow_params["veh"].__dict__,
                                     flow_params["veh"].__dict__))
if __name__ == '__main__':
    # run all test cases in this module when executed as a script
    unittest.main()
| 37.701754 | 79 | 0.564604 |
c4914f054f3a1e0a48d26fd96c5cc7889b3383b5 | 8,822 | py | Python | samples/search/entity_search_samples.py | sammydeprez/cognitive-services-python-sdk-samples | deb4483ec1db33138e32ce472a775f3ed874fb80 | [
"MIT"
] | null | null | null | samples/search/entity_search_samples.py | sammydeprez/cognitive-services-python-sdk-samples | deb4483ec1db33138e32ce472a775f3ed874fb80 | [
"MIT"
] | null | null | null | samples/search/entity_search_samples.py | sammydeprez/cognitive-services-python-sdk-samples | deb4483ec1db33138e32ce472a775f3ed874fb80 | [
"MIT"
] | null | null | null | from azure.cognitiveservices.search.entitysearch import EntitySearchAPI
from azure.cognitiveservices.search.entitysearch.models import Place, ErrorResponseException
from msrest.authentication import CognitiveServicesCredentials
# Add your Bing Entity Search subscription key to your environment variables.
# `os` must be imported here: previously it was only imported inside the
# __main__ guard, which runs after this module-level lookup, so importing
# this module raised NameError.
import os
SUBSCRIPTION_KEY = os.environ['BING_ENTITY_SEARCH_SUBSCRIPTION_KEY']
def dominant_entity_lookup(subscription_key):
    """DominantEntityLookup.
    Look up a single entity (Satya Nadella) and print a short description.
    """
    client = EntitySearchAPI(
        endpoint="https://api.cognitive.microsoft.com",
        credentials=CognitiveServicesCredentials(subscription_key)
    )

    try:
        entity_data = client.entities.search(query="satya nadella")
        if not entity_data.entities.value:
            print("Didn't see any data..")
            return
        # keep only the entity the service marks as the dominant answer
        dominant = [candidate for candidate in entity_data.entities.value
                    if candidate.entity_presentation_info.entity_scenario == "DominantEntity"]
        if dominant:
            print(
                'Searched for "Satya Nadella" and found a dominant entity with this description:')
            print(dominant[0].description)
        else:
            print("Couldn't find main entity Satya Nadella!")
    except Exception as err:
        print("Encountered exception. {}".format(err))
def handling_disambiguation(subscription_key):
    """HandlingDisambiguation.
    This will handle disambiguation results for an ambiguous query
    (William Gates): print the dominant entity plus alternatives.
    """
    # The module imports the client under the name `EntitySearchAPI`;
    # the previous `EntitySearchClient` was undefined and raised NameError.
    client = EntitySearchAPI(
        endpoint="https://api.cognitive.microsoft.com",
        credentials=CognitiveServicesCredentials(subscription_key)
    )
    try:
        entity_data = client.entities.search(query="william gates")
        if entity_data.entities.value:
            # find the entity that represents the dominant one
            main_entities = [entity for entity in entity_data.entities.value
                             if entity.entity_presentation_info.entity_scenario == "DominantEntity"]
            disambig_entities = [entity for entity in entity_data.entities.value
                                 if entity.entity_presentation_info.entity_scenario == "DisambiguationItem"]
            if main_entities:
                main_entity = main_entities[0]
                type_hint = main_entity.entity_presentation_info.entity_type_display_hint
                # the optional clause is only filled in when a type hint exists
                # (fixed a stray leading quote in the original format string)
                print('Searched for "William Gates" and found a dominant entity {}with this description:'.format(
                    'with type hint "{}" '.format(type_hint) if type_hint else ''))
                print(main_entity.description)
            else:
                print("Couldn't find a reliable dominant entity for William Gates!")
            if disambig_entities:
                print(
                    "\nThis query is pretty ambiguous and can be referring to multiple things. Did you mean one of these:")
                suggestions = []
                for disambig_entity in disambig_entities:
                    suggestions.append("{} the {}".format(
                        disambig_entity.name, disambig_entity.entity_presentation_info.entity_type_display_hint))
                print(", or ".join(suggestions))
            else:
                print(
                    "We didn't find any disambiguation items for William Gates, so we must be certain what you're talking about!")
        else:
            print("Didn't see any data..")
    except Exception as err:
        print("Encountered exception. {}".format(err))
def restaurant_lookup(subscription_key):
    """RestaurantLookup.
    This will look up a single restaurant (john howie bellevue) and print out
    its phone number (twice: once per access style demonstrated below).
    """
    # The module imports the client under the name `EntitySearchAPI`;
    # the previous `EntitySearchClient` was undefined and raised NameError.
    client = EntitySearchAPI(
        endpoint="https://api.cognitive.microsoft.com",
        credentials=CognitiveServicesCredentials(subscription_key)
    )
    try:
        entity_data = client.entities.search(query="john howie bellevue")
        if entity_data.places.value:
            restaurant = entity_data.places.value[0]
            # Some local entities will be places, others won't be. Depending on what class contains the data you want, you can check
            # using isinstance one of the class, or try to get the attribute and handle the exception (EAFP principle).
            # The recommended Python way is usually EAFP (see https://docs.python.org/3/glossary.html)
            # In this case, the item being returned is technically a Restaurant, but the Place schema has the data we want (telephone)
            # Pythonic approach : EAFP "Easier to ask for forgiveness than permission"
            try:
                telephone = restaurant.telephone
                print(
                    'Searched for "John Howie Bellevue" and found a restaurant with this phone number:')
                print(telephone)
            except AttributeError:
                print("Couldn't find a place!")
            # More cross language approach
            if isinstance(restaurant, Place):
                print(
                    'Searched for "John Howie Bellevue" and found a restaurant with this phone number:')
                print(restaurant.telephone)
            else:
                print("Couldn't find a place!")
        else:
            print("Didn't see any data..")
    except Exception as err:
        print("Encountered exception. {}".format(err))
def multiple_restaurant_lookup(subscription_key):
    """MultipleRestaurantLookup.
    This will look up a list of restaurants (seattle restaurants) and present
    their names and phone numbers.
    """
    # The module imports the client under the name `EntitySearchAPI`;
    # the previous `EntitySearchClient` was undefined and raised NameError.
    client = EntitySearchAPI(
        endpoint="https://api.cognitive.microsoft.com",
        credentials=CognitiveServicesCredentials(subscription_key)
    )
    try:
        restaurants = client.entities.search(query="seattle restaurants")
        if restaurants.places.value:
            # get all the list items that relate to this query
            list_items = [entity for entity in restaurants.places.value
                          if entity.entity_presentation_info.entity_scenario == "ListItem"]
            if list_items:
                suggestions = []
                for place in list_items:
                    # Pythonic approach : EAFP "Easier to ask for forgiveness than permission"
                    # see https://docs.python.org/3/glossary.html
                    try:
                        suggestions.append("{} ({})".format(
                            place.name, place.telephone))
                    except AttributeError:
                        # Fixed: the original passed place.name as a second
                        # print() argument, so the "{}" was never formatted.
                        print(
                            "Unexpectedly found something that isn't a place named '{}'".format(place.name))
                print("Ok, we found these places: ")
                print(", ".join(suggestions))
            else:
                print("Couldn't find any relevant results for \"seattle restaurants\"")
        else:
            print("Didn't see any data..")
    except Exception as err:
        print("Encountered exception. {}".format(err))
def error(subscription_key):
    """Error.
    This triggers a bad request and shows how to read the error response.
    """
    # The module imports the client under the name `EntitySearchAPI`;
    # the previous `EntitySearchClient` was undefined and raised NameError.
    client = EntitySearchAPI(
        endpoint="https://api.cognitive.microsoft.com",
        credentials=CognitiveServicesCredentials(subscription_key)
    )
    try:
        # "no-ty" is not a valid market value, so this call is expected to
        # raise; the assignment is only reached if the service accepts it.
        entity_data = client.entities.search(
            query="tom cruise", market="no-ty")
    except ErrorResponseException as err:
        # The status code of the error should be a good indication of what occurred. However, if you'd like more details, you can dig into the response.
        # Please note that depending on the type of error, the response schema might be different, so you aren't guaranteed a specific error response schema.
        print("Exception occurred, status code {} with reason {}.\n".format(
            err.response.status_code, err))
        # if you'd like more descriptive information (if available)
        if err.error.errors:
            print("This is the errors I have:")
            # loop variable renamed from `error`, which shadowed this function
            for detail in err.error.errors:
                print("Parameter \"{}\" has an invalid value \"{}\". SubCode is \"{}\". Detailed message is \"{}\"".format(
                    detail.parameter, detail.value, detail.sub_code, detail.message))
        else:
            print("There was no details on the error.")
if __name__ == "__main__":
    # Make the repository root importable so `samples.tools` resolves when
    # this file is run directly from its subdirectory.
    import sys, os.path
    sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
    from samples.tools import execute_samples
    # presumably runs each sample function defined in this module with the
    # subscription key — see samples.tools.execute_samples for the contract
    execute_samples(globals(), SUBSCRIPTION_KEY)
| 39.918552 | 157 | 0.627069 |
b66bd554a9873254dd79c77293bb1b8a078756c1 | 264 | py | Python | features.py | dracarys1312/speaker-recognition-py3 | 87bf3745fba85ab27bc63beffadeb5adeebe4928 | [
"Apache-2.0"
] | 240 | 2017-01-16T08:29:16.000Z | 2022-03-08T18:09:42.000Z | features.py | dracarys1312/speaker-recognition-py3 | 87bf3745fba85ab27bc63beffadeb5adeebe4928 | [
"Apache-2.0"
] | 13 | 2018-08-21T14:02:21.000Z | 2020-04-27T10:06:07.000Z | features.py | dracarys1312/speaker-recognition-py3 | 87bf3745fba85ab27bc63beffadeb5adeebe4928 | [
"Apache-2.0"
] | 92 | 2017-01-16T08:29:24.000Z | 2022-03-10T15:16:19.000Z | from python_speech_features import mfcc
import sys

import numpy as np
def get_feature(fs, signal):
    """Compute MFCC features for an audio signal.

    Args:
        fs: sampling rate of the signal, in Hz.
        signal: 1-D array of audio samples.

    Returns:
        Array of MFCC feature vectors as returned by
        python_speech_features.mfcc; may be empty if extraction fails.
    """
    mfcc_feature = mfcc(signal, fs)
    if len(mfcc_feature) == 0:
        # Fixed: the original used Python 2 syntax (`print >> sys.stderr, ...`)
        # which fails at runtime on Python 3 (and `sys` was never imported).
        print("ERROR.. failed to extract mfcc feature:", len(signal),
              file=sys.stderr)
    return mfcc_feature
| 29.333333 | 83 | 0.708333 |
036fd25ae60ae6d66395b3abd79e3c4ac08dbc67 | 280 | py | Python | Alphabet/Small_Alphabet/Static_Small_Letter_While_Loop/While_LOOP_c.py | Polamreddykrishnareddy/PatternPackage | 893ab468a637cd70a0dd8f6d60f7f5c75a3db58f | [
"MIT"
] | null | null | null | Alphabet/Small_Alphabet/Static_Small_Letter_While_Loop/While_LOOP_c.py | Polamreddykrishnareddy/PatternPackage | 893ab468a637cd70a0dd8f6d60f7f5c75a3db58f | [
"MIT"
] | null | null | null | Alphabet/Small_Alphabet/Static_Small_Letter_While_Loop/While_LOOP_c.py | Polamreddykrishnareddy/PatternPackage | 893ab468a637cd70a0dd8f6d60f7f5c75a3db58f | [
"MIT"
] | null | null | null | #c
# Print a 7x7 asterisk pattern shaped like the lowercase letter "c".
GRID = 7
for row in range(GRID):
    line = ""
    for col in range(GRID):
        # a cell is filled on the top edge, left edge, or bottom edge,
        # except for the two right-hand corners (col 0 excluded top/bottom)
        top_edge = row == 0 and col != 0
        left_edge = col == 0 and 0 < row < GRID - 1
        bottom_edge = row == GRID - 1 and col != 0
        line += ("*" if top_edge or left_edge or bottom_edge else " ") + " "
    print(line)
| 20 | 100 | 0.403571 |
641f6b998b15dd61c3524b0f67dd94b4017a2928 | 8,845 | py | Python | tests/test_gmm.py | tatuanb/monai_V1 | 41e492b61c78bb3c303f38b03fe9fdc74a3c2e96 | [
"Apache-2.0"
] | 2,971 | 2019-10-16T23:53:16.000Z | 2022-03-31T20:58:24.000Z | tests/test_gmm.py | catherine1996cn/MONAI | ff9bbfa82763de46cbac75553e340633e3d84ecb | [
"Apache-2.0"
] | 2,851 | 2020-01-10T16:23:44.000Z | 2022-03-31T22:14:53.000Z | tests/test_gmm.py | catherine1996cn/MONAI | ff9bbfa82763de46cbac75553e340633e3d84ecb | [
"Apache-2.0"
] | 614 | 2020-01-14T19:18:01.000Z | 2022-03-31T14:06:14.000Z | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.networks.layers import GaussianMixtureModel
from tests.utils import skip_if_no_cuda
# Each test case is a list of six elements:
#   [description, <value under "Class Count">, <value under "Mixture Count">,
#    features, labels, expected]
# Features/labels/expected are nested lists of shape (batch, channel, *spatial).
# Labels use -1 for "unlabelled" voxels; expected holds one channel per class.
# NOTE(review): the per-case comments ("Class Count", "Mixture Count") do not
# line up with the parameter names in GMMTestCase.test_cuda, which unpacks
# them as (mixture_count, class_count) — confirm the intended ordering
# against GaussianMixtureModel's constructor signature.
TEST_CASES = [
    [
        # Case Description
        "2 batches, 1 dimensions, 1 channels, 2 classes, 2 mixtures",
        # Class Count
        2,
        # Mixture Count
        1,
        # Features
        [
            # Batch 0
            [
                # Channel 0
                [1, 1, 0, 0, 1]
            ],
            # Batch 1
            [
                # Channel 0
                [0, 0.2, 1, 0.8, 0.5]
            ],
        ],
        # Labels
        [
            # Batch 0
            [
                # Channel 0
                [1, -1, 0, -1, 1]
            ],
            # Batch 1
            [
                # Channel 0
                [1, 1, 0, 0, -1]
            ],
        ],
        # Expected
        [
            # Batch 0
            [
                # Channel 0
                [0, 0, 1, 1, 0],
                # Channel 1
                [1, 1, 0, 0, 1],
            ],
            # Batch 1
            [
                # Channel 0
                [0, 0, 1, 1, 0.5],
                # Channel 1
                [1, 1, 0, 0, 0.5],
            ],
        ],
    ],
    [
        # Case Description
        "1 batches, 1 dimensions, 5 channels, 2 classes, 1 mixtures",
        # Class Count
        2,
        # Mixture Count
        1,
        # Features
        [
            # Batch 0
            [
                # Channel 0
                [1.0, 0.9, 0.0, 0.0, 0.0],
                # Channel 1
                [0.0, 0.0, 0.3, 0.3, 0.4],
                # Channel 2
                [0.9, 0.8, 0.0, 0.0, 0.0],
                # Channel 3
                [0.7, 0.9, 0.0, 0.0, 0.0],
                # Channel 4
                [0.2, 0.1, 0.2, 0.2, 0.1],
            ]
        ],
        # Labels
        [
            # Batch 0
            [
                # Channel 0
                [0, 0, -1, 1, 1]
            ]
        ],
        # Expected
        [
            # Batch 0
            [
                # Channel 0
                [1, 1, 0, 0, 0],
                # Channel 1
                [0, 0, 1, 1, 1],
            ]
        ],
    ],
    [
        # Case Description
        "1 batches, 2 dimensions, 2 channels, 4 classes, 4 mixtures",
        # Class Count
        4,
        # Mixture Count
        1,
        # Features
        [
            # Batch 0
            [
                # Channel 0
                [
                    [0.8, 0.8, 0.0, 0.0, 0.0],
                    [1.0, 0.9, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.8, 0.9],
                    [0.0, 0.0, 0.0, 0.9, 1.0],
                ],
                # Channel 1
                [
                    [0.8, 0.8, 0.0, 0.0, 0.0],
                    [0.7, 0.7, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.4, 0.5, 0.0, 0.0, 0.0],
                    [0.7, 0.6, 0.0, 0.0, 0.0],
                ],
            ]
        ],
        # Labels
        [
            # Batch 0
            [
                # Channel 0
                [[-1, 1, -1, 0, -1], [1, -1, -1, -1, -1], [-1, -1, 0, -1, -1], [2, 2, -1, 3, -1], [-1, -1, -1, -1, 3]]
            ]
        ],
        # Expected
        [
            # Batch 0
            [
                # Channel 0
                [
                    [0.0, 0.0, 1.0, 1.0, 1.0],
                    [0.0, 0.0, 1.0, 1.0, 1.0],
                    [1.0, 1.0, 1.0, 1.0, 1.0],
                    [0.0, 0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0, 0.0],
                ],
                # Channel 1
                [
                    [1.0, 1.0, 0.0, 0.0, 0.0],
                    [1.0, 1.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0],
                ],
                # Channel 2
                [
                    [0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0],
                    [1.0, 1.0, 0.0, 0.0, 0.0],
                    [1.0, 1.0, 0.0, 0.0, 0.0],
                ],
                # Channel 3
                [
                    [0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0, 1.0],
                    [0.0, 0.0, 0.0, 1.0, 1.0],
                ],
            ]
        ],
    ],
    [
        # Case Description
        "1 batches, 3 dimensions, 1 channels, 2 classes, 1 mixtures",
        # Class Count
        2,
        # Mixture Count
        1,
        # Features
        [
            # Batch 0
            [
                # Channel 0
                [
                    # Slice 0
                    [[0.7, 0.6, 0.0], [0.5, 0.4, 0.0], [0.0, 0.0, 0.0]],
                    # Slice 1
                    [[0.5, 0.6, 0.0], [0.4, 0.3, 0.0], [0.0, 0.0, 0.0]],
                    # Slice 2
                    [[0.3, 0.3, 0.0], [0.2, 0.1, 0.0], [0.0, 0.0, 0.0]],
                ]
            ]
        ],
        # Labels
        [
            # Batch 0
            [
                # Channel 0
                [
                    # Slice 0
                    [[0, -1, -1], [0, -1, -1], [-1, -1, 1]],
                    # Slice 1
                    [[0, 0, -1], [-1, -1, 1], [-1, 1, 1]],
                    # Slice 2
                    [[0, -1, -1], [-1, -1, -1], [-1, -1, -1]],
                ]
            ]
        ],
        # Expected
        [
            # Batch 0
            [
                # Channel 0
                [
                    # Slice 0
                    [[1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
                    # Slice 1
                    [[1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
                    # Slice 2
                    [[1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
                ],
                # Channel 1
                [
                    # Slice 0
                    [[0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [1.0, 1.0, 1.0]],
                    # Slice 1
                    [[0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [1.0, 1.0, 1.0]],
                    # Slice 2
                    [[0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [1.0, 1.0, 1.0]],
                ],
            ]
        ],
    ],
]
@skip_if_no_cuda
class GMMTestCase(unittest.TestCase):
    """Run GaussianMixtureModel on CUDA against the precomputed TEST_CASES."""

    def setUp(self):
        # Point TORCH_EXTENSIONS_DIR at a private temp dir for this test,
        # remembering any pre-existing value so it can be restored.
        self._var = os.environ.get("TORCH_EXTENSIONS_DIR", None)
        self.tempdir = tempfile.mkdtemp()
        os.environ["TORCH_EXTENSIONS_DIR"] = self.tempdir

    def tearDown(self) -> None:
        # Restore (or clear) the environment variable, then drop the temp dir.
        if self._var is None:
            os.environ.pop("TORCH_EXTENSIONS_DIR", None)
        else:
            os.environ["TORCH_EXTENSIONS_DIR"] = f"{self._var}"
        shutil.rmtree(self.tempdir)

    @parameterized.expand(TEST_CASES)
    def test_cuda(self, test_case_description, mixture_count, class_count, features, labels, expected):
        cuda = torch.device("cuda")
        feature_batch = torch.tensor(features, dtype=torch.float32, device=cuda)
        label_batch = torch.tensor(labels, dtype=torch.int32, device=cuda)

        channel_count = feature_batch.size(1)
        # First construction triggers the extension build (verbose), then two
        # reconstructions exercise the cached-build paths with both verbosity
        # settings.
        gmm = GaussianMixtureModel(channel_count, mixture_count, class_count, verbose_build=True)
        _ = GaussianMixtureModel(channel_count, mixture_count, class_count, verbose_build=False)
        _ = GaussianMixtureModel(channel_count, mixture_count, class_count, verbose_build=True)

        # Fit to the partially-labelled features, then classify them.
        gmm.learn(feature_batch, label_batch)
        predicted = gmm.apply(feature_batch)

        np.testing.assert_allclose(predicted.cpu().numpy(), expected, atol=1e-3)
if __name__ == "__main__":
    # run all test cases in this module when executed as a script
    unittest.main()
| 29.191419 | 118 | 0.36009 |
dad2db95c9242ebe28d4199607772d369b56cce2 | 4,825 | py | Python | mmtbx/command_line/sisa.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | mmtbx/command_line/sisa.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | mmtbx/command_line/sisa.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z | # LIBTBX_SET_DISPATCHER_NAME phenix.sisa
'''
Author : Uervirojnangkoorn, M.
Created : 12/1/2014
Description : Commands linked to sisa libraries.
'''
from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
from libtbx.easy_mp import pool_map
import math
import sys,os
from six.moves import range
def read_input(args):
  """Parse command-line arguments into sisa input parameters.

  Returns the (params, log_text) pair produced by process_input.
  """
  # local import keeps the module importable without pulling in mod_input
  from mmtbx.sisa.optimize.mod_input import process_input
  params, log_text = process_input(args)
  return params, log_text
def sisa_optimize_mproc(micro_cycle_no, stack_no, miller_arrays, indices_selected, cdf_set, iparams):
  """Worker entry point running one micro-cycle of sisa optimization."""
  # local import so each worker process resolves the optimizer itself
  from mmtbx.sisa.optimize.mod_optimize import sisa_optimizer
  optimizer = sisa_optimizer()
  return optimizer.run_optimize(
    micro_cycle_no, stack_no, miller_arrays, indices_selected, cdf_set, iparams)
def update_miller_arrays(miller_arrays, indices_selected, phis_selected, foms_selected):
  """Write refined phases/figures-of-merit back into the miller arrays.

  Entries of arrays 1 (phases) and 2 (FOMs) at the selected indices are
  overwritten, and copies with the updated data replace them in the
  returned five-element list; arrays 0, 3 and 4 are passed through.
  """
  phases = miller_arrays[1].data()
  foms = miller_arrays[2].data()
  for pos, refl_index in enumerate(indices_selected):
    phases[refl_index] = phis_selected[pos]
    foms[refl_index] = foms_selected[pos]
  return [
    miller_arrays[0],
    miller_arrays[1].customized_copy(data=phases),
    miller_arrays[2].customized_copy(data=foms),
    miller_arrays[3],
    miller_arrays[4],
  ]
# Command-line entry point: iterate macro cycles over reflection stacks,
# fan micro-cycles out to worker processes, average the resulting phase
# sets, and write per-stack MTZ files plus a final log.
if __name__=="__main__":
  txt_out = ''
  # NOTE(review): sys.argv[:1] passes only the program name to the parser;
  # if command-line options are expected this looks like it should be
  # sys.argv[1:] — confirm against process_input's expectations.
  iparams, txt_out_input = read_input(sys.argv[:1])
  txt_out += txt_out_input
  from mmtbx.sisa.optimize.mod_mtz import mtz_handler
  mtzh = mtz_handler()
  miller_arrays, fp_sort_index_stacks, txt_out_format = mtzh.format_miller_arrays(iparams)
  print(txt_out_format)
  txt_out += txt_out_format
  for i in range(iparams.n_macro_cycles):
    txt_out += 'Macrocycle no. %4.0f\n'%(i+1)
    print('Macrocycle no. %4.0f\n'%(i+1))
    for j in range(len(fp_sort_index_stacks)):
      #select the index group
      i_sel = fp_sort_index_stacks[j]
      #generate cdf_set for selected reflections
      from mmtbx.sisa.optimize.mod_optimize import sisa_optimizer
      somer = sisa_optimizer()
      hl_selected = flex.hendrickson_lattman([miller_arrays[3].data()[ii_sel] for ii_sel in i_sel])
      cdf_set = somer.calc_pdf_cdf_from_hl(hl_selected)
      # closure binding the current stack's data for the worker pool
      def sisa_optimize_mproc_wrapper(arg):
        return sisa_optimize_mproc(arg, j, miller_arrays, i_sel, cdf_set, iparams)
      sisa_optimize_results = pool_map(
        args=range(iparams.n_micro_cycles),
        func=sisa_optimize_mproc_wrapper,
        processes=iparams.n_processors)
      # collect phases/FOMs/skews from each successful micro-cycle
      list_phis = []
      foms_sum = None
      list_skews = []
      for result in sisa_optimize_results:
        if result is not None:
          phis, foms, skews, txt_out_optim = result
          list_phis.append(phis)
          list_skews.append(skews)
          if foms_sum is None:
            foms_sum = foms[:]
          else:
            foms_sum += foms[:]
          print(txt_out_optim)
          txt_out += txt_out_optim
      #calculate centroid of list_phis
      phis_averaged, skews_averaged, skews_std, n_phis_selected = somer.pickbestidv(
        list_phis,
        list_skews,
        -99, 99)
      # FOMs are averaged arithmetically over the collected micro-cycles
      foms_averaged = foms_sum/len(list_phis)
      skew_phis_averaged, mapcc_phis_averaged, mpe_phis_averaged = somer.calc_stats(\
        miller_arrays, i_sel, phis_averaged, foms_averaged, iparams)
      txt_out_tmp = 'Averaged phis skew=%6.2f mapcc=%6.2f mpe=%6.2f'%( \
        skew_phis_averaged, mapcc_phis_averaged, mpe_phis_averaged*180/math.pi)
      print(txt_out_tmp)
      txt_out += txt_out_tmp
      #update miller_arrays
      miller_arrays = update_miller_arrays(miller_arrays,
          i_sel,
          phis_averaged, foms_averaged)
      #output mtz for optimized stack n
      file_name_out = iparams.project_name + '/' + iparams.run_name + '/' + \
        'sisa_cycle_'+str(i+1)+'_stack_'+str(j+1)+'.mtz'
      mtzh.write_mtz(miller_arrays, file_name_out)
  # persist the accumulated log for the whole run
  f = open(iparams.project_name + '/' + iparams.run_name +'/log.txt', 'w')
  f.write(txt_out)
  f.close()
  print('Sisa done.')
  if iparams.autodm:
    # optionally hand the last MTZ to phenix.autobuild for density modification
    print('Proceed with automatic density modification...(your density-modified map will be AutoBuild_run_n_/overall_best_denmod_map_coeffs.mtz.')
    cmd='phenix.autobuild data=' + file_name_out + ' seq_file=' + str(iparams.seq_file) + \
      ' maps_only=True n_cycle_build_max=1 n_cycle_rebuild_max=0' + \
      ' input_ha_file=' + str(iparams.ha_file) + ' model=' + str(iparams.model_file)
    print('Running: '+cmd)
    os.system(cmd)
| 37.695313 | 146 | 0.692021 |
3876ec6d9febc6f9c81a79c059b778a70b29c347 | 407 | py | Python | getresponse/excs.py | OpenAT/getresponse-python | 8ab41bdbc794e8699ab4fb16af5cf73c6d9bafe3 | [
"MIT"
] | 3 | 2019-08-21T19:51:49.000Z | 2020-09-20T19:15:10.000Z | getresponse/excs.py | OpenAT/getresponse-python | 8ab41bdbc794e8699ab4fb16af5cf73c6d9bafe3 | [
"MIT"
] | 4 | 2019-08-24T13:38:07.000Z | 2021-02-05T11:30:54.000Z | getresponse/excs.py | OpenAT/getresponse-python | 8ab41bdbc794e8699ab4fb16af5cf73c6d9bafe3 | [
"MIT"
] | 8 | 2018-06-23T15:00:32.000Z | 2021-09-09T18:32:31.000Z | class BaseGetResponseError(Exception):
    def __init__(self, message, response, *args, **kwargs):
        """Build the error and keep the originating API response.

        The `response` object is stored on the exception so callers can
        inspect the failed request's details after catching.
        """
        super().__init__(message, *args, **kwargs)
        self.response = response
class UniquePropertyError(BaseGetResponseError):
    """GetResponse client error subtype; the name suggests a uniqueness
    violation reported by the API -- confirm against raising call sites."""
    pass
class NotFoundError(BaseGetResponseError):
    """GetResponse client error subtype; the name suggests a missing
    resource (not-found) response -- confirm against raising call sites."""
    pass
class ValidationError(BaseGetResponseError):
    """GetResponse client error subtype; the name suggests a request
    validation failure -- confirm against raising call sites."""
    pass
class ForbiddenError(BaseGetResponseError):
    """GetResponse client error subtype; the name suggests an authorization
    (forbidden) failure -- confirm against raising call sites."""
    pass
| 20.35 | 59 | 0.739558 |
7dada9d2b8c3c5a230d5ca9b38db28cecb6446af | 1,261 | py | Python | Graphs/CourseSchedule.py | PK-100/Competitive_Programming | d0863feaaa99462b2999e85dcf115f7a6c08bb8d | [
"MIT"
] | 70 | 2018-06-25T21:20:15.000Z | 2022-03-24T03:55:17.000Z | Graphs/CourseSchedule.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
] | 4 | 2018-09-04T13:12:20.000Z | 2021-06-20T08:29:12.000Z | Graphs/CourseSchedule.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
] | 24 | 2018-12-26T05:15:32.000Z | 2022-01-23T23:04:54.000Z | from collections import defaultdict
class Solution:
    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        """Return True iff every course can be completed, i.e. the
        prerequisite graph contains no cycle (LeetCode 207).

        Cycle detection by depth-first search with three node states:
        unvisited, on the current DFS path ("visiting"), and fully explored
        ("visited"). A back edge into a "visiting" node proves a cycle.

        Improvements over the previous version: no Exception used for
        ordinary control flow, and the unused topological-order list is
        removed. Time O(V + E), space O(V + E).
        """
        if not prerequisites:
            return True
        dependencies = defaultdict(list)
        for course, prereq in prerequisites:
            dependencies[course].append(prereq)
        visited, visiting = set(), set()

        def has_cycle(node):
            # True if a cycle is reachable from `node`.
            if node in visiting:
                return True   # back edge: node is on the current DFS path
            if node in visited:
                return False  # already fully explored; no cycle through it
            visiting.add(node)
            for neighbour in dependencies[node]:
                if has_cycle(neighbour):
                    return True
            visiting.remove(node)
            visited.add(node)
            return False

        # Snapshot with list(): dependencies[node] above may grow the
        # defaultdict while we iterate.
        return not any(has_cycle(node) for node in list(dependencies))
# Algorithm: Topological Sort
# Time Complexity : O(V+E)
# V = Number of subjects / units
# E = Number of prerequisites / dependencies
# 207 LCM | 28.022222 | 81 | 0.521015 |
c76088a563b59970b0e93004a5e013bcfaef5e9b | 3,937 | py | Python | baseline/Mixhop/logger.py | WWW2022PAGG/PAGG | f3eddec9157d1b34c100883193221d64c26be7ee | [
"MIT"
] | 67 | 2021-04-02T22:57:32.000Z | 2022-03-25T14:08:47.000Z | baseline/Mixhop/logger.py | WWW2022PAGG/PAGG | f3eddec9157d1b34c100883193221d64c26be7ee | [
"MIT"
] | 1 | 2021-11-13T18:30:19.000Z | 2021-11-24T18:22:22.000Z | baseline/Mixhop/logger.py | WWW2022PAGG/PAGG | f3eddec9157d1b34c100883193221d64c26be7ee | [
"MIT"
] | 7 | 2021-04-07T14:42:11.000Z | 2022-03-20T16:46:06.000Z | import torch
from collections import defaultdict
class Logger(object):
    """ Adapted from https://github.com/snap-stanford/ogb/ """
    def __init__(self, runs, info=None):
        """Keep one list of per-epoch results per run; `info` is free-form."""
        self.info = info
        self.results = [[] for _ in range(runs)]
    def add_result(self, run, result):
        """Record one (train, valid, test) triple for the given run."""
        assert len(result) == 3
        assert 0 <= run < len(self.results)
        self.results[run].append(result)
    def print_statistics(self, run=None):
        """Report accuracy statistics for one run, or a summary over all runs.

        With run=None, also returns (best-valid, final-test) tensors with one
        entry per run, taken at each run's best-validation epoch.
        """
        if run is not None:
            scores = 100 * torch.tensor(self.results[run])
            best_epoch = scores[:, 1].argmax().item()
            print(f'Run {run + 1:02d}:')
            print(f'Highest Train: {scores[:, 0].max():.2f}')
            print(f'Highest Valid: {scores[:, 1].max():.2f}')
            print(f' Final Train: {scores[best_epoch, 0]:.2f}')
            print(f' Final Test: {scores[best_epoch, 2]:.2f}')
            return
        # Aggregate across runs: peak train/valid plus the train/test scores
        # at the epoch where validation accuracy peaked.
        table = 100 * torch.tensor(self.results)
        per_run = []
        for run_scores in table:
            best_epoch = run_scores[:, 1].argmax()
            per_run.append((run_scores[:, 0].max().item(),
                            run_scores[:, 1].max().item(),
                            run_scores[best_epoch, 0].item(),
                            run_scores[best_epoch, 2].item()))
        summary = torch.tensor(per_run)
        print(f'All runs:')
        col = summary[:, 0]
        print(f'Highest Train: {col.mean():.2f} ± {col.std():.2f}')
        col = summary[:, 1]
        print(f'Highest Valid: {col.mean():.2f} ± {col.std():.2f}')
        col = summary[:, 2]
        print(f' Final Train: {col.mean():.2f} ± {col.std():.2f}')
        col = summary[:, 3]
        print(f' Final Test: {col.mean():.2f} ± {col.std():.2f}')
        return summary[:, 1], summary[:, 3]
class SimpleLogger(object):
    """ Adapted from https://github.com/CUAI/CorrectAndSmooth """
    def __init__(self, desc, param_names, num_values=2):
        """Track `num_values` metrics per (run, hyper-parameter tuple).

        desc: free-text experiment description used as a display header.
        param_names: names of the hyper-parameters in each args tuple.
        """
        self.results = defaultdict(dict)
        self.param_names = tuple(param_names)
        self.used_args = list()
        self.desc = desc
        self.num_values = num_values
    def add_result(self, run, args, values):
        """Takes run=int, args=tuple, values=tuple(float)"""
        assert(len(args) == len(self.param_names))
        assert(len(values) == self.num_values)
        self.results[run][args] = values
        if args not in self.used_args:
            self.used_args.append(args)
    def get_best(self, top_k=1):
        """Return the top_k args tuples ranked by the mean of the last metric
        (scaled to percent) across all runs that recorded them."""
        all_results = []
        for args in self.used_args:
            results = [i[args] for i in self.results.values() if args in i]
            results = torch.tensor(results)*100
            results_mean = results.mean(dim=0)[-1]
            results_std = results.std(dim=0)
            all_results.append((args, results_mean))
        results = sorted(all_results, key=lambda x: x[1], reverse=True)[:top_k]
        return [i[0] for i in results]
    def prettyprint(self, x):
        """Format floats to two decimals; everything else via str()."""
        if isinstance(x, float):
            return '%.2f' % x
        return str(x)
    def display(self, args = None):
        """Print mean ± std per metric for each args tuple; returns the
        (runs x num_values) tensor of the last tuple displayed, or None if
        nothing was displayed.

        Fixes: std now uses results_std[i] per metric (was hard-coded
        results_std[1]), and empty `disp_args` no longer raises NameError.
        """
        disp_args = self.used_args if args is None else args
        if len(disp_args) > 1:
            print(f'{self.desc} {self.param_names}, {len(self.results.keys())} runs')
        results = None  # returned unchanged when disp_args is empty
        for args in disp_args:
            results = [i[args] for i in self.results.values() if args in i]
            results = torch.tensor(results)*100
            results_mean = results.mean(dim=0)
            results_std = results.std(dim=0)
            res_str = f'{results_mean[0]:.2f} ± {results_std[0]:.2f}'
            for i in range(1, self.num_values):
                res_str += f' -> {results_mean[i]:.2f} ± {results_std[i]:.2f}'
            print(f'Args {[self.prettyprint(x) for x in args]}: {res_str}')
        if len(disp_args) > 1:
            print()
        return results
| 38.980198 | 85 | 0.537973 |
4054ed150dff5a69d73d0be531136fe5ab29f0d3 | 5,588 | py | Python | looker_client_31/looker_sdk/oidc_group_read.py | ContrastingSounds/looker_sdk_31 | f973434049fff1b605b10086ab8b84f2f62e3489 | [
"MIT"
] | null | null | null | looker_client_31/looker_sdk/oidc_group_read.py | ContrastingSounds/looker_sdk_31 | f973434049fff1b605b10086ab8b84f2f62e3489 | [
"MIT"
] | null | null | null | looker_client_31/looker_sdk/oidc_group_read.py | ContrastingSounds/looker_sdk_31 | f973434049fff1b605b10086ab8b84f2f62e3489 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Experimental Looker API 3.1 Preview
This API 3.1 is in active development. Breaking changes are likely to occur to some API functions in future Looker releases until API 3.1 is officially launched and upgraded to beta status. If you have time and interest to experiment with new or modified services exposed in this embryonic API 3.1, we welcome your participation and feedback! For large development efforts or critical line-of-business projects, we strongly recommend you stick with the API 3.0 while API 3.1 is under construction. # noqa: E501
OpenAPI spec version: 3.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from looker_client_31.looker_sdk.role import Role # noqa: F401,E501
class OIDCGroupRead(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type string; drives to_dict() serialization.
    swagger_types = {
        'name': 'str',
        'roles': 'list[Role]',
        'url': 'str',
        'can': 'dict(str, bool)'
    }
    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'name': 'name',
        'roles': 'roles',
        'url': 'url',
        'can': 'can'
    }
    def __init__(self, name=None, roles=None, url=None, can=None): # noqa: E501
        """OIDCGroupRead - a model defined in Swagger""" # noqa: E501
        # Backing fields for the generated properties below.
        self._name = None
        self._roles = None
        self._url = None
        self._can = None
        self.discriminator = None
        # Only assign attributes explicitly provided by the caller.
        if name is not None:
            self.name = name
        if roles is not None:
            self.roles = roles
        if url is not None:
            self.url = url
        if can is not None:
            self.can = can
    @property
    def name(self):
        """Gets the name of this OIDCGroupRead. # noqa: E501

        Name of group in OIDC # noqa: E501

        :return: The name of this OIDCGroupRead. # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this OIDCGroupRead.

        Name of group in OIDC # noqa: E501

        :param name: The name of this OIDCGroupRead. # noqa: E501
        :type: str
        """
        self._name = name
    @property
    def roles(self):
        """Gets the roles of this OIDCGroupRead. # noqa: E501

        Looker Roles # noqa: E501

        :return: The roles of this OIDCGroupRead. # noqa: E501
        :rtype: list[Role]
        """
        return self._roles
    @roles.setter
    def roles(self, roles):
        """Sets the roles of this OIDCGroupRead.

        Looker Roles # noqa: E501

        :param roles: The roles of this OIDCGroupRead. # noqa: E501
        :type: list[Role]
        """
        self._roles = roles
    @property
    def url(self):
        """Gets the url of this OIDCGroupRead. # noqa: E501

        Link to oidc config # noqa: E501

        :return: The url of this OIDCGroupRead. # noqa: E501
        :rtype: str
        """
        return self._url
    @url.setter
    def url(self, url):
        """Sets the url of this OIDCGroupRead.

        Link to oidc config # noqa: E501

        :param url: The url of this OIDCGroupRead. # noqa: E501
        :type: str
        """
        self._url = url
    @property
    def can(self):
        """Gets the can of this OIDCGroupRead. # noqa: E501

        Operations the current user is able to perform on this object # noqa: E501

        :return: The can of this OIDCGroupRead. # noqa: E501
        :rtype: dict(str, bool)
        """
        return self._can
    @can.setter
    def can(self, can):
        """Sets the can of this OIDCGroupRead.

        Operations the current user is able to perform on this object # noqa: E501

        :param can: The can of this OIDCGroupRead. # noqa: E501
        :type: dict(str, bool)
        """
        self._can = can
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared attributes, recursively serializing nested
        # models (anything exposing to_dict) inside lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Instance-dict comparison: equal iff all backing fields match.
        if not isinstance(other, OIDCGroupRead):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.800995 | 518 | 0.565855 |
d5bf1cb2b5d513df65da44c6b3981872d461fadd | 1,149 | py | Python | cookbook/gravmag_basin2d_trapezoidal.py | XuesongDing/fatiando | 57a0e0802fde2e53628511d3a7a2964e69bb309a | [
"BSD-3-Clause"
] | 179 | 2015-03-08T08:50:45.000Z | 2022-03-20T08:19:05.000Z | cookbook/gravmag_basin2d_trapezoidal.py | XuesongDing/fatiando | 57a0e0802fde2e53628511d3a7a2964e69bb309a | [
"BSD-3-Clause"
] | 207 | 2015-01-12T17:04:57.000Z | 2021-01-08T23:36:11.000Z | cookbook/gravmag_basin2d_trapezoidal.py | XuesongDing/fatiando | 57a0e0802fde2e53628511d3a7a2964e69bb309a | [
"BSD-3-Clause"
] | 114 | 2015-01-29T18:51:22.000Z | 2022-03-25T12:35:43.000Z | """
GravMag: Simple gravity inversion for the relief of a 2D trapezoidal basin
"""
import numpy
from fatiando import utils
from fatiando.mesher import Polygon
from fatiando.gravmag import talwani, basin2d
from fatiando.vis import mpl
# Synthetic truth: a trapezoidal basin polygon with a -100 kg/m^3 density
# contrast relative to the basement.
verts = [(10000, 1.), (90000, 1.), (90000, 7000), (10000, 3330)]
model = Polygon(verts, {'density': -100})
# Observation profile at the surface (z = 0), one station per km.
x = numpy.arange(0., 100000., 1000.)
z = numpy.zeros_like(x)
# Forward-model the vertical gravity anomaly and contaminate it with noise
# (second argument is the noise level passed to utils.contaminate).
gz = utils.contaminate(talwani.gz(x, z, [model]), 0.5)
# Invert for the basin relief: the two top corners (verts[0:2]) are fixed,
# Levenberg-Marquardt starts from the initial guess [9000, 500].
solver = basin2d.Trapezoidal(x, z, gz, verts[0:2], density=-100).config(
    'levmarq', initial=[9000, 500]).fit()
estimate = solver.estimate_
# Top panel: observed vs. predicted anomaly along the profile.
mpl.figure()
mpl.subplot(2, 1, 1)
mpl.title("Gravity anomaly")
mpl.plot(x, gz, 'ok', label='Observed')
mpl.plot(x, solver.predicted(), '-r', linewidth=2, label='Predicted')
mpl.legend(loc='lower left', numpoints=1)
mpl.ylabel("mGal")
mpl.xlim(0, 100000)
# Bottom panel: estimated basin geometry against the true model.
mpl.subplot(2, 1, 2)
mpl.polygon(estimate, 'o-r', linewidth=2, fill='r',
            alpha=0.3, label='Estimated')
mpl.polygon(model, '--k', linewidth=2, label='True')
mpl.legend(loc='lower left', numpoints=1)
mpl.xlabel("X")
mpl.ylabel("Z")
# Note: y-axis is flipped (10000 down to -500) so depth increases downward.
mpl.set_area((0, 100000, 10000, -500))
mpl.show()
| 31.054054 | 74 | 0.686684 |
d7175192e82569bf5a1d8a1140bea965480d982a | 104,287 | py | Python | resources_rc.py | adnaniazi/pytimer | 7d50599b3f4e18121dd40fdf41a3cc31b7a17abb | [
"CC-BY-4.0"
] | null | null | null | resources_rc.py | adnaniazi/pytimer | 7d50599b3f4e18121dd40fdf41a3cc31b7a17abb | [
"CC-BY-4.0"
] | null | null | null | resources_rc.py | adnaniazi/pytimer | 7d50599b3f4e18121dd40fdf41a3cc31b7a17abb | [
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Thu Jul 2 10:07:40 2015
# by: The Resource Compiler for PyQt (Qt v4.8.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = b"\
\x00\x00\x32\xd7\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x02\x2b\x00\x00\x02\x2b\x08\x06\x00\x00\x00\x2e\xae\xd9\x28\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x0a\x4f\x69\x43\x43\x50\x50\x68\x6f\
\x74\x6f\x73\x68\x6f\x70\x20\x49\x43\x43\x20\x70\x72\x6f\x66\x69\
\x6c\x65\x00\x00\x78\xda\x9d\x53\x67\x54\x53\xe9\x16\x3d\xf7\xde\
\xf4\x42\x4b\x88\x80\x94\x4b\x6f\x52\x15\x08\x20\x52\x42\x8b\x80\
\x14\x91\x26\x2a\x21\x09\x10\x4a\x88\x21\xa1\xd9\x15\x51\xc1\x11\
\x45\x45\x04\x1b\xc8\xa0\x88\x03\x8e\x8e\x80\x8c\x15\x51\x2c\x0c\
\x8a\x0a\xd8\x07\xe4\x21\xa2\x8e\x83\xa3\x88\x8a\xca\xfb\xe1\x7b\
\xa3\x6b\xd6\xbc\xf7\xe6\xcd\xfe\xb5\xd7\x3e\xe7\xac\xf3\x9d\xb3\
\xcf\x07\xc0\x08\x0c\x96\x48\x33\x51\x35\x80\x0c\xa9\x42\x1e\x11\
\xe0\x83\xc7\xc4\xc6\xe1\xe4\x2e\x40\x81\x0a\x24\x70\x00\x10\x08\
\xb3\x64\x21\x73\xfd\x23\x01\x00\xf8\x7e\x3c\x3c\x2b\x22\xc0\x07\
\xbe\x00\x01\x78\xd3\x0b\x08\x00\xc0\x4d\x9b\xc0\x30\x1c\x87\xff\
\x0f\xea\x42\x99\x5c\x01\x80\x84\x01\xc0\x74\x91\x38\x4b\x08\x80\
\x14\x00\x40\x7a\x8e\x42\xa6\x00\x40\x46\x01\x80\x9d\x98\x26\x53\
\x00\xa0\x04\x00\x60\xcb\x63\x62\xe3\x00\x50\x2d\x00\x60\x27\x7f\
\xe6\xd3\x00\x80\x9d\xf8\x99\x7b\x01\x00\x5b\x94\x21\x15\x01\xa0\
\x91\x00\x20\x13\x65\x88\x44\x00\x68\x3b\x00\xac\xcf\x56\x8a\x45\
\x00\x58\x30\x00\x14\x66\x4b\xc4\x39\x00\xd8\x2d\x00\x30\x49\x57\
\x66\x48\x00\xb0\xb7\x00\xc0\xce\x10\x0b\xb2\x00\x08\x0c\x00\x30\
\x51\x88\x85\x29\x00\x04\x7b\x00\x60\xc8\x23\x23\x78\x00\x84\x99\
\x00\x14\x46\xf2\x57\x3c\xf1\x2b\xae\x10\xe7\x2a\x00\x00\x78\x99\
\xb2\x3c\xb9\x24\x39\x45\x81\x5b\x08\x2d\x71\x07\x57\x57\x2e\x1e\
\x28\xce\x49\x17\x2b\x14\x36\x61\x02\x61\x9a\x40\x2e\xc2\x79\x99\
\x19\x32\x81\x34\x0f\xe0\xf3\xcc\x00\x00\xa0\x91\x15\x11\xe0\x83\
\xf3\xfd\x78\xce\x0e\xae\xce\xce\x36\x8e\xb6\x0e\x5f\x2d\xea\xbf\
\x06\xff\x22\x62\x62\xe3\xfe\xe5\xcf\xab\x70\x40\x00\x00\xe1\x74\
\x7e\xd1\xfe\x2c\x2f\xb3\x1a\x80\x3b\x06\x80\x6d\xfe\xa2\x25\xee\
\x04\x68\x5e\x0b\xa0\x75\xf7\x8b\x66\xb2\x0f\x40\xb5\x00\xa0\xe9\
\xda\x57\xf3\x70\xf8\x7e\x3c\x3c\x45\xa1\x90\xb9\xd9\xd9\xe5\xe4\
\xe4\xd8\x4a\xc4\x42\x5b\x61\xca\x57\x7d\xfe\x67\xc2\x5f\xc0\x57\
\xfd\x6c\xf9\x7e\x3c\xfc\xf7\xf5\xe0\xbe\xe2\x24\x81\x32\x5d\x81\
\x47\x04\xf8\xe0\xc2\xcc\xf4\x4c\xa5\x1c\xcf\x92\x09\x84\x62\xdc\
\xe6\x8f\x47\xfc\xb7\x0b\xff\xfc\x1d\xd3\x22\xc4\x49\x62\xb9\x58\
\x2a\x14\xe3\x51\x12\x71\x8e\x44\x9a\x8c\xf3\x32\xa5\x22\x89\x42\
\x92\x29\xc5\x25\xd2\xff\x64\xe2\xdf\x2c\xfb\x03\x3e\xdf\x35\x00\
\xb0\x6a\x3e\x01\x7b\x91\x2d\xa8\x5d\x63\x03\xf6\x4b\x27\x10\x58\
\x74\xc0\xe2\xf7\x00\x00\xf2\xbb\x6f\xc1\xd4\x28\x08\x03\x80\x68\
\x83\xe1\xcf\x77\xff\xef\x3f\xfd\x47\xa0\x25\x00\x80\x66\x49\x92\
\x71\x00\x00\x5e\x44\x24\x2e\x54\xca\xb3\x3f\xc7\x08\x00\x00\x44\
\xa0\x81\x2a\xb0\x41\x1b\xf4\xc1\x18\x2c\xc0\x06\x1c\xc1\x05\xdc\
\xc1\x0b\xfc\x60\x36\x84\x42\x24\xc4\xc2\x42\x10\x42\x0a\x64\x80\
\x1c\x72\x60\x29\xac\x82\x42\x28\x86\xcd\xb0\x1d\x2a\x60\x2f\xd4\
\x40\x1d\x34\xc0\x51\x68\x86\x93\x70\x0e\x2e\xc2\x55\xb8\x0e\x3d\
\x70\x0f\xfa\x61\x08\x9e\xc1\x28\xbc\x81\x09\x04\x41\xc8\x08\x13\
\x61\x21\xda\x88\x01\x62\x8a\x58\x23\x8e\x08\x17\x99\x85\xf8\x21\
\xc1\x48\x04\x12\x8b\x24\x20\xc9\x88\x14\x51\x22\x4b\x91\x35\x48\
\x31\x52\x8a\x54\x20\x55\x48\x1d\xf2\x3d\x72\x02\x39\x87\x5c\x46\
\xba\x91\x3b\xc8\x00\x32\x82\xfc\x86\xbc\x47\x31\x94\x81\xb2\x51\
\x3d\xd4\x0c\xb5\x43\xb9\xa8\x37\x1a\x84\x46\xa2\x0b\xd0\x64\x74\
\x31\x9a\x8f\x16\xa0\x9b\xd0\x72\xb4\x1a\x3d\x8c\x36\xa1\xe7\xd0\
\xab\x68\x0f\xda\x8f\x3e\x43\xc7\x30\xc0\xe8\x18\x07\x33\xc4\x6c\
\x30\x2e\xc6\xc3\x42\xb1\x38\x2c\x09\x93\x63\xcb\xb1\x22\xac\x0c\
\xab\xc6\x1a\xb0\x56\xac\x03\xbb\x89\xf5\x63\xcf\xb1\x77\x04\x12\
\x81\x45\xc0\x09\x36\x04\x77\x42\x20\x61\x1e\x41\x48\x58\x4c\x58\
\x4e\xd8\x48\xa8\x20\x1c\x24\x34\x11\xda\x09\x37\x09\x03\x84\x51\
\xc2\x27\x22\x93\xa8\x4b\xb4\x26\xba\x11\xf9\xc4\x18\x62\x32\x31\
\x87\x58\x48\x2c\x23\xd6\x12\x8f\x13\x2f\x10\x7b\x88\x43\xc4\x37\
\x24\x12\x89\x43\x32\x27\xb9\x90\x02\x49\xb1\xa4\x54\xd2\x12\xd2\
\x46\xd2\x6e\x52\x23\xe9\x2c\xa9\x9b\x34\x48\x1a\x23\x93\xc9\xda\
\x64\x6b\xb2\x07\x39\x94\x2c\x20\x2b\xc8\x85\xe4\x9d\xe4\xc3\xe4\
\x33\xe4\x1b\xe4\x21\xf2\x5b\x0a\x9d\x62\x40\x71\xa4\xf8\x53\xe2\
\x28\x52\xca\x6a\x4a\x19\xe5\x10\xe5\x34\xe5\x06\x65\x98\x32\x41\
\x55\xa3\x9a\x52\xdd\xa8\xa1\x54\x11\x35\x8f\x5a\x42\xad\xa1\xb6\
\x52\xaf\x51\x87\xa8\x13\x34\x75\x9a\x39\xcd\x83\x16\x49\x4b\xa5\
\xad\xa2\x95\xd3\x1a\x68\x17\x68\xf7\x69\xaf\xe8\x74\xba\x11\xdd\
\x95\x1e\x4e\x97\xd0\x57\xd2\xcb\xe9\x47\xe8\x97\xe8\x03\xf4\x77\
\x0c\x0d\x86\x15\x83\xc7\x88\x67\x28\x19\x9b\x18\x07\x18\x67\x19\
\x77\x18\xaf\x98\x4c\xa6\x19\xd3\x8b\x19\xc7\x54\x30\x37\x31\xeb\
\x98\xe7\x99\x0f\x99\x6f\x55\x58\x2a\xb6\x2a\x7c\x15\x91\xca\x0a\
\x95\x4a\x95\x26\x95\x1b\x2a\x2f\x54\xa9\xaa\xa6\xaa\xde\xaa\x0b\
\x55\xf3\x55\xcb\x54\x8f\xa9\x5e\x53\x7d\xae\x46\x55\x33\x53\xe3\
\xa9\x09\xd4\x96\xab\x55\xaa\x9d\x50\xeb\x53\x1b\x53\x67\xa9\x3b\
\xa8\x87\xaa\x67\xa8\x6f\x54\x3f\xa4\x7e\x59\xfd\x89\x06\x59\xc3\
\x4c\xc3\x4f\x43\xa4\x51\xa0\xb1\x5f\xe3\xbc\xc6\x20\x0b\x63\x19\
\xb3\x78\x2c\x21\x6b\x0d\xab\x86\x75\x81\x35\xc4\x26\xb1\xcd\xd9\
\x7c\x76\x2a\xbb\x98\xfd\x1d\xbb\x8b\x3d\xaa\xa9\xa1\x39\x43\x33\
\x4a\x33\x57\xb3\x52\xf3\x94\x66\x3f\x07\xe3\x98\x71\xf8\x9c\x74\
\x4e\x09\xe7\x28\xa7\x97\xf3\x7e\x8a\xde\x14\xef\x29\xe2\x29\x1b\
\xa6\x34\x4c\xb9\x31\x65\x5c\x6b\xaa\x96\x97\x96\x58\xab\x48\xab\
\x51\xab\x47\xeb\xbd\x36\xae\xed\xa7\x9d\xa6\xbd\x45\xbb\x59\xfb\
\x81\x0e\x41\xc7\x4a\x27\x5c\x27\x47\x67\x8f\xce\x05\x9d\xe7\x53\
\xd9\x53\xdd\xa7\x0a\xa7\x16\x4d\x3d\x3a\xf5\xae\x2e\xaa\x6b\xa5\
\x1b\xa1\xbb\x44\x77\xbf\x6e\xa7\xee\x98\x9e\xbe\x5e\x80\x9e\x4c\
\x6f\xa7\xde\x79\xbd\xe7\xfa\x1c\x7d\x2f\xfd\x54\xfd\x6d\xfa\xa7\
\xf5\x47\x0c\x58\x06\xb3\x0c\x24\x06\xdb\x0c\xce\x18\x3c\xc5\x35\
\x71\x6f\x3c\x1d\x2f\xc7\xdb\xf1\x51\x43\x5d\xc3\x40\x43\xa5\x61\
\x95\x61\x97\xe1\x84\x91\xb9\xd1\x3c\xa3\xd5\x46\x8d\x46\x0f\x8c\
\x69\xc6\x5c\xe3\x24\xe3\x6d\xc6\x6d\xc6\xa3\x26\x06\x26\x21\x26\
\x4b\x4d\xea\x4d\xee\x9a\x52\x4d\xb9\xa6\x29\xa6\x3b\x4c\x3b\x4c\
\xc7\xcd\xcc\xcd\xa2\xcd\xd6\x99\x35\x9b\x3d\x31\xd7\x32\xe7\x9b\
\xe7\x9b\xd7\x9b\xdf\xb7\x60\x5a\x78\x5a\x2c\xb6\xa8\xb6\xb8\x65\
\x49\xb2\xe4\x5a\xa6\x59\xee\xb6\xbc\x6e\x85\x5a\x39\x59\xa5\x58\
\x55\x5a\x5d\xb3\x46\xad\x9d\xad\x25\xd6\xbb\xad\xbb\xa7\x11\xa7\
\xb9\x4e\x93\x4e\xab\x9e\xd6\x67\xc3\xb0\xf1\xb6\xc9\xb6\xa9\xb7\
\x19\xb0\xe5\xd8\x06\xdb\xae\xb6\x6d\xb6\x7d\x61\x67\x62\x17\x67\
\xb7\xc5\xae\xc3\xee\x93\xbd\x93\x7d\xba\x7d\x8d\xfd\x3d\x07\x0d\
\x87\xd9\x0e\xab\x1d\x5a\x1d\x7e\x73\xb4\x72\x14\x3a\x56\x3a\xde\
\x9a\xce\x9c\xee\x3f\x7d\xc5\xf4\x96\xe9\x2f\x67\x58\xcf\x10\xcf\
\xd8\x33\xe3\xb6\x13\xcb\x29\xc4\x69\x9d\x53\x9b\xd3\x47\x67\x17\
\x67\xb9\x73\x83\xf3\x88\x8b\x89\x4b\x82\xcb\x2e\x97\x3e\x2e\x9b\
\x1b\xc6\xdd\xc8\xbd\xe4\x4a\x74\xf5\x71\x5d\xe1\x7a\xd2\xf5\x9d\
\x9b\xb3\x9b\xc2\xed\xa8\xdb\xaf\xee\x36\xee\x69\xee\x87\xdc\x9f\
\xcc\x34\x9f\x29\x9e\x59\x33\x73\xd0\xc3\xc8\x43\xe0\x51\xe5\xd1\
\x3f\x0b\x9f\x95\x30\x6b\xdf\xac\x7e\x4f\x43\x4f\x81\x67\xb5\xe7\
\x23\x2f\x63\x2f\x91\x57\xad\xd7\xb0\xb7\xa5\x77\xaa\xf7\x61\xef\
\x17\x3e\xf6\x3e\x72\x9f\xe3\x3e\xe3\x3c\x37\xde\x32\xde\x59\x5f\
\xcc\x37\xc0\xb7\xc8\xb7\xcb\x4f\xc3\x6f\x9e\x5f\x85\xdf\x43\x7f\
\x23\xff\x64\xff\x7a\xff\xd1\x00\xa7\x80\x25\x01\x67\x03\x89\x81\
\x41\x81\x5b\x02\xfb\xf8\x7a\x7c\x21\xbf\x8e\x3f\x3a\xdb\x65\xf6\
\xb2\xd9\xed\x41\x8c\xa0\xb9\x41\x15\x41\x8f\x82\xad\x82\xe5\xc1\
\xad\x21\x68\xc8\xec\x90\xad\x21\xf7\xe7\x98\xce\x91\xce\x69\x0e\
\x85\x50\x7e\xe8\xd6\xd0\x07\x61\xe6\x61\x8b\xc3\x7e\x0c\x27\x85\
\x87\x85\x57\x86\x3f\x8e\x70\x88\x58\x1a\xd1\x31\x97\x35\x77\xd1\
\xdc\x43\x73\xdf\x44\xfa\x44\x96\x44\xde\x9b\x67\x31\x4f\x39\xaf\
\x2d\x4a\x35\x2a\x3e\xaa\x2e\x6a\x3c\xda\x37\xba\x34\xba\x3f\xc6\
\x2e\x66\x59\xcc\xd5\x58\x9d\x58\x49\x6c\x4b\x1c\x39\x2e\x2a\xae\
\x36\x6e\x6c\xbe\xdf\xfc\xed\xf3\x87\xe2\x9d\xe2\x0b\xe3\x7b\x17\
\x98\x2f\xc8\x5d\x70\x79\xa1\xce\xc2\xf4\x85\xa7\x16\xa9\x2e\x12\
\x2c\x3a\x96\x40\x4c\x88\x4e\x38\x94\xf0\x41\x10\x2a\xa8\x16\x8c\
\x25\xf2\x13\x77\x25\x8e\x0a\x79\xc2\x1d\xc2\x67\x22\x2f\xd1\x36\
\xd1\x88\xd8\x43\x5c\x2a\x1e\x4e\xf2\x48\x2a\x4d\x7a\x92\xec\x91\
\xbc\x35\x79\x24\xc5\x33\xa5\x2c\xe5\xb9\x84\x27\xa9\x90\xbc\x4c\
\x0d\x4c\xdd\x9b\x3a\x9e\x16\x9a\x76\x20\x6d\x32\x3d\x3a\xbd\x31\
\x83\x92\x91\x90\x71\x42\xaa\x21\x4d\x93\xb6\x67\xea\x67\xe6\x66\
\x76\xcb\xac\x65\x85\xb2\xfe\xc5\x6e\x8b\xb7\x2f\x1e\x95\x07\xc9\
\x6b\xb3\x90\xac\x05\x59\x2d\x0a\xb6\x42\xa6\xe8\x54\x5a\x28\xd7\
\x2a\x07\xb2\x67\x65\x57\x66\xbf\xcd\x89\xca\x39\x96\xab\x9e\x2b\
\xcd\xed\xcc\xb3\xca\xdb\x90\x37\x9c\xef\x9f\xff\xed\x12\xc2\x12\
\xe1\x92\xb6\xa5\x86\x4b\x57\x2d\x1d\x58\xe6\xbd\xac\x6a\x39\xb2\
\x3c\x71\x79\xdb\x0a\xe3\x15\x05\x2b\x86\x56\x06\xac\x3c\xb8\x8a\
\xb6\x2a\x6d\xd5\x4f\xab\xed\x57\x97\xae\x7e\xbd\x26\x7a\x4d\x6b\
\x81\x5e\xc1\xca\x82\xc1\xb5\x01\x6b\xeb\x0b\x55\x0a\xe5\x85\x7d\
\xeb\xdc\xd7\xed\x5d\x4f\x58\x2f\x59\xdf\xb5\x61\xfa\x86\x9d\x1b\
\x3e\x15\x89\x8a\xae\x14\xdb\x17\x97\x15\x7f\xd8\x28\xdc\x78\xe5\
\x1b\x87\x6f\xca\xbf\x99\xdc\x94\xb4\xa9\xab\xc4\xb9\x64\xcf\x66\
\xd2\x66\xe9\xe6\xde\x2d\x9e\x5b\x0e\x96\xaa\x97\xe6\x97\x0e\x6e\
\x0d\xd9\xda\xb4\x0d\xdf\x56\xb4\xed\xf5\xf6\x45\xdb\x2f\x97\xcd\
\x28\xdb\xbb\x83\xb6\x43\xb9\xa3\xbf\x3c\xb8\xbc\x65\xa7\xc9\xce\
\xcd\x3b\x3f\x54\xa4\x54\xf4\x54\xfa\x54\x36\xee\xd2\xdd\xb5\x61\
\xd7\xf8\x6e\xd1\xee\x1b\x7b\xbc\xf6\x34\xec\xd5\xdb\x5b\xbc\xf7\
\xfd\x3e\xc9\xbe\xdb\x55\x01\x55\x4d\xd5\x66\xd5\x65\xfb\x49\xfb\
\xb3\xf7\x3f\xae\x89\xaa\xe9\xf8\x96\xfb\x6d\x5d\xad\x4e\x6d\x71\
\xed\xc7\x03\xd2\x03\xfd\x07\x23\x0e\xb6\xd7\xb9\xd4\xd5\x1d\xd2\
\x3d\x54\x52\x8f\xd6\x2b\xeb\x47\x0e\xc7\x1f\xbe\xfe\x9d\xef\x77\
\x2d\x0d\x36\x0d\x55\x8d\x9c\xc6\xe2\x23\x70\x44\x79\xe4\xe9\xf7\
\x09\xdf\xf7\x1e\x0d\x3a\xda\x76\x8c\x7b\xac\xe1\x07\xd3\x1f\x76\
\x1d\x67\x1d\x2f\x6a\x42\x9a\xf2\x9a\x46\x9b\x53\x9a\xfb\x5b\x62\
\x5b\xba\x4f\xcc\x3e\xd1\xd6\xea\xde\x7a\xfc\x47\xdb\x1f\x0f\x9c\
\x34\x3c\x59\x79\x4a\xf3\x54\xc9\x69\xda\xe9\x82\xd3\x93\x67\xf2\
\xcf\x8c\x9d\x95\x9d\x7d\x7e\x2e\xf9\xdc\x60\xdb\xa2\xb6\x7b\xe7\
\x63\xce\xdf\x6a\x0f\x6f\xef\xba\x10\x74\xe1\xd2\x45\xff\x8b\xe7\
\x3b\xbc\x3b\xce\x5c\xf2\xb8\x74\xf2\xb2\xdb\xe5\x13\x57\xb8\x57\
\x9a\xaf\x3a\x5f\x6d\xea\x74\xea\x3c\xfe\x93\xd3\x4f\xc7\xbb\x9c\
\xbb\x9a\xae\xb9\x5c\x6b\xb9\xee\x7a\xbd\xb5\x7b\x66\xf7\xe9\x1b\
\x9e\x37\xce\xdd\xf4\xbd\x79\xf1\x16\xff\xd6\xd5\x9e\x39\x3d\xdd\
\xbd\xf3\x7a\x6f\xf7\xc5\xf7\xf5\xdf\x16\xdd\x7e\x72\x27\xfd\xce\
\xcb\xbb\xd9\x77\x27\xee\xad\xbc\x4f\xbc\x5f\xf4\x40\xed\x41\xd9\
\x43\xdd\x87\xd5\x3f\x5b\xfe\xdc\xd8\xef\xdc\x7f\x6a\xc0\x77\xa0\
\xf3\xd1\xdc\x47\xf7\x06\x85\x83\xcf\xfe\x91\xf5\x8f\x0f\x43\x05\
\x8f\x99\x8f\xcb\x86\x0d\x86\xeb\x9e\x38\x3e\x39\x39\xe2\x3f\x72\
\xfd\xe9\xfc\xa7\x43\xcf\x64\xcf\x26\x9e\x17\xfe\xa2\xfe\xcb\xae\
\x17\x16\x2f\x7e\xf8\xd5\xeb\xd7\xce\xd1\x98\xd1\xa1\x97\xf2\x97\
\x93\xbf\x6d\x7c\xa5\xfd\xea\xc0\xeb\x19\xaf\xdb\xc6\xc2\xc6\x1e\
\xbe\xc9\x78\x33\x31\x5e\xf4\x56\xfb\xed\xc1\x77\xdc\x77\x1d\xef\
\xa3\xdf\x0f\x4f\xe4\x7c\x20\x7f\x28\xff\x68\xf9\xb1\xf5\x53\xd0\
\xa7\xfb\x93\x19\x93\x93\xff\x04\x03\x98\xf3\xfc\x63\x33\x2d\xdb\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x25\x00\x00\x80\x83\
\x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x6f\x92\x5f\xc5\x46\x00\x00\x28\x02\
\x49\x44\x41\x54\x78\xda\xec\xdd\x79\xfc\x6d\x75\x5d\xef\xf1\xd7\
\x8f\x03\xe7\x80\x80\x88\xa2\x28\x0e\x89\xe4\x98\xf3\x48\xce\x66\
\x99\x68\xa6\xde\x32\x87\xf2\x6a\x9a\xa6\x95\x0d\xda\x55\xaf\x69\
\x96\x95\x59\xb7\xd4\xae\xde\x66\xbd\x59\x96\xe1\x50\xa6\xa9\x39\
\xe3\x80\x43\x5d\x01\x07\xc4\x01\xc3\x01\x45\x45\x45\x01\x91\xf1\
\xfc\xee\x1f\xeb\x07\x9e\xc0\x33\xfc\xce\x6f\xd8\x7b\xad\xfd\x7c\
\x3e\x1e\xdf\xc7\x01\xfd\x71\x7e\x6b\x7d\xd6\x77\xef\xfd\xde\xdf\
\xf5\x5d\xdf\xef\xd2\xf2\xf2\x72\x00\x00\xf3\x6a\x1f\x25\x00\x00\
\x84\x15\x00\x00\x61\x05\x00\x10\x56\x00\x00\x84\x15\x00\x00\x61\
\x05\x00\x10\x56\x00\x00\x84\x15\x00\x40\x58\x01\x00\x10\x56\x00\
\x00\x84\x15\x00\x40\x58\x01\x00\x10\x56\x00\x00\x61\x05\x00\x40\
\x58\x01\x00\x10\x56\x00\x00\x61\x05\x00\x40\x58\x01\x00\x84\x15\
\x00\x00\x61\x05\x00\x40\x58\x01\x00\x84\x15\x00\x00\x61\x05\x00\
\x10\x56\x00\x00\x84\x15\x00\x00\x61\x05\x00\x10\x56\x00\x00\x84\
\x15\x00\x00\x61\x05\x00\x10\x56\x00\x00\x84\x15\x00\x40\x58\x01\
\x00\x10\x56\x00\x00\x84\x15\x00\x40\x58\x01\x00\x10\x56\x00\x00\
\x61\x05\x00\x40\x58\x01\x00\x10\x56\x00\x00\x61\x05\x00\x40\x58\
\x01\x00\x84\x15\x00\x00\x61\x05\x00\x40\x58\x01\x00\x84\x15\x00\
\x00\x61\x05\x00\x10\x56\x00\x00\x84\x15\x00\x00\x61\x05\x00\x10\
\x56\x00\x00\x84\x15\x00\x00\x61\x05\x00\x10\x56\x00\x00\x84\x15\
\x00\x40\x58\x01\x00\x10\x56\x00\x00\x84\x15\x00\x40\x58\x01\x00\
\x10\x56\x00\x00\x61\x05\x00\x40\x58\x01\x00\x10\x56\x00\x00\x61\
\x05\x00\x40\x58\x01\x00\x84\x15\x00\x00\x61\x05\x00\x40\x58\x01\
\x00\x84\x15\x00\x00\x61\x05\x00\x10\x56\x00\x00\x84\x15\x00\x00\
\x61\x05\x00\x10\x56\x00\x00\x84\x15\x00\x00\x61\x05\x00\x10\x56\
\x00\x00\x84\x15\x00\x40\x58\x01\x00\x10\x56\x00\x00\x84\x15\x00\
\x40\x58\x01\x00\x10\x56\x00\x00\x61\x05\x00\x40\x58\x01\x00\x10\
\x56\x00\x00\x61\x05\x00\x40\x58\x01\x00\x84\x15\x00\x00\x61\x05\
\x00\x40\x58\x01\x00\x84\x15\x00\x00\x61\x05\x00\x10\x56\x00\x00\
\x84\x15\x00\x00\x61\x05\x00\x10\x56\x00\x00\x84\x15\x00\x00\x61\
\x05\x00\x10\x56\x00\x00\x84\x15\x00\x40\x58\x01\x00\x98\x8d\x7d\
\x77\xfc\x97\xa5\xa5\x25\x15\x01\x98\x0f\x07\x57\x77\xa9\x6e\x5f\
\xdd\xb8\xba\x4e\x75\x95\x95\xf7\xed\x45\x7b\xb3\x5e\xae\x2e\xae\
\xbe\x55\x7d\xa9\xfa\x44\xf5\xa1\xea\xf8\xea\x9b\xba\xca\x44\x2e\
\xf2\xf2\xf2\xae\xff\xcf\x4b\x1b\x00\x33\xb5\x7f\xf5\xc8\xea\xcd\
\xd5\x85\x2b\x1f\xd2\xda\xce\xdb\xc5\xd5\x3b\xaa\xc7\x56\x07\xea\
\x3e\xe3\x0f\x2b\x3b\x6b\xc2\x0a\xc0\xec\x5d\xa9\x7a\x7a\xf5\x15\
\x01\x64\xaf\xdb\x37\xaa\xdf\x6e\x18\x91\x42\x58\x01\x60\x1d\x3d\
\xb0\xfa\x82\xb0\xb1\x6e\xed\xcb\xd5\x23\x74\x2b\x61\x05\x80\xb5\
\xdb\xbf\x7a\x89\x70\xb1\x61\xed\x1f\x8d\xb2\x08\x2b\x00\xec\xbd\
\xc3\xab\xff\x10\x28\x36\xbc\x7d\xac\xba\x9e\xee\x26\xac\x00\xb0\
\x3a\x47\x54\x9f\x14\x24\x36\xad\x7d\xbe\x3a\x4a\xb7\x13\x56\x00\
\xd8\x33\x57\xa9\x4e\x16\x20\x36\xbd\x9d\xb6\x32\x9a\xc5\x48\xc3\
\x8a\x45\xe1\x00\x36\xc7\x3e\xd5\xb1\xd5\xcd\x94\x62\xd3\x5d\xbf\
\xfa\xe7\x6a\x3f\xa5\x98\x40\x92\x01\x60\xc3\x3c\xdd\x08\xc7\xcc\
\xdb\xf3\x74\xc3\x71\x8e\xac\x2c\xed\x18\x52\xac\x60\x0b\xb0\x21\
\x6e\x52\x7d\xb8\xda\xaa\x14\x33\x75\x49\x75\xa7\x86\xd5\x6f\x99\
\xc3\xb0\xb2\x33\x6e\x03\x01\x6c\xbc\x3f\x12\x54\xe6\xc2\x96\xea\
\x85\xca\x30\xd2\x24\xe3\x36\x10\xc0\x86\xb9\x53\x6e\xbf\xcc\x5b\
\xfb\x51\xdd\x72\xbe\xf3\x88\x09\xb6\x00\x9b\xeb\xc9\x4a\x30\x77\
\x9e\xa2\x04\xe3\x62\xce\x0a\xc0\xc6\xb9\x5a\x75\x46\x9e\x42\x99\
\xbb\x2f\xf1\xd5\x91\xd5\xe7\x94\x62\x8e\x2e\x8a\x39\x2b\x00\x33\
\xf1\x40\x41\x65\x3e\xbf\xa8\x57\x3f\xa1\x0c\xe3\x21\xac\x00\x6c\
\x9c\xfb\x2a\x81\x6b\xc3\x3a\xa4\x4b\xb7\x81\x00\x36\xcc\x17\xaa\
\xeb\x28\xc3\x5c\x3a\xb7\xba\x72\xc3\x2d\x21\xe6\x80\xdb\x40\x00\
\x9b\xef\x50\x41\x65\xae\x1d\xd4\x30\x6f\x85\x11\x10\x56\x00\x36\
\xc6\x0d\x94\xc0\x35\x42\x58\x01\x98\x67\xd7\x50\x02\xd7\x08\x61\
\x05\x60\x9e\x1d\xa0\x04\x73\xef\x4a\x4a\x20\xac\x00\x2c\xb2\xed\
\x4a\xe0\x1a\x21\xac\x00\xcc\xb3\x73\x95\x60\xee\x9d\xa3\x04\xc2\
\x0a\xc0\x22\xfb\x92\x12\xb8\x46\x08\x2b\x00\xf3\xec\x3f\xab\x4b\
\x94\x61\xae\x7d\x5a\x09\x84\x15\x80\x45\x76\x7e\x75\x8a\x32\xcc\
\xad\x2f\x56\x5f\x55\x06\x61\x05\x60\xd1\xbd\x47\x09\xe6\xd6\xbb\
\x94\x40\x58\x01\xa0\xfe\x55\x09\xe6\xd6\x1b\x94\x60\x3c\xec\x0d\
\x04\xb0\x71\xf6\x6b\x98\xc4\x79\x98\x52\xcc\x95\x6f\x57\xd7\xca\
\xd3\x40\x73\xc5\xde\x40\x00\xb3\x71\x51\xf5\x52\x65\x98\x3b\xff\
\x20\xa8\x8c\x8b\x91\x15\x80\x8d\x75\x44\xf5\x99\x6a\x7f\xa5\x98\
\x0b\x97\x54\x37\xab\x3e\xa5\x14\xf3\xc5\xc8\x0a\xc0\xec\x7c\xa9\
\x7a\xb1\x32\xcc\x8d\x97\x0a\x2a\xe3\x63\x64\x05\x60\xe3\x1d\x5c\
\x7d\xbc\xba\x8e\x52\xcc\xd4\xd7\xab\x9b\x54\x5f\x53\x8a\xf9\x63\
\x64\x05\x60\xb6\xce\xa9\x1e\x53\x2d\x2b\xc5\x4c\x3d\x41\x50\x19\
\x71\x92\xb9\xb4\x01\xb0\xa1\x9e\xbd\x12\x58\xb4\xcd\x6f\x2f\xd0\
\xfd\xc6\x93\x47\x2e\xdf\x84\x15\x80\xcd\xf5\x32\xc1\x61\xd3\xdb\
\x6b\xab\x2d\xba\x9e\xb0\x02\xc0\x9e\xd9\x52\xbd\x5c\x80\xd8\xb4\
\xf6\xba\x6a\x9b\x6e\x27\xac\x00\xb0\x3a\x4b\xd5\xf3\x04\x89\x0d\
\x6f\x7f\x5a\xed\xab\xbb\x09\x2b\x00\xec\xbd\x07\x37\x4c\xf8\x14\
\x2c\xd6\xb7\x9d\x5d\x3d\x52\xf7\x12\x56\x00\x58\x1f\xd7\xa8\xfe\
\xa6\xda\x2e\x64\xac\x4b\x7b\x65\x1e\x11\x17\x56\x00\xd8\x10\xb7\
\x68\x58\x06\xfe\x22\x81\x63\xd5\xed\xe2\xea\x9f\xaa\x3b\xe8\x46\
\xd3\x0c\x2b\x16\x85\x03\x98\x2f\x87\x57\x0f\xad\x7e\xbc\xba\x4b\
\x96\xe9\xdf\x99\x0b\xaa\x0f\x54\xaf\xaf\xfe\xb1\xfa\xa2\x92\x8c\
\x3f\xac\xec\x8c\xb0\x02\x30\xbf\xb6\x36\xec\x63\x73\x93\x86\x5b\
\x1b\x87\xae\xfc\x6f\xbb\x7b\xb3\x7e\x68\xe3\xb9\x15\x72\x72\xf5\
\x6f\xbb\xfb\x1c\xab\x2e\xac\xbe\x55\x9d\x5e\x7d\xb2\xfa\xd8\x4a\
\x60\x61\x01\xc2\x8a\xdb\x40\x00\xd3\x73\x5c\xe3\xb9\x85\xf3\x37\
\x2e\x17\x97\xcf\x23\x97\x6f\x96\xdb\x07\x00\xe6\x9a\xb0\x02\x00\
\x08\x2b\x00\x00\xc2\x0a\x00\x20\xac\x00\x00\x6c\xb6\x8d\xda\x33\
\xe1\xfb\x1b\x1e\xb5\xbb\x41\x75\x95\xea\xc0\xec\x78\x09\x6c\x9e\
\xed\xd5\xf9\xd5\x99\xd5\x69\xd5\x47\xab\xcf\x29\x0b\x2c\x76\x58\
\xd9\x56\x3d\xa0\xfa\x89\xea\x5e\x0d\x8b\x1a\x01\xcc\x93\x33\xaa\
\xb7\x54\xaf\x6e\x58\xd7\xe3\x62\x25\x81\x71\x58\xeb\x6d\xa0\xab\
\x55\xbf\x53\x7d\xa9\x7a\x55\xf5\x30\x41\x05\x98\x53\xd7\xaa\x1e\
\xd5\xb0\xe2\xe9\xe7\xaa\xff\x59\x1d\xac\x2c\x33\x77\x70\x75\x80\
\x32\xb0\x2b\x7b\xbb\x82\xed\xbe\xd5\xaf\x56\xcf\xae\x0e\x52\x46\
\x60\xa4\xce\xac\x7e\xa3\xfa\xeb\x86\x05\xca\xa6\xe2\xb8\xea\x1e\
\x23\x3a\xde\xed\x0d\xb7\xeb\x4e\x5e\x69\x1f\x5f\xf9\xf3\x94\x86\
\xdb\x79\x2c\x80\xf5\x5e\x6e\xff\x86\xd5\xb1\xd5\x6d\x94\x16\x98\
\x88\x77\x54\x8f\x6c\x18\x25\x16\x56\xe6\xc7\xc5\x0d\xcb\xea\x7f\
\x70\x87\x76\xca\xc4\x82\x25\x1b\x10\x56\x7e\xac\x61\x57\x50\x43\
\xa7\xc0\xd4\x7c\xa5\x7a\x70\xf5\x7e\x61\x65\xae\x9d\x5d\xfd\x47\
\x75\x7c\xf5\xf6\x86\xcd\x0c\x2f\xd4\x7d\x85\x95\x4b\x3d\xaa\x7a\
\x49\x9e\xea\x01\xa6\xeb\xfc\xea\xbf\x55\x6f\x12\x56\x46\xe3\xdb\
\xd5\x7b\x56\x82\xcb\xdb\xab\x93\x32\xf2\xb2\xb0\x61\xe5\xa1\x0d\
\x23\x2a\xd6\x65\x01\xa6\xee\x82\xea\x7e\x0d\xb7\x86\x84\x95\xf1\
\xf9\xda\x4a\xd8\x7c\x6d\xc3\x53\x5f\xe7\xe9\xd2\x8b\x11\x56\xee\
\xb2\xf2\xa2\xdd\xaa\x94\xc0\x82\xf8\x56\x75\xc7\xea\x53\xc2\xca\
\xa8\x7d\xa7\x7a\xdb\x4a\x70\x79\xdd\x4a\x90\x61\x82\x61\xe5\xb0\
\xea\xc4\xea\x3a\xca\x08\x2c\x98\x8f\x56\x77\x68\x18\x69\x11\x56\
\xc6\xef\x92\xea\xdd\xd5\xcb\x1b\xd6\xda\x39\x5b\x49\xc6\x13\x56\
\x76\x77\x5b\xe7\x4f\x04\x15\x60\x41\xdd\xa2\xfa\x2d\x65\x98\x8c\
\x2d\x0d\x8b\x96\xbe\xa4\x61\x32\xf5\xb1\x0d\x0f\x8d\xec\xab\x34\
\xf3\x6f\x57\x23\x2b\xf7\x6a\xdc\xf7\x6c\x01\xd6\xea\xa2\xea\xe6\
\x8d\xef\x76\xd0\x71\x19\x59\xd9\x53\x67\x56\xff\x58\xfd\x45\xc3\
\xda\x2e\xcc\xc8\xde\x8e\xac\xfc\x9e\xd2\x01\x0b\x6e\xbf\xea\x39\
\xca\x30\x69\x57\xaf\x9e\xd4\xb0\x9e\xcb\xbb\x1b\x56\x62\xdf\x4f\
\x59\xe6\xcb\xce\xc2\xca\x3d\xaa\x1f\x54\x1e\x80\x1e\xd2\xb0\x29\
\x2b\xd3\x77\xb7\xea\x15\xd5\x17\xaa\xdf\xad\xae\xab\x24\xf3\x1d\
\x56\x1e\xaf\x34\x00\x97\xbd\x4f\x3e\x4e\x19\x16\xca\xe1\x0d\xdb\
\x30\x9c\xd6\x70\x8b\xe8\xd6\x4a\x32\x7f\x61\xe5\xc0\xea\x41\x4a\
\x03\x70\x99\x87\x2b\xc1\x42\xda\xd2\xb0\xce\xd8\x89\x0d\x6b\xb6\
\xdc\x4b\x49\xe6\x27\xac\xdc\xa3\xba\x92\xd2\x00\x5c\xe6\xfb\xaa\
\x1f\x50\x86\x85\xf6\xa3\x0d\x0f\x9d\x7c\xb0\x61\x95\xe3\x25\x25\
\x99\x6d\x58\xb9\xa7\xb2\x00\x5c\xc1\xdd\x95\x80\x86\xc5\x02\x5f\
\xd3\x30\xda\xf2\x00\xe5\x98\x5d\x58\xb9\x95\xb2\x00\x5c\x81\x9d\
\xe6\xb9\xfc\x67\xe5\xeb\xaa\xf7\xf9\x92\x3f\x9b\xb0\x72\x63\x65\
\x01\xb8\x82\x1b\x2a\x01\xdf\xc3\x0f\x56\xef\xac\xde\xda\x30\xea\
\xc2\x26\x85\x95\x6b\x2a\x0b\xc0\x15\x1c\xa1\x04\xec\xc2\x0f\x37\
\xcc\x67\xf9\xc7\x3c\xf2\xbc\xe1\x61\x65\xdf\x6a\x9b\xb2\x00\x5c\
\xc1\xc1\x4a\xc0\x1e\x78\x68\xf5\x89\xea\xd9\xd5\x01\xca\xb1\x31\
\x61\x65\x8b\x92\x00\xec\xd1\xfb\x25\xec\xcc\x95\x1a\xf6\x95\xfa\
\xc4\x4a\x78\x61\x9d\x5f\x7c\x17\x54\x17\x2b\x0b\xc0\x15\x9c\x37\
\xa2\x63\xbd\xc4\xe5\x9a\x0b\xd7\x6b\xb8\x2d\xf4\xae\xea\xa6\xca\
\xb1\xbe\xdf\x14\xbe\xae\x2c\x00\x57\xf0\xd5\x11\x1d\xeb\xb9\x2e\
\xd7\x5c\xb9\x7b\xc3\xa3\xce\xbf\x99\x7d\x87\xd6\x2d\xac\x7c\x5a\
\x59\x00\xae\xe0\x33\x23\x3a\xd6\x2f\xbb\x5c\x73\x67\x5b\xf5\xdb\
\xd5\x09\xd5\xd1\xca\xb1\xf6\xb0\xf2\x71\x65\x01\xb8\x82\x8f\x0a\
\x56\xac\x83\x9b\x57\xc7\x57\xff\xbb\x3a\x48\x39\xf6\x3e\xac\xbc\
\x5b\x59\x00\xae\xe0\xf8\x11\x1d\xeb\x49\x2e\xd7\xdc\x7f\xf6\x3e\
\xa9\xfa\x48\x75\x67\xe5\xd8\xbd\xa5\xe5\xe5\xe5\xef\xfe\xcb\xd2\
\x52\xd5\xb5\xaa\x2f\x66\xdf\x03\x80\x4b\x7d\xab\xba\x7a\x75\xd1\
\x48\x8e\xf7\xca\x0d\xf3\x0f\xf7\x75\xe9\xe6\xde\x25\xd5\xef\x37\
\xdc\x22\x5a\xe8\x07\x5c\x76\xcc\x23\xdf\x2b\xdd\x5d\xde\x19\xd5\
\x71\xfa\x0f\xc0\x65\x5e\x3b\xa2\xa0\x52\x75\x76\xf5\x5e\x97\x6d\
\x14\xb6\x54\xcf\x6c\x58\xb6\xdf\x2a\xc9\xab\x08\x2b\x55\xff\x57\
\x69\x00\x2e\xf3\x92\x11\x1e\xf3\xb1\x2e\xdb\xa8\xdc\xa1\xe1\x89\
\xa1\xc7\x2b\xc5\x15\x7d\xaf\xdb\x40\x35\x3c\x5a\x75\x6a\xc3\x33\
\xe2\x00\x8b\xec\xdf\xab\x3b\x8d\xf0\xb8\xaf\xdc\x70\x4b\xdf\x24\
\xce\xf1\x79\x45\xf5\xb8\xea\xdb\x8b\x74\xd2\xab\xbd\x0d\x54\xc3\
\x70\xe7\x73\xf5\x17\x80\x9e\x35\xd2\xe3\x3e\xbb\xfa\x0b\x97\x6f\
\x94\x1e\xbe\x12\x92\x6f\xa2\x14\x83\x9d\x8d\xac\x5c\x1a\x64\xfe\
\xa3\xba\xad\x32\x01\x0b\xea\x75\xd5\x03\x47\x7c\xfc\x57\x6f\x18\
\x25\xbf\xb2\x4b\x39\x4a\xe7\x56\x8f\xa9\x5e\xb5\x08\x27\xbb\x37\
\x23\x2b\x55\xdb\xab\x9f\xab\x2e\xd4\x5f\x80\x05\x74\x56\xf5\x8b\
\x23\x3f\x87\x33\x1b\x56\x4d\x65\x9c\x0e\xaa\x5e\x59\xbd\xb0\x05\
\x5f\xf9\x76\x77\x1b\x73\x9d\x58\x3d\x45\x7f\x01\x16\xcc\x72\xf5\
\xe8\xea\xf4\x09\x9c\xcb\x8b\x1a\xf6\xa6\x61\xbc\x7e\xa5\xfa\xb7\
\xea\x50\x61\x65\xe7\x5e\x5c\xfd\x1f\x7d\x05\x58\x20\xcf\x68\xb8\
\x05\x34\x05\xdb\xab\x47\x64\x09\xfe\xb1\xfb\xa1\xea\x03\x2d\xe8\
\xe3\xcd\x7b\xba\xe5\xf9\x2f\x57\x7f\xa7\xaf\x00\x0b\xe0\x79\x2b\
\x6d\x4a\xbe\x54\x3d\x20\x1b\x1c\x8e\xdd\x8d\x56\x02\xcb\x3d\x85\
\x95\x9d\x27\xf3\x47\x55\x2f\xd0\x57\x80\x89\x5a\xae\x9e\x5a\xfd\
\xcf\x89\x9e\xdf\xff\xab\xee\x2f\xb0\x8c\xde\x55\xab\xb7\x54\x8f\
\x5d\xa4\x93\xde\xd5\xd3\x40\x3b\xf3\x33\xd5\x9f\x57\x07\xea\x33\
\xc0\x44\x7c\xad\xfa\xef\xd5\x9b\x16\xe0\x5c\x6f\x5b\xbd\xbe\x3a\
\xc2\x65\x1f\xbd\xe7\x56\xbf\x31\x99\x6f\x0b\x7b\xf9\x34\xd0\xce\
\xbc\xbc\xba\x55\xc3\x64\x1f\x80\xb1\x3b\xb6\xba\xc5\x82\x04\x95\
\xaa\x13\x56\x02\xcb\x5b\x5c\xfa\xd1\x7b\x46\xc3\x5a\x3a\xfb\x4c\
\xfd\x44\xf7\xf6\x04\x3f\x53\x1d\xd3\x30\xa4\xf8\x41\xfd\x05\x18\
\xa1\x77\x54\x77\xad\x1e\xd6\xe2\x4d\x3e\xfd\x4a\x75\xdf\x86\xe5\
\x29\xce\xd4\x15\x46\xed\xf1\x2b\x81\x7b\xeb\x94\x4f\x72\x6f\x6e\
\x03\x7d\x2f\x77\x6e\x18\x42\xfd\x89\xea\x30\x7d\x07\x98\x53\x5f\
\xac\x5e\x53\xbd\xb4\xfa\xb0\x72\x54\xc3\x82\x71\xbf\x52\x3d\xa9\
\x61\x11\x39\xc6\xe9\x6d\xd5\x83\x1b\xf1\x9c\xa4\x5d\xdd\x06\x5a\
\xaf\xb0\x72\xd9\x5f\x51\xdd\x72\x25\xbc\xdc\xb4\x3a\xaa\x3a\xa4\
\x3a\xb8\x61\x67\x49\x80\xcd\x70\x49\x75\xde\xca\xa8\xc1\x69\xd5\
\xc7\x1a\x76\xb5\x3d\x59\x69\x76\x6a\x5b\xc3\x6a\xbd\x3f\x55\xfd\
\x48\x56\xbd\x1d\xa3\xff\x68\x18\x31\xfb\x86\xb0\x02\xc0\xd4\x6d\
\x69\x98\xc7\x73\xab\x95\x2f\x9d\x87\x37\x3c\x54\xb1\xad\xe1\x76\
\xc3\xd6\x1d\xfe\x79\xff\x86\x27\x54\x0e\x5b\xf9\x62\xca\x6c\x9d\
\x54\xdd\x7b\x8c\x81\x45\x58\x01\x60\x33\x6c\xad\xae\xb6\xd2\x8e\
\xa8\x8e\xac\xae\xbf\xc3\x9f\x47\x65\xaa\xc0\x66\x38\xb1\xfa\xe1\
\xb1\x05\x16\x61\x05\x80\x79\x71\xcd\x86\xe9\x02\x3b\xb6\x1f\xa8\
\xf6\x55\x9a\xc5\x0e\x2c\xc2\x0a\x00\xf3\xec\xc0\xea\x4e\xd5\x5d\
\x56\xda\x0f\x66\xce\xcc\x7a\x05\x96\x7b\x37\x6c\xca\x29\xac\x00\
\xc0\x3a\xda\xa7\xba\x5d\xc3\x12\x19\xc7\x54\x77\x6c\x01\xd6\x12\
\xd9\x20\x1f\x6a\xd8\x57\xe8\xec\x31\x87\x95\x96\x97\x97\x2f\x6b\
\x00\x30\x87\xae\x56\x3d\xbc\x7a\x45\xc3\xa3\xb9\xcb\xda\xaa\xda\
\x3b\x1a\x26\x44\xcf\x7d\x58\xd9\x59\x13\x56\x00\x18\x93\x2b\x55\
\x0f\xa9\x5e\xd5\xf0\x78\xba\x30\xb2\x67\xed\x35\xcd\xf9\xe8\x94\
\xb0\x02\xc0\x14\x1d\xd4\xb0\x20\xe9\x7b\x84\x91\x3d\x6a\x7f\x21\
\xac\x00\xc0\xec\xdc\xb4\x7a\x7e\xc3\xa6\x94\x82\xc9\xce\xdb\xef\
\x08\x2b\x00\x30\x5b\xdb\x56\x46\x5b\x3e\x2c\x98\xec\xb4\x3d\x41\
\x58\x01\x80\xf9\x70\x9f\x86\xdd\xa5\x05\x94\xff\xda\x2e\x6a\x58\
\x83\x45\x58\x01\x80\x39\x71\xcb\xea\x95\xd5\x76\x41\xe5\xb2\x76\
\x56\x75\x63\x61\x05\x00\xe6\xcb\xad\xaa\x7f\x11\x54\x2e\x6b\x9f\
\xaa\x0e\x15\x56\x00\x60\xfe\xdc\xa1\x7a\xb3\xb0\xd2\x72\xf5\xf6\
\xe6\x64\xab\x03\x61\x05\x00\xae\xe8\x98\xea\x14\x81\xa5\xff\x23\
\xac\x00\xc0\xfc\xda\xaf\xfa\xd5\x86\x39\x1c\x8b\x1c\x58\x1e\x21\
\xac\x00\xc0\x7c\x3b\xac\x7a\xc9\x02\x87\x95\x73\xab\x9b\x09\x2b\
\x00\x30\xff\x7e\xa8\x3a\x75\x41\x03\xcb\xc7\x1b\x76\xc0\x9e\xbb\
\xb0\x62\xd7\x65\x00\x76\xe6\x80\xea\xfb\xaa\xc3\x1b\xf6\xe4\xa9\
\x3a\xa7\xfa\x72\xf5\xb9\x86\xf5\x3a\xa6\x7a\xde\xbf\x5d\x3d\xb9\
\xda\xb2\x60\xd7\xfc\x15\xcd\xe8\x96\x90\x5d\x97\x01\xd8\x13\x07\
\x56\x3f\xd9\x70\x3b\xe4\x94\xea\x92\x5d\x7c\x0b\xbf\xb0\x3a\xa9\
\x7a\x51\x75\xdf\x86\xb9\x1f\x53\x73\xfb\xea\x93\x2d\xde\x08\xcb\
\x13\x67\x15\x56\xdc\x06\x02\x60\x67\x6e\xd2\xb0\xc9\xdd\x39\x6b\
\xf8\x80\x3b\xb3\xfa\x83\xea\xda\x13\x0c\x70\x7f\xb5\x60\x61\xe5\
\xbc\x86\xbd\x96\x84\x15\x00\x66\xee\x88\xea\x65\xbb\x19\x41\x59\
\x6d\x3b\xbf\xfa\xe3\xea\x90\x89\xd5\xea\xc1\x2d\xd6\x26\x89\x1f\
\x6a\x93\x47\xcb\x84\x15\x00\x2e\xef\x31\xd5\xb7\x36\xf0\xc3\xee\
\x8b\x0d\xeb\x98\x4c\xc9\xb5\xab\xe3\x17\x28\xb0\xfc\x9e\xb0\x02\
\xc0\x2c\xec\x5f\xfd\xdd\x26\x7d\xd8\x6d\xaf\x9e\x5b\x4d\xe9\xe9\
\x8d\xfd\x1a\xe6\xe9\x2c\x42\x58\xb9\xb8\xba\x8b\xb0\x02\xc0\x66\
\x3a\xa4\x7a\xef\x0c\x3e\xf4\x8e\x6d\x7a\x13\x70\x1f\xd9\x30\xb7\
\x63\xea\x81\xe5\x33\xd5\xc1\xc2\x0a\x00\x9b\xe1\xa0\xea\x03\x33\
\xfc\xd0\x7b\x4d\xd3\x7b\x0c\xf8\xd6\xd5\xe7\x17\x20\xb0\xfc\x99\
\xb0\x02\xc0\x46\xdb\xa7\x7a\xe3\x1c\x7c\xe8\xbd\x78\x82\xb5\x3d\
\xa2\x3a\x61\xe2\x61\x65\x7b\x75\x57\x61\x05\x80\x8d\xf4\x3b\x73\
\xf4\xc1\xf7\xc8\x89\x8e\x5a\xfd\x6b\xd3\x5f\xdd\x76\xab\xb0\x02\
\xc0\x46\x38\xba\xf5\x7d\x34\x79\xad\xed\xec\xea\xba\x13\xac\xf3\
\x96\xea\x4f\x27\x1e\x58\x9e\x2d\xac\x00\xb0\x11\x1f\xa0\x1f\x9e\
\xc3\x0f\xbd\xd7\x4c\xb8\xe6\xbf\x3b\xe1\xb0\x72\x41\x1b\xb8\x58\
\x9c\xb0\x02\xb0\x98\x1e\x35\xc7\x1f\x7c\x77\x9e\x70\xdd\x9f\x3e\
\xe1\xc0\xf2\x9e\x36\xe8\x51\x74\x61\x05\x60\xf1\x2c\x35\xdf\xfb\
\xda\xbc\x61\xe2\xf5\xff\xa5\x86\x89\xa9\x53\x0c\x2c\x3f\x23\xac\
\x00\xb0\x1e\x7e\xa4\xf9\x7f\xc2\xe4\x06\x13\xbf\x06\x8f\x9d\x68\
\x60\x39\xbd\x61\xcf\xa4\x4d\x0b\x2b\xfb\x78\x3d\x03\x4c\xd2\x7f\
\x1f\xc1\xc8\xcf\x23\x26\x7e\x0d\x5e\x52\xfd\xf2\x04\xcf\xeb\xda\
\xd5\xd3\x36\xf5\x37\x1a\x59\x01\x98\x9c\x2d\xd5\x37\x46\xf0\x0d\
\xfd\x3f\x16\xe4\x7a\x3c\x6d\x82\xa3\x2b\xe7\x55\xdf\xb7\x59\x23\
\x2b\xc2\x0a\xc0\xf4\xdc\x6e\x24\x1f\x78\x97\x54\x57\x5e\x90\x6b\
\x32\xc5\xa7\x84\x8e\x15\x56\x00\xd8\x5b\x8f\x1f\xd1\x07\xde\x3d\
\x17\xe8\xba\x4c\x71\x1d\x96\x75\x5b\xd9\xd6\x9c\x15\x80\xc5\x72\
\x13\xc7\x3a\x97\x9e\xd4\xf4\x9e\x82\x7a\xee\x66\xfc\x12\x61\x05\
\x60\x7a\xae\xe3\x58\xe7\xd2\x25\xd5\x43\xab\x13\x27\x74\x4e\x77\
\xab\xee\x23\xac\x00\xb0\x5a\x57\x19\xd1\xb1\x1e\xba\x60\xd7\xe6\
\xdb\xd5\x8f\x55\x5f\x98\xd0\x39\xfd\xae\xb0\x02\xc0\x6a\xed\x37\
\xa2\x63\xdd\xba\x80\xd7\xe7\x4b\xd5\x03\xaa\xef\x4c\xe4\x7c\xee\
\x50\x3d\x50\x58\x01\x60\x35\x96\x1c\xeb\xdc\xfb\x70\xc3\x44\xe8\
\xa9\xf8\x9d\x8d\xcc\x14\xc2\x0a\x00\xcc\xc6\xcb\xab\x17\x4f\xe4\
\x5c\x6e\x51\x3d\x64\xa3\xfe\xf2\x7d\xf5\x95\x55\xd9\xbf\xba\x79\
\x75\xc3\xea\x88\xea\x60\x35\x84\xb9\xb4\xbd\x3a\xbf\x3a\xb3\x3a\
\xad\xfa\x68\xf5\x15\x65\x61\x0e\x3d\xb9\xba\x4d\x75\x97\x09\x9c\
\xcb\xd3\x5b\xe7\xb5\x57\x84\x95\x3d\x77\xfd\xea\x61\x0d\x13\xa2\
\xee\xd8\xb8\xee\x05\x03\xdf\x75\x5a\xf5\x96\xea\xd5\xd5\x3b\x56\
\x02\x0d\xcc\xda\x45\x0d\x23\x12\x1f\xa9\x0e\x1b\xf9\xb9\xdc\xba\
\xe1\xc9\xa0\xb7\xac\xf7\x5f\xec\x36\xd0\xce\xdd\xa3\x7a\x63\xf5\
\x9f\xd5\xef\xaf\xa4\x5e\x41\x05\xc6\xeb\xc8\xea\xe7\xab\xb7\x56\
\x9f\xa9\x7e\xad\x3a\x40\x59\x98\x03\x67\x54\x3f\x37\x91\x73\x79\
\xea\x46\xfc\xa5\xc2\xca\x15\xdd\xb0\x7a\x53\x75\x5c\x75\x4c\x8b\
\x3b\xf9\x0b\xa6\xec\xfa\xd5\xf3\xab\x53\xab\x47\x2a\x07\x73\xe0\
\x5f\xaa\xbf\x9c\xc0\x79\xdc\xbb\x61\xbb\x07\x61\x65\x03\xfd\x72\
\xc3\x50\xdc\x7d\x95\x02\x16\xc2\x11\xd5\xdf\x36\x8c\xa2\x5e\x43\
\x39\x98\xb1\x5f\xab\x3e\x39\x81\xf3\x58\xf7\xd1\x15\x61\x65\x70\
\x40\xc3\xa4\xa0\x3f\x69\x98\x44\x0b\x2c\x96\x63\xaa\x13\x36\xe2\
\x1b\x21\xac\xc2\x79\xd5\x23\xaa\x8b\x47\x7e\x1e\x3f\x51\xdd\x40\
\x58\x59\x5f\x07\x57\x6f\xae\x7e\x4a\x29\x60\xa1\x5d\xbb\x7a\x57\
\xc3\x30\x36\xcc\xca\x09\xd5\x1f\x8f\xfc\x1c\xb6\x54\x4f\x10\x56\
\xd6\xcf\x01\x0d\x9b\x4a\xdd\xcd\xeb\x03\xa8\x0e\xac\x5e\xdf\x3a\
\xee\x24\x0b\x7b\xe1\xb7\x1a\xe6\x53\x8d\xd9\xcf\x56\xdb\x84\x95\
\xf5\xf1\x32\x41\x05\xf8\x1e\x5f\x62\xfe\xa5\x75\x1e\xc6\x86\x55\
\x38\xbf\x7a\x5c\xb5\x3c\xe2\x73\x38\xac\x75\x5c\x24\x6e\x91\xc3\
\xca\xaf\xb4\x81\xab\xed\x01\xa3\x76\xd5\x86\xf5\x58\xb6\x2a\x05\
\x33\x72\x5c\xf5\x92\x91\x9f\xc3\x13\x85\x95\xb5\xb9\x51\xf5\x3c\
\xaf\x05\x60\x17\x6e\x53\x3d\x53\x19\x98\xa1\xa7\x57\x67\x8d\xf8\
\xf8\xef\x5c\xdd\x52\x58\xd9\x7b\x9e\xfa\x01\xf6\xc4\x53\x73\x3b\
\x88\xd9\xf9\x7a\xc3\xfc\x95\x31\x5b\x97\xd1\x95\x45\x0c\x2b\xf7\
\xc8\x3a\x2a\xc0\x9e\xd9\x56\x3d\x47\x19\x98\xa1\x3f\xad\x3e\x3e\
\xe2\xe3\x7f\x78\xeb\x30\xd1\x76\x11\xc3\xca\xd3\xf4\x7d\x60\x15\
\x1e\xd6\xb0\xe2\x2d\xcc\xc2\xc5\x0d\x8b\xc5\x8d\xd5\x21\xd5\x03\
\x84\x95\xd5\x39\x32\xa3\x2a\xc0\xea\x6c\x69\x3a\xfb\xb6\x30\x4e\
\x6f\x69\x58\x65\x79\xac\x7e\x5a\x58\x59\xfd\x37\x24\x7b\xfd\x00\
\xab\xf5\x70\x25\x60\xc6\x9e\xd5\x78\x1f\x65\xbe\x5f\x75\xa8\xb0\
\xb2\xe7\x7e\x4c\x7f\x07\xf6\xc2\x0d\xaa\x9b\x2a\x03\x33\x74\x42\
\xf5\xcf\x23\x3d\xf6\xad\xad\x71\xa9\x90\x45\x0a\x2b\xfb\x57\x77\
\xd0\xdf\x81\xbd\x74\x77\x25\x60\xc6\x9e\x5d\x6d\x1f\xe9\xb1\xff\
\x8c\xb0\xb2\x67\x6e\x5e\xed\xa7\xaf\x03\x7b\xe9\xb6\x4a\xc0\x8c\
\x7d\xac\x7a\xe5\x48\x8f\xfd\xae\xd5\x75\x84\x95\xdd\xbb\xa1\x7e\
\x0e\x78\x0f\x61\xe4\x7e\xa7\x71\xce\x5d\x59\xaa\x7e\x5c\x58\xd9\
\xbd\x23\xf4\x71\xc0\x7b\x08\x23\xf7\xf1\xea\x4d\x23\x3d\x76\x61\
\x65\x0f\x1c\xac\x8f\x03\x6b\x70\x65\x25\x60\x4e\xfc\xd1\x48\x8f\
\xfb\x5e\x7b\xfb\x3a\x5a\xa4\xb0\xb2\x8f\xfe\x0d\xac\xc1\x16\x25\
\x60\x4e\xbc\xb3\xfa\xd0\x08\x8f\x7b\x6b\x7b\xb9\xd6\xd9\x22\x7d\
\x80\x7f\x47\xff\x06\xd6\xe0\xdb\x4a\xc0\x1c\x19\xeb\xe8\xca\x03\
\x85\x95\x5d\xfb\xaa\xbe\x0d\x78\x0f\x61\x22\x5e\x53\x7d\x65\x84\
\xc7\x7d\xbf\x6a\x5f\x61\x65\xe7\x4e\xd3\xb7\x01\xef\x21\x4c\xc4\
\x45\xd5\xdf\x8c\xf0\xb8\xaf\x52\xdd\x51\x58\xd9\xb9\x8f\xea\xdb\
\x80\xf7\x10\x26\xe4\xaf\x1b\xe7\x63\xcc\x3f\x24\xac\xec\xdc\x99\
\xd5\xa7\xf5\x6d\x60\x2f\xbd\x57\x09\x98\x33\xa7\x56\xc7\x09\x2b\
\xd3\xf3\x66\x7d\x1b\xd8\x0b\xe7\x54\xef\x57\x06\xe6\xd0\x5f\x8d\
\xf0\x98\x7f\xb0\x61\x0b\x1c\x61\x65\x27\x5e\xa5\x5f\x03\x7b\xe1\
\x5f\x1a\xe6\x08\xc0\xbc\x79\x6d\x75\xee\xc8\x8e\x79\xff\xea\x2e\
\xc2\xca\xce\xbd\xa7\xfa\x94\xbe\x0d\xac\xd2\x4b\x94\x80\x39\xf5\
\x9d\xea\xf5\x23\x3c\xee\x55\xdd\x0a\x5a\xb4\xb0\xb2\x5c\xbd\x40\
\xdf\x06\x56\xe1\x43\x8d\x73\x5e\x00\x7b\xee\xea\xd5\xcd\xaa\x3b\
\x54\xb7\xab\x6e\xdc\xb8\x56\x3d\x3f\x76\x84\x35\xbf\xe7\x6a\x7e\
\x78\xdf\x05\xec\x94\x2f\xad\x9e\x5a\x1d\xe9\xf5\x09\xec\x81\x67\
\x29\xc1\xa4\xec\x57\xdd\xad\xba\x4f\xc3\xad\x88\x5b\xb6\xf3\x25\
\xe0\xbf\x52\x9d\xd8\x30\x2a\xff\xaf\xd5\x47\xe6\xf4\x9c\xfe\xad\
\x3a\xbb\x71\x6d\x09\x71\xdb\x95\x0c\x72\xf1\x1e\xfd\xf4\xf2\xf2\
\xf2\x65\x6d\x81\x3c\xb8\x61\x94\x45\xd3\x34\x6d\x57\xed\x0d\x23\
\x7d\x8f\x3b\x6e\x44\x35\xfe\xeb\x4d\xa8\xc7\xb5\xab\xc7\x55\xff\
\xbc\xf2\xa1\xbe\xb7\xc7\x7a\x72\xf5\x2b\xd5\x41\x73\x78\xcd\xff\
\x76\x84\xaf\xaf\xdb\xee\x2c\x8f\x5c\xbe\x2d\xea\x7e\x39\xff\x5c\
\xbd\xd2\x17\x0c\x60\x17\xbe\x55\x3d\x51\x19\x46\x69\xdf\xea\xee\
\xd5\xef\x57\x1f\xae\x4e\xaf\xfe\xb2\x7a\x50\x6b\xbb\xbd\x73\xb3\
\xea\x85\xd5\xe7\xaa\x27\xaf\x8c\xd2\xcc\x8b\xd7\x8d\xf0\x3a\xed\
\xf1\xe2\x70\x8b\xbc\xb9\xdf\xcf\x57\x9f\xf1\x9a\x06\x76\xe2\x31\
\xd5\xe7\x95\x61\x34\x0e\xaf\x1e\xbd\xf2\x45\xf4\xcc\xea\x5d\xd5\
\xd3\x1b\x6e\xf3\xac\xb7\xab\x56\x7f\x5c\x9d\xd4\x30\xcf\x65\x1e\
\xbc\xb5\x3d\xbd\xa5\x22\xac\x8c\xca\x37\xab\x07\x54\x67\x79\x8d\
\x03\x97\xf3\xec\xea\x9f\x94\x61\xae\xed\x53\x1d\x5d\x3d\xa7\xfa\
\x7f\xd5\x19\xd5\xff\xad\x1e\xd2\xb0\xa4\xfb\x66\xb8\x59\xf5\xbe\
\x86\x5b\x43\xb3\xf6\xad\xc6\xb7\x16\xd0\x1e\x87\x95\x7d\x17\xbc\
\xb3\x9f\x52\x1d\xd3\xb0\x58\xdc\x21\x5e\xfb\x40\xc3\x13\x83\xcf\
\x51\x86\xb9\x74\xb5\xea\xbe\x0d\x9b\xe1\xdd\xa7\x3a\x6c\x0e\x8e\
\x69\xdf\x86\x5b\x43\x37\xae\x7e\xa9\xda\x3e\xc3\x63\xf9\xb7\x86\
\xc9\xc3\x63\x71\xd3\x86\xf9\x3f\xbb\x5d\x27\x66\x1f\x7d\xbf\x0f\
\x56\xf7\xa8\xbe\xa8\x14\xb0\xd0\x96\x1b\x9e\xfc\x79\xb2\x52\xcc\
\x8d\xa5\x86\x47\x89\x9f\xb5\x32\x6a\xf0\xd5\xea\xe5\xd5\x23\xe6\
\x24\xa8\xec\xe8\x89\x0d\xab\xc9\x2e\xcd\xf0\x18\xde\x34\xb2\xeb\
\xbb\x4f\x75\xeb\x3d\x4d\x84\x0c\x13\xb0\x6e\x5f\xbd\xa2\x55\x3e\
\xfb\x0d\x4c\xc2\x59\xd5\xcf\x36\xac\x54\xcb\x6c\x5d\xa5\x61\xd4\
\xe4\x7e\x0d\xa3\x28\x87\x8f\xe8\xd8\x1f\x53\x7d\xa9\xd9\x3d\xee\
\x7e\x52\xf5\x8d\x86\x39\x35\x63\x71\xb3\xf6\x60\xdf\x2d\x23\x2b\
\xdf\xf5\xe5\xea\xde\xd5\xaf\x35\xec\x03\x02\x2c\x86\xd7\x56\xb7\
\x10\x54\x66\xea\x96\x0d\x93\x61\xdf\xdd\x30\x39\xf6\xd8\xea\x51\
\x23\x0b\x2a\x97\x7a\x66\xf5\xc0\x19\xfd\xee\xe5\x86\x39\x34\x63\
\x72\xb3\x3d\x3b\xb3\xc5\x5c\x67\x65\x77\xae\x59\xbd\xa8\x3a\x2f\
\xeb\x4c\x68\xda\x54\xdb\xf1\x2b\x5f\x50\xa6\xe8\xb8\x11\x5d\x87\
\x4b\x26\xd8\xb7\xbe\x56\x5d\x63\x46\xd7\xfe\x69\x23\xab\xd5\x9b\
\xbf\x57\x1e\xb1\xce\xca\x9e\x8f\xb2\x3c\xa9\xba\x5e\xf5\xeb\xd5\
\x09\x4a\x02\x93\x70\x66\xc3\x7a\x1b\x47\x37\xac\x5e\xfa\x76\x25\
\x99\xb9\x29\x7e\x0e\x5d\xad\xfa\xa3\x19\xfd\xee\xe3\xa7\x38\xb2\
\xb2\xb4\xe3\x88\xca\xd2\xd2\x92\x97\xcd\xce\x5d\xab\x61\x91\xa1\
\xdb\x54\x37\xaa\x8e\x68\x58\xda\x78\xbf\x66\x3b\xa1\x0a\xb8\xa2\
\x4b\x1a\x46\x46\xcf\xac\x4e\xab\x3e\xd6\x30\x3c\x7e\x62\xb3\x7d\
\x5a\x63\x33\x47\x56\xee\xa1\x1b\xcc\xd4\x72\xc3\xe4\xe0\x13\x37\
\xf9\xf7\x6e\x6b\x78\x8c\x79\xdb\x88\x6a\x75\xe5\xea\x9c\x5d\xde\
\xe1\x71\x1b\x08\x60\x92\x61\xc5\xad\xbe\xd9\xb7\x57\xcf\xe8\xfa\
\x7f\x60\x64\x75\xba\xd3\xe5\xf3\x88\xdb\x40\x00\xb0\x39\x1e\xd4\
\xb0\x2f\xd1\x66\xfb\xc8\xc8\xea\xf4\xfd\xbb\xfb\x01\x61\x05\x00\
\x36\xc6\x96\x86\x35\x61\x36\xdb\x47\x47\x56\xa7\xeb\x09\x2b\x00\
\x30\x3b\x0f\x9e\xc1\xef\x1c\xdb\xc8\xca\x75\x85\x15\x00\x98\x9d\
\x3b\x36\x2c\x29\x2f\xac\xec\x9c\x91\x15\x00\x98\xa1\x2d\xd5\x6d\
\x37\xf9\x77\x9e\xd5\xb8\xb6\x90\x11\x56\x00\x60\xc6\x6e\x3a\x83\
\xdf\xf9\x19\x61\x05\x00\xd8\x53\xd7\x9d\xc1\xef\x3c\x6d\x44\xf5\
\x39\xa4\x61\xad\x15\x61\x05\x00\x66\xe4\xd0\x19\xfc\xce\xcf\x8e\
\xac\x46\x87\x0b\x2b\x00\x30\x3b\xfb\xcd\xe0\x77\x9e\x36\xb2\x1a\
\x5d\x55\x58\x01\x80\xd9\xb9\x60\x06\xbf\xf3\xb3\x23\xab\xd1\xa1\
\xc2\x0a\x00\xcc\xce\x59\x33\xf8\x9d\xa7\x8f\xac\x46\x46\x56\x00\
\x60\x86\x3e\x37\x83\xdf\xf9\x75\x61\x05\x00\xd8\x53\x27\xcf\xe0\
\x77\x7e\xab\xba\x58\x58\x01\x00\x76\xe7\x82\xea\xa4\x19\xfc\xde\
\xe5\xea\x1b\x23\xaa\x93\x39\x2b\x00\x30\x23\xc7\x57\xe7\xcf\xe8\
\x77\x8f\xe9\x56\xd0\x41\xc2\x0a\x00\xcc\xc6\x3f\xcd\xf0\x77\x8f\
\x29\xac\x6c\x13\x56\x00\x60\xf3\x5d\x50\xfd\xe3\x0c\x7f\xff\xf9\
\x23\xaa\xd5\x56\x61\x05\x00\x36\xdf\x3f\x34\xdb\xd1\x8d\x0b\x46\
\x54\x2b\x61\x05\x00\x36\xd9\xc5\xd5\xef\xcf\xf8\x18\x2e\x14\x56\
\x00\x80\x9d\xf9\xb3\xea\xd3\x33\x3e\x06\x23\x2b\x00\xc0\xf7\xf4\
\x85\xea\x37\xe6\xe0\x38\x84\x15\x00\xe0\x0a\x2e\xae\x7e\xba\x3a\
\x67\x0e\x8e\x65\xfb\x88\xea\xb6\x24\xac\x00\xc0\xe6\x78\x72\xf5\
\x9e\x39\x39\x96\x6d\x23\xaa\xdb\x05\xc2\x0a\x00\x6c\xbc\xe7\x56\
\x2f\x9a\xa3\xe3\x11\x56\x00\x80\xcb\x3c\xbb\xf9\x98\xa7\x32\xd6\
\xb0\xb2\xcb\x27\x97\xf6\xd5\xbf\x00\x26\xe7\x12\x25\xd8\x34\xe7\
\x55\x8f\xaf\xfe\x7e\x0e\x8f\x6d\xeb\x88\xea\x78\x81\xb0\x02\xb0\
\x58\xce\x55\x82\x4d\xf1\xfe\xea\xd1\xd5\xa7\xe6\xf4\xf8\xdc\x06\
\x02\x60\x6e\x9d\xa1\x04\x1b\xea\xf3\xd5\x63\xaa\xbb\xcc\x71\x50\
\xa9\xba\xea\x88\x6a\xea\x36\x10\xc0\x82\x39\x55\x09\x36\xc4\xfb\
\xaa\x3f\xaf\x8e\x6d\x1c\xab\xc3\x5e\x6d\x44\xb5\x75\x1b\x08\x60\
\xc1\x9c\xa0\x04\xeb\xe2\x3b\xd5\x3b\xab\x37\x55\xaf\x6b\x18\x51\
\x19\x93\x31\x85\x95\xb3\x85\x15\x80\xc5\xf2\xc1\x95\x6f\xfe\x5b\
\x95\x62\xd5\xfe\xb3\x7a\xe3\x4a\x7b\x67\xe3\xda\xb9\x78\x47\x07\
\x35\xae\x39\x2b\x5f\x13\x56\x00\x16\xcb\xb7\xab\xe3\xaa\xfb\x28\
\xc5\x6e\x5d\x50\xbd\xbb\x61\xf4\xe4\x8d\xd5\x27\x27\x72\x5e\x87\
\x8d\xec\x78\xbf\x2e\xac\x00\x2c\x9e\x7f\x10\x56\x76\xea\xf3\x3b\
\x84\x93\xb7\xaf\x84\xbb\xa9\x39\x5c\x58\x01\x60\xde\xbd\xaa\x7a\
\x41\x75\xa8\x52\x74\x51\x75\xfc\x4a\x38\x79\x53\xf5\xb1\x05\x38\
\xe7\x23\x47\x76\xbc\x6e\x03\x01\x2c\xa0\xf3\x1a\x96\x7e\xff\xcd\
\x05\x3d\xff\x33\xfa\xee\xe8\xc9\x5b\xdb\xcd\x04\x4e\x61\x65\xe6\
\x76\x39\xb2\xd2\xf2\xf2\xf2\x65\x0d\x80\x49\x39\xa4\x3a\xb3\x5a\
\x5e\x80\x76\x71\xf5\xde\xea\x19\xd5\x6d\x5c\xfa\xfe\x72\x64\xd7\
\xef\x90\x1d\xf3\xc8\xe5\x9b\xb0\x02\x30\x6d\x8f\x99\x70\x40\xf9\
\x6a\xf5\xb7\xd5\xc3\x72\xbb\xeb\xf2\xde\x32\xa2\xeb\x78\xe1\xe5\
\x07\x4f\x84\x15\x80\xc5\xf3\x86\x89\x84\x93\x4b\x1a\x1e\xcb\x7e\
\x76\x75\x87\x6a\xc9\xa5\xdd\xa9\x53\x47\x74\x5d\x4f\x15\x56\x00\
\xb8\x5a\x75\xda\x48\x03\xca\xd7\xab\x57\x54\x8f\xac\xae\xee\x52\
\xee\x91\x2b\xad\x04\xbb\xb1\x5c\xe3\xb7\xed\x2e\xac\x98\x60\x0b\
\x30\x7d\x5f\xaf\xee\x57\xbd\xa7\xf9\x5f\xd5\x74\xb9\x3a\xa9\xef\
\x2e\xcc\xf6\xc1\xec\x22\xbd\x5a\x3f\xd0\xb8\xf6\xfe\x3b\x6d\x77\
\x3f\x20\xac\x00\x2c\x86\x53\x1a\xd6\x5d\x79\x73\xf3\xb7\x60\xd8\
\xd9\x0d\x4f\xec\x5c\xfa\x68\xb1\x8d\x18\xd7\xe6\x96\x23\x3b\x5e\
\x61\x05\x80\xcb\x9c\xd0\xb0\x53\xf0\xeb\xab\x1b\xcd\xf8\x58\x3e\
\xd6\x77\x47\x4f\x8e\x6f\x78\x9a\x87\xc5\x0c\x2b\x9f\x15\x56\x98\
\x47\xdb\xaa\x9b\x56\x47\x35\xac\xb2\x78\x60\xb5\x45\x59\x58\x47\
\xdb\x1b\xf6\x74\x39\x73\xe5\x5b\xdb\xc9\xd5\x39\xca\x52\xd5\xa7\
\xaa\x3b\x56\x7f\x51\x3d\x74\x13\x7f\xef\xb9\x0d\xab\xc5\x5e\xba\
\xf6\xc9\x17\x5c\x8a\x0d\x73\x8b\x91\x1d\xef\x6e\x47\x56\x4c\xb0\
\x65\xb3\xdc\xa8\x7a\x66\xc3\x16\xeb\x17\xb4\x18\xeb\x3e\x68\xf3\
\xd3\xb6\x57\x1f\xa9\xfe\xa8\x3a\xda\xcb\xf1\x32\x0f\x6a\xd8\xb8\
\x6f\xa3\xea\xfe\x89\xea\xf9\xd5\x0f\x67\x53\xc5\xcd\xb2\x4f\xf5\
\xad\x91\xbd\x3e\xaf\x79\xf9\x3c\xe2\x69\x20\x36\xdb\xfd\x1b\x76\
\x2e\xf5\x81\xa9\xcd\x53\x3b\xb9\x7a\x5c\xb5\x9f\x97\x68\x5b\xab\
\x27\x56\x9f\x5e\x87\xba\x9e\xd7\xf0\x98\xf4\x2f\x55\x37\x50\xda\
\x99\xb8\xd5\xc8\x5e\x8b\x67\xb7\xf2\x08\xba\xb0\xc2\x2c\xdc\x7e\
\x65\x14\xc5\x07\xa3\x36\xcf\xed\x3f\xab\x07\x7b\xb9\xd6\xca\x07\
\xc6\xbd\xaa\x3f\x6b\x98\x43\xb0\xa7\x35\xfc\x54\xc3\xb2\xfe\xc7\
\x54\xfb\x2b\xe3\xcc\xfd\xe2\xc8\x5e\x83\xef\xbb\xf4\xc0\x3d\xba\
\xcc\x66\xda\xb7\x7a\x4e\xf5\xd4\xcc\x43\x61\xfe\x1d\x59\xfd\x53\
\xf5\xea\xea\xf1\xd5\x59\x0b\x5c\x8b\xe5\x95\x51\xd0\x77\xae\xfc\
\xfb\xb5\xab\x5b\x57\xdf\xdf\x77\xe7\x96\x6d\x6f\x98\x7b\xf2\xe5\
\xea\x33\xd5\x89\xd5\x57\x74\xa3\xb9\x72\xd7\x91\x1d\xef\x47\xf6\
\xac\x77\x1a\x59\x61\xfd\x1c\x56\xbd\xcb\xb7\x75\x6d\xc4\xa3\x2c\
\x37\xf7\x32\x66\xe4\x3e\x3f\xb2\xd7\xdd\x2f\xec\xc9\xc8\xca\x3e\
\xae\x2b\xeb\xe4\x3a\x0d\x8f\x1f\xde\x5d\x29\x18\xf1\x28\xcb\x7b\
\x1b\x1e\xed\x85\xb1\xf6\xe1\xeb\x8e\xec\x98\x3f\xba\x27\x3f\x24\
\xac\xb0\x1e\x0e\x6f\x18\x3a\xbe\x91\x52\x30\x72\x87\x34\x3c\x5a\
\x7b\x07\xa5\x60\x84\x8e\x19\xe1\x31\xef\xd1\x6d\x20\x61\x85\xb5\
\xda\xbf\x61\x81\xa9\xef\x57\x0a\x26\xe2\xe0\xea\x5f\x47\xf8\x0d\
\x15\xc6\x16\x56\x3e\xdf\xf0\x98\xb5\xb0\xc2\x86\x7b\x81\x6f\xa1\
\x4c\xd0\x35\xaa\x57\x66\xe1\x4c\xc6\x63\x5b\xf5\x43\x23\x3b\xe6\
\x0f\xef\xe9\x0f\x0a\x2b\xac\xc5\x8f\x56\x4f\x50\x06\x26\xea\xe8\
\x86\xa7\xda\x60\x0c\xee\xde\xb0\xdb\xf2\x98\x7c\x40\x58\x61\xa3\
\x6d\xad\x5e\xac\x0c\x4c\xdc\x33\xab\xeb\x29\x03\x23\x70\xff\x11\
\x1e\xf3\xf1\xc2\x0a\x1b\xed\xe7\x32\x4f\x85\xe9\x3b\xa0\x7a\xb6\
\x32\x30\xe7\xf6\xa9\x1e\x32\xb2\x63\xbe\xa8\xfa\xf7\x3d\xfd\xe1\
\xa5\x1d\xd7\x57\x59\x5a\x5a\x72\xc9\xd9\x13\x5b\xaa\x53\xab\xeb\
\x2b\x05\x0b\xe0\xa2\x86\xa5\xe3\x4f\x57\x0a\xe6\xd4\xdd\x1b\xd6\
\xb8\x1a\x93\x0f\x76\xb9\x7d\xba\x76\xb5\xde\x9b\x91\x15\xf6\xc6\
\x7d\x05\x15\x16\xc8\x7e\x0d\x23\x89\x30\xaf\x7e\x6a\x84\xc7\x7c\
\xfc\x6a\x7e\x58\x58\x61\x6f\x3c\x42\x09\xd0\xe7\x61\x2e\x6c\xa9\
\x7e\x52\x58\x81\x2b\xbe\x30\x8e\x51\x06\x16\xcc\x0d\x57\x1a\xcc\
\x9b\x1f\x6a\x58\x98\x73\x6c\xde\x2b\xac\xb0\x91\x6e\x51\x1d\xaa\
\x0c\x2c\xa0\x7b\x2a\x01\x73\x68\x8c\xb7\x28\x4f\xae\xbe\x2a\xac\
\xb0\x91\x6e\xab\x04\xe8\xfb\x30\x17\x0e\xab\x1e\x34\xc2\xe3\xfe\
\xb7\xd5\xfe\x07\xc2\x0a\xab\x65\xff\x1f\xf4\x7d\x98\x0f\x8f\x6a\
\x58\xf3\x6a\x6c\xde\x24\xac\xb0\xd1\xae\xad\x04\xe8\xfb\x30\x17\
\xc6\x78\x0b\xe8\xdc\xea\x3d\xc2\x0a\x1b\xed\x60\x25\x60\x41\x5d\
\x59\x09\x98\x23\xf7\xac\x6e\x32\xc2\xe3\x7e\x47\x75\xa1\xb0\xc2\
\x46\xd3\x67\x58\x54\x5b\x94\x80\x39\xf2\xeb\x23\x3d\xee\x37\xed\
\xcd\x7f\xe4\x83\x87\xd5\xfa\x8e\x12\xb0\xa0\xce\x53\x02\xe6\xc4\
\xcd\xaa\xfb\x09\x2b\xb0\x73\x67\x2a\x01\xfa\x3e\xcc\xd4\x53\xaa\
\x31\xee\x8f\xf3\xd1\xea\x73\xc2\x0a\x9b\xe1\xb3\x4a\xc0\x82\x3a\
\x4d\x09\x98\x03\xd7\xac\x7e\x7a\xa4\xc7\x7e\xec\xde\xfe\x87\xc2\
\x0a\xab\xf5\x11\x25\x60\x41\x7d\x54\x09\x98\x03\x4f\xa9\xb6\x09\
\x2b\xb0\x6b\xff\x5e\x6d\x57\x06\x16\xd0\xf1\x4a\xc0\x8c\x5d\xab\
\xfa\xc5\x91\x1e\xfb\x89\xd5\xa9\xc2\x0a\x9b\xe5\x9b\x2b\x81\x05\
\x16\xc9\xb7\xab\xf7\x29\x03\x33\xf6\x8c\xea\x80\x91\x1e\xfb\xb1\
\x6b\xf9\x8f\x85\x15\xf6\xc6\xab\x94\x80\x05\xf3\xfa\xea\x02\x65\
\x60\x86\xae\x5b\x3d\x6e\xc4\xc7\xff\x4a\x61\x85\xcd\xf6\xf2\xf6\
\x62\x51\x1f\x18\xb1\x97\x2a\x01\x33\xf6\xcc\xc6\x3b\x57\xe5\xdf\
\x5b\xe3\x04\x75\x61\x85\xbd\xf1\xd5\xea\x1f\x94\x81\x05\xf1\x91\
\xea\xad\xca\xc0\x0c\xdd\xbc\x7a\xec\x88\x8f\x7f\xcd\x9f\x17\x4b\
\xcb\xcb\xcb\xdf\xfd\x97\xa5\x25\x5d\x82\x3d\x75\x54\xf5\xf1\xc6\
\xb9\x89\x16\xac\xc6\x83\xab\xd7\x2a\x03\x33\xf4\xb6\xea\xde\x23\
\x3d\xf6\x0b\xaa\x23\xaa\x6f\xec\xee\x07\x77\xcc\x23\x97\x67\x64\
\x85\xbd\xf5\x99\xea\x85\xca\xc0\xc4\xbd\x5d\x50\x61\xc6\x1e\x34\
\xe2\xa0\x52\xf5\x9a\x3d\x09\x2a\xbb\x63\x64\x85\xb5\xb8\x52\x75\
\x42\x75\x63\xa5\x60\x82\xce\xad\x6e\xbd\x12\xcc\x61\x16\xb6\x36\
\x8c\x60\x1f\x35\xe2\x73\xb8\x57\x75\xdc\x9e\xfc\xa0\x91\x15\x36\
\xca\x79\xd5\xc3\xb2\x5f\x10\xd3\xf4\x04\x41\x85\x19\xfb\xf5\x91\
\x07\x95\x4f\xef\x69\x50\xd9\x1d\x61\x85\xb5\x3a\xa9\x7a\x74\xb5\
\xac\x14\x4c\xc8\x1f\x56\x7f\xaf\x0c\xcc\xd0\x8d\xaa\x67\x8d\xfc\
\x1c\xfe\x7a\xdd\xfe\xa6\xe5\xe5\xe5\xcb\x1a\xac\xc1\xe3\x1b\x56\
\xb6\x5d\xd6\xb4\x91\xb7\xbf\x6c\x9c\x9b\xc4\x31\x1d\x4b\xd5\xbb\
\x46\xfe\x3a\xba\xb0\xba\xc6\xde\xe6\x91\xcb\x37\x61\x85\xf5\xf4\
\xd3\x0d\x33\xbf\x7d\xe0\x69\x63\x6d\xcf\x13\x54\x98\x93\x2f\x7f\
\x63\x7f\x2d\xbd\x62\x2d\x83\x27\xc2\x0a\x1b\xed\xe8\x86\x2d\xc0\
\x7d\xf0\x69\x63\x6a\xe7\x34\xde\x9d\x6c\x99\x96\x23\x1a\xb6\x35\
\x19\xfb\x6b\xea\x76\xc2\x0a\xf3\xee\x2a\xd5\x5f\xe5\xb6\x90\x36\
\x8e\xf6\xd6\xc6\x3d\x89\x91\xe9\x58\x5a\xe9\x8f\x63\x7f\x4d\xbd\
\x73\x6f\x4e\x5e\x58\x61\x56\x6e\x5f\xbd\xc9\x87\xa1\x36\xa7\xed\
\xa4\xea\x81\x5e\xa6\xcc\x91\xa7\x4c\xe4\xb5\x75\x7f\x61\x85\x31\
\xba\x45\xf5\xbf\xab\x33\x7c\x40\x6a\x33\x6e\x67\x37\x3c\xe5\x73\
\x6f\x2f\x4b\xe6\xcc\xad\x9b\xc6\x9c\xbf\x93\xdb\xcb\x79\x5f\xbb\
\x0a\x2b\x16\x85\x63\xb3\x87\x38\x6f\x5d\xdd\xad\xba\x55\xc3\xd0\
\xfb\x35\xab\x83\xaa\x7d\x95\x87\x75\x74\x49\xc3\x3a\x40\x67\x36\
\x6c\xa0\xf6\xb1\xea\x7d\xd5\xfb\xb3\x09\x27\xf3\xe7\x80\xea\x43\
\xd5\x4d\x27\x70\x2e\x8f\x6d\x2f\x37\xfe\xdc\xe5\xa0\x89\x91\x15\
\x00\x98\xa9\x97\x35\x8d\x91\xcb\x33\x5a\xc3\x7e\x71\x6e\x03\x01\
\xc0\x7c\xfa\x85\xa6\x73\x9b\xf5\x57\xd7\x52\x08\x61\x05\x00\xe6\
\xcf\x0f\x36\xdc\x96\x9c\x42\x50\x39\xbd\xda\x5f\x58\x01\x80\xe9\
\x38\xbc\xfa\xe2\x84\x46\x55\x7e\x61\xad\x05\x11\x56\x00\x60\x7e\
\xec\xdf\x30\xe1\x7b\x2a\x41\xe5\xb3\xad\x61\xae\x8a\xb0\x02\x00\
\xf3\x65\xa9\x7a\x55\xd3\x5a\x12\xe0\xb1\xeb\x51\x18\x61\x05\x00\
\xe6\xc3\x1f\x4e\x2c\xa8\x9c\xda\x3a\x2d\x3d\x21\xac\x00\xc0\xec\
\xfd\x7c\xd3\x5b\x68\xf1\x11\xeb\x55\x1c\x61\x05\x00\x66\xeb\x27\
\xab\x8b\x27\x16\x54\xde\xdf\x3a\xee\x52\x2e\xac\x00\xc0\xec\xdc\
\xaf\xe9\x3c\xa2\x7c\x69\xdb\x5e\xdd\x71\x3d\x8b\x24\xac\x00\xc0\
\x6c\xdc\xab\xfa\x4e\xd3\xbb\xfd\xf3\xb2\xf5\x2e\x94\xb0\x02\x00\
\x9b\xef\xe8\xea\x9c\x09\x06\x95\x73\xab\x23\x84\x15\x00\x18\xb7\
\x7b\x4c\x34\xa8\x2c\x57\xbf\xb1\x11\x05\x13\x56\x00\x60\xf3\x1c\
\xd3\xb0\xeb\xf7\x14\x83\xca\x7f\xb6\xc6\x65\xf5\x85\x15\x00\x98\
\xad\xff\x56\x5d\x30\xd1\xa0\xb2\x5c\xdd\x77\xa3\x0a\x27\xac\x00\
\xc0\xc6\x7b\x4c\xd3\x7b\x3c\x79\xc7\xf6\xf2\x8d\x2c\x9e\xb0\x02\
\x00\x1b\x67\xa9\xfa\xbd\x09\x87\x94\xe5\xea\xcc\xea\x30\x61\x05\
\x00\xc6\x67\x5b\xf5\xf7\x13\x0f\x2a\xcb\xd5\x4f\x6f\x74\x21\x85\
\x15\x00\x58\x7f\x57\xab\xde\xb5\x00\x41\xe5\x8d\x9b\x51\x4c\x61\
\x05\x00\xd6\xd7\x6d\xab\xd3\x16\x20\xa8\x9c\x5b\x7d\x9f\xb0\x02\
\x00\xe3\xf2\xb3\x4d\x73\x55\xda\xef\xd5\x1e\xb7\x59\x45\x15\x56\
\x00\x60\xed\xb6\x56\x7f\xbe\x20\x21\x65\xb9\x7a\xcd\x66\x16\x57\
\x58\x01\x80\xb5\xb9\x71\xf5\xa1\x05\x0a\x2a\xa7\x57\x57\x15\x56\
\x00\x60\x1c\x7e\xbe\xfa\xf6\x02\x05\x95\xed\xd5\xbd\x37\xbb\xc8\
\xc2\x0a\x00\xac\xde\x61\xd5\x6b\x17\x28\xa4\x5c\xda\xfe\xd7\x2c\
\x8a\x2d\xac\x00\xc0\xea\x3c\xb4\xfa\xf2\x02\x06\x95\x13\x1a\xe6\
\xe6\x08\x2b\x00\x30\xa7\xae\x53\xbd\x6e\x01\x43\xca\x72\x75\x56\
\x75\xd4\xac\x0a\x2f\xac\x00\xc0\xae\xed\x53\xfd\x42\x75\xf6\x82\
\x06\x95\xed\xd5\xfd\x67\x79\x01\x84\x15\x00\xd8\xb9\xbb\x35\xdc\
\xfe\x58\x5e\xe0\xf6\x5b\xb3\xbe\x08\xc2\x0a\x00\x5c\xd1\xf7\x55\
\xaf\x5c\xf0\x90\xb2\x5c\xbd\xa1\x61\x33\x46\x61\x05\x00\xe6\xc4\
\x55\x1a\x76\x49\x5e\x94\x55\x68\x77\xd5\x4e\xad\x0e\x9d\x87\x8b\
\x22\xac\x00\x40\x1d\x5c\x3d\xab\x61\x22\xe9\xb2\xd6\xb9\xd5\x2d\
\xe7\xe5\xe2\x08\x2b\x00\x2c\xb2\x03\xab\xa7\x56\x5f\x13\x50\x2e\
\x6b\x17\x57\x3f\x36\x4f\x17\x49\x58\x01\x60\x11\x5d\xb3\xe1\x76\
\xcf\xd7\x85\x93\x2b\xb4\x5f\x9c\xb7\x8b\x25\xac\x00\xb0\x48\x6e\
\x5a\xbd\xa4\x3a\x5f\x28\xf9\x9e\xed\xf9\xf3\x78\xd1\x84\x15\x00\
\xa6\x6e\x5b\xf5\xb0\xea\xed\x0d\x6b\x86\x08\x25\x3b\xdf\x49\x79\
\x1f\x61\x05\x00\x36\x77\x14\xe5\xf9\x99\x8f\xb2\x27\xed\x83\xd5\
\x01\xf3\x7a\x21\x85\x15\x00\xa6\xe4\xc8\xea\x69\x59\xc8\x6d\x35\
\xed\x94\xea\xea\xf3\x7c\x51\x85\x15\x00\xc6\xee\x86\xd5\x53\xaa\
\x7f\x17\x3c\x56\xdd\x3e\x53\x1d\x31\xef\x17\x78\x57\x61\x65\x5f\
\xfd\x1f\x80\x39\x74\xa5\xea\x5e\xd5\x31\xd5\x7d\x9b\xe1\x06\x7b\
\x23\xf7\xc5\xea\xde\xd5\x97\xc6\x7c\x12\xc2\x0a\x00\xf3\xe0\x90\
\xea\xce\xd5\x5d\xab\xbb\x54\x77\xaa\xf6\x57\x96\x35\xf9\xea\x4a\
\x50\xf9\xec\xd8\x4f\x44\x58\x01\x60\xb3\x1d\xda\xb0\x72\xea\x2d\
\xab\x5b\x54\x47\x57\x3f\xd0\x9c\x3e\xa5\x32\x52\x67\x55\xf7\xa9\
\x3e\x39\x85\x93\x11\x56\x00\xd8\xa8\x40\x72\xe4\x4a\xbb\xfe\xca\
\x9f\x47\xad\x84\x93\x6b\x2b\xcf\x86\xfa\x66\xc3\xad\xb3\x0f\x4f\
\xe5\x84\x84\x15\x98\xbd\xa5\x95\x37\xf3\x1b\x36\x4c\x82\x3b\xc8\
\x6b\x73\xcd\xb6\x37\x2c\x08\x76\x66\x75\x5a\xf5\x89\x95\x7f\x67\
\x75\x9f\x0f\x37\xaa\x6e\x50\x1d\xde\x30\x87\x64\xdb\x4a\xdb\xba\
\xc3\x3f\x6f\xab\xae\x56\x1d\x76\xb9\x3f\xdd\xc2\x99\x8d\x33\x1b\
\x46\x54\x4e\x9a\x5a\x67\x04\x36\xdf\x61\xd5\x4f\x56\xf7\x6f\xb8\
\x47\x7f\x15\x25\xd9\x50\x17\x57\x27\x56\x6f\xa9\x5e\x3d\xb5\x37\
\xf2\x75\x74\x54\xf5\x90\x86\x49\xad\x77\x14\x38\x46\xe7\x4b\xd5\
\x0f\x37\x3c\xa6\x3c\x2d\x1e\x5d\x86\x4d\x75\x9b\xea\x1f\xaa\x0b\
\xf2\x38\xe5\x2c\xdb\x09\xd5\x23\xab\x2d\xba\x64\x55\xf7\xcb\xca\
\xaf\x63\x6f\xa7\x35\x8c\x82\x4d\x22\x8f\x58\x67\x05\x66\xe3\xba\
\xd5\xb1\xde\x50\xe7\xae\x7d\x62\x65\x14\x61\x51\xdd\xae\x3a\x5e\
\x3f\x98\x44\x3f\xbe\xce\x94\x06\x4f\x84\x15\xd8\x7c\x3f\x5b\x9d\
\xed\x0d\x75\xae\xdb\xdf\x55\x57\x5e\xa0\x3e\xb9\x5f\xf5\xdc\x86\
\xdb\x63\xae\xff\xb8\xdb\x87\xaa\x6b\x4c\xa1\x53\x0a\x2b\x30\x1b\
\x5b\xab\x97\x7a\x33\x1d\x4d\xfb\x64\x75\xe3\x05\xe8\x97\x57\xaf\
\xde\xe3\x7a\x4f\xa2\xbd\xbe\x3a\x70\x2a\x1d\x53\x58\x81\xcd\x77\
\xa5\xea\xad\xde\x4c\x47\xd7\xbe\x5e\xdd\x61\xc2\xfd\xf2\x7a\xd5\
\xa7\x5d\xe7\x49\xb4\x17\x35\xb1\x39\x57\xc2\x0a\x6c\xae\x6d\xd5\
\xdb\xbc\x99\x8e\xb6\x7d\xb3\xba\xf5\x04\xfb\xe5\xb5\x1a\xf6\x88\
\x71\x8d\xc7\xdd\x2e\xa9\x7e\x6d\x8a\x6f\x9c\xc2\x0a\x6c\xae\xbf\
\xf3\x86\x3a\xfa\xf6\xc5\x46\xb0\xf1\xdb\x2a\x1c\xd0\x30\xb7\xc1\
\xb5\x1d\x77\x3b\xaf\x7a\xf0\x54\xdf\x38\x85\x15\xd8\x3c\x4f\xf4\
\x86\x3a\x99\xf6\xee\xa6\x33\xcc\xfe\x12\xd7\x73\xf4\xed\xf3\xd5\
\xed\xa7\xfc\xe6\x29\xac\xc0\xe6\x38\x6a\xe5\x9b\x8f\x37\xd6\xe9\
\xb4\xff\x31\x81\x7e\xf9\x00\xd7\x71\xf4\xed\xed\x0d\x13\xa3\x27\
\x4d\x58\x81\xcd\xf1\x06\x6f\xaa\x93\x6b\xe7\x36\xee\x7d\x6c\xf6\
\xaf\x3e\xe7\x3a\x8e\xba\xfd\x41\x0b\xb2\x78\xe1\xae\xc2\x8a\x1d\
\x2e\x61\x7d\xdc\xbd\x61\x15\x50\xa6\xe5\xc0\xea\x37\x47\x7c\xfc\
\xbf\xd0\xf0\x04\x10\xe3\x73\x4e\xc3\x96\x1c\x4f\x6b\x98\x54\xbb\
\xd0\x96\x76\x1c\x51\x59\x5a\x5a\xd2\x3d\x60\xef\xbc\xb1\xc5\x5e\
\x09\x75\xca\x2e\x6c\xd8\x68\xf2\x8c\x91\x1d\xf7\x7e\xd5\x67\x9b\
\xd6\x44\xe1\x45\x71\x4a\xf5\x13\x4d\x71\x8f\x9f\x5d\xd8\xd5\x1d\
\x1e\x23\x2b\xb0\x76\x47\x36\x6c\xc7\xce\x34\x6d\xad\x7e\x6e\x84\
\xc7\xfd\x40\x41\x65\x94\xfe\xa2\x61\x22\xed\x29\x4a\x21\xac\xc0\
\x7a\x7a\x78\x65\x58\x72\xda\x1e\xe1\x98\xd9\x60\x5f\x5b\x09\x98\
\x4f\x68\x98\xa8\xcf\x0e\xdc\x06\x82\xb5\x7b\x7f\x75\xb4\x32\x4c\
\xde\xf7\x37\x2c\xaa\x36\x06\xdb\x1a\x56\xe3\x3d\xd0\x65\x1b\x85\
\xb7\x54\x8f\xaa\xbe\xbc\xc8\x45\x70\x1b\x08\x36\xce\x95\x9a\xf8\
\xda\x07\x5c\xe6\x9e\x23\x3a\xd6\xdb\x09\x2a\xa3\x70\x41\xf5\xe4\
\x86\xdb\xc8\x5f\x56\x8e\x84\x15\xd8\x20\xb7\xac\xf6\x55\x86\x85\
\x70\x5b\xc7\xca\x3a\x3a\xbe\xba\x4d\xf5\x82\x86\x47\x94\x11\x56\
\x60\xc3\xdc\x44\x09\x16\xc6\x8d\xf5\x4b\xd6\xc1\x39\xd5\x2f\x56\
\x77\xcb\x24\xda\x3d\xe6\x1b\x21\xac\xcd\xb5\x94\x60\x61\x5c\x5b\
\xbf\x64\x8d\x5e\xdf\xb0\xf6\xcd\xe9\x4a\xb1\x3a\x46\x56\x60\x6d\
\x0e\x51\x02\xd7\xda\xb1\xb2\x1b\x5f\xa9\x1e\x5a\xfd\xb8\xa0\x22\
\xac\x80\xd7\x10\x1b\x69\x8b\x7e\xc9\x2a\x5d\x54\xfd\x71\x75\xa3\
\xea\x95\xca\xb1\xf7\xdc\x06\x82\xb5\x39\x5f\x09\x16\xc6\x77\xf4\
\x4b\x56\xe1\x5f\x1b\x9e\xf4\xf9\xb4\x52\x08\x2b\x30\x6b\x67\x2a\
\x81\x6b\xed\x58\xd9\xc1\xc7\xab\x5f\x6b\x58\x3b\x85\x75\x62\xa8\
\x10\xd6\xe6\xb3\x4a\xb0\x30\x4e\xd3\x2f\xd9\x85\xaf\x56\x4f\xaa\
\x6e\x25\xa8\xac\x3f\x23\x2b\xb0\x36\x1f\x55\x02\xd7\xda\xb1\x2e\
\xb4\x6f\x54\x7f\x58\xbd\xb8\xfa\xb6\x72\x08\x2b\x30\xaf\xdf\x60\
\xbf\x5c\x5d\x53\x29\x26\xef\x7d\x23\x3a\xd6\x0f\xb8\x5c\x1b\xee\
\x5b\xd5\xf3\xab\x17\x56\x67\x2b\xc7\xc6\x72\x1b\x08\xd6\xce\x90\
\xef\xf4\x9d\xd7\xb0\xe2\xe8\x58\x9c\xde\x30\x77\x82\xf5\x77\x6e\
\xf5\x7b\x0d\xbb\xad\x3f\x47\x50\x11\x56\x60\x2c\x5e\xad\x04\x93\
\xf7\x86\xc6\xf7\x84\x8d\x7e\xb9\xbe\xce\xa8\x9e\x51\x5d\xaf\x7a\
\x66\x75\x96\x92\x6c\xa2\xe5\xe5\xe5\xcb\x1a\xb0\x57\xf6\x5d\x79\
\x23\x5b\xd6\x26\xdb\x8e\x19\x61\xbf\x3c\xb2\xba\xc4\xb5\x5b\x73\
\x3b\xb9\xfa\xd9\x6a\xab\xb7\xba\xcd\xcb\x23\x97\x6f\xc2\x0a\xac\
\x8f\xdf\xf0\xa6\x3e\xe9\x0f\xab\xa5\x91\xf6\xcb\x7f\x76\xfd\xf6\
\xba\xbd\xa3\xba\xdf\x88\xaf\xbd\xb0\x02\x5c\xc1\x95\x1b\xd6\xb6\
\xf0\x26\x3f\xbd\xf6\x90\x11\xf7\xcb\x5b\x1b\x5d\x59\x55\xfb\x7a\
\xc3\x84\xd9\x1f\xf0\x96\x26\xac\xc0\x54\xfd\xbc\x37\xfb\xc9\xb5\
\x77\x4d\xa0\x5f\xfe\x95\xeb\xb8\xdb\xf6\xce\xea\x11\xd5\x36\x6f\
\x63\xc2\x0a\x4c\xdd\x52\x75\x9c\x37\xfe\xc9\xb4\x6f\x57\x37\x9e\
\x40\xbf\x3c\xb4\xe1\xe9\x20\xd7\xf4\xbf\xb6\xd3\xab\x3f\xa8\x6e\
\xe8\xad\x4b\x58\x81\x45\x73\x9d\x86\x95\x2c\x7d\x18\x8c\xbf\x3d\
\x66\x42\xfd\xf2\xee\x0d\x9b\xea\x2d\xfa\x35\x3d\xa3\x7a\x51\x75\
\xd7\xcc\x45\x11\x56\x60\xc1\xdd\xb5\xe1\x31\x57\x1f\xf8\xe3\x6d\
\x7f\x32\xc1\x7e\xf9\x84\x05\xbd\x96\x5f\xad\xfe\xac\xba\x57\x96\
\xeb\x10\x56\x80\xff\xe2\xc7\xaa\x0b\x7c\xe8\x8f\xb2\xbd\x6c\xc2\
\xdf\xba\x9f\xb1\x20\xd7\xf0\xa4\xea\xf7\x57\x46\x94\xac\xd4\x2e\
\xac\x00\xbb\xf0\x23\x0d\x4b\x72\x0b\x00\xe3\x1a\x51\x99\xfa\xed\
\x81\x27\x35\xbd\x27\x84\xce\xaa\x5e\xd9\xb0\x1e\xca\xb5\xbc\xf5\
\x08\x2b\xc0\xea\xdc\xb4\x61\x9d\x0e\x41\x60\xbe\xdb\x79\xd5\x63\
\x17\xa8\x5f\xfe\x68\xe3\x9e\x5b\xf5\x95\xea\x9f\xaa\xa7\x54\x47\
\x1b\x3d\x11\x56\x80\xb5\x3b\xa0\xfa\x5f\x99\xe0\x38\xaf\xed\x3d\
\x2b\xa1\x72\xd1\x5c\xb3\x7a\xd5\x08\xae\xcf\xc5\x2b\x81\xff\xaf\
\xab\x47\xe7\xe9\x1d\x61\x05\xd8\xf0\x51\x96\x57\x67\x91\xae\x79\
\x69\xa7\x54\x0f\xd5\x2d\xbb\x57\xc3\x26\x8d\xf3\x32\x19\xf6\x6d\
\x0d\xbb\x19\x3f\xba\xba\x5d\xb5\xbf\x4b\x24\xac\x2c\xed\x18\x52\
\x96\x96\x3c\xc9\x05\x9b\xe0\x86\xd5\xe3\xab\x87\x35\x3c\xea\xcc\
\xe6\x39\xbf\x7a\x63\xf5\xd2\x95\x3f\x7d\x4b\xfb\xae\x3b\x57\x3f\
\x57\x3d\xb8\xba\xca\x06\xfd\x8e\xed\xd5\x17\xab\xd3\x56\xda\x67\
\x77\xf8\xe7\x4f\x55\x5f\x76\x19\x16\x3b\xac\xec\x8c\xb0\x02\xb3\
\xb3\x54\xdd\xb2\xe1\x89\x85\xdb\xac\x84\x98\x23\xaa\x83\xab\xfd\
\xb2\x0e\xc4\x5a\x5c\xd2\x30\x0f\xe5\xcc\x95\x0f\xc2\x8f\x55\xef\
\xab\xde\x5d\x7d\x47\x79\x76\x69\xdf\x86\x79\x20\x77\xad\x6e\x55\
\x1d\x55\x1d\x5e\x1d\xb8\x12\xee\x2e\x6c\x78\xd2\xed\x82\xcb\xfd\
\xf3\x05\xd5\x37\x1b\x96\xac\xff\xda\xf7\xf8\xf3\xcc\xea\x0b\x0d\
\xb7\x43\x61\xef\xc3\x0a\x00\xc0\xbc\xb1\x40\x0e\x00\x20\xac\x00\
\x00\x08\x2b\x00\x80\xb0\x02\x00\x20\xac\x00\x00\x08\x2b\x00\x80\
\xb0\x02\x00\x20\xac\x00\x00\xc2\x0a\x00\x80\xb0\x02\x00\x20\xac\
\x00\x00\xc2\x0a\x00\x80\xb0\x02\x00\x08\x2b\x00\x00\xc2\x0a\x00\
\x80\xb0\x02\x00\x08\x2b\x00\x00\xc2\x0a\x00\x20\xac\x00\x00\x08\
\x2b\x00\x00\xc2\x0a\x00\x20\xac\x00\x00\x08\x2b\x00\x80\xb0\xa2\
\x04\x00\x80\xb0\x02\x00\x20\xac\x00\x00\xc2\x0a\x00\x80\xb0\x02\
\x00\x20\xac\x00\x00\xc2\x0a\x00\x80\xb0\x02\x00\x08\x2b\x00\x00\
\xc2\x0a\x00\x80\xb0\x02\x00\x08\x2b\x00\x00\xc2\x0a\x00\x20\xac\
\x00\x00\x08\x2b\x00\x00\xc2\x0a\x00\x20\xac\x00\x00\x08\x2b\x00\
\x80\xb0\x02\x00\x20\xac\x00\x00\x08\x2b\x00\x80\xb0\x02\x00\x20\
\xac\x00\x00\xc2\x0a\x00\x80\xb0\x02\x00\x20\xac\x00\x00\xc2\x0a\
\x00\x80\xb0\x02\x00\x20\xac\x00\x00\xc2\x0a\x00\x80\xb0\x02\x00\
\x08\x2b\x00\x00\xc2\x0a\x00\x80\xb0\x02\x00\x08\x2b\x00\x00\xc2\
\x0a\x00\x20\xac\x00\x00\x08\x2b\x00\x00\xc2\x0a\x00\x20\xac\x00\
\x00\x08\x2b\x00\x80\xb0\x02\x00\x20\xac\x00\x00\x08\x2b\x00\x80\
\xb0\x02\x00\x20\xac\x00\x00\xc2\x0a\x00\x80\xb0\x02\x00\x20\xac\
\x00\x00\xc2\x0a\x00\x80\xb0\x02\x00\x20\xac\x00\x00\xc2\x0a\x00\
\x80\xb0\x02\x00\x08\x2b\x00\x00\xc2\x0a\x00\x80\xb0\x02\x00\x08\
\x2b\x00\x00\xc2\x0a\x00\x20\xac\x00\x00\x08\x2b\x00\x00\xc2\x0a\
\x00\x20\xac\x00\x00\x08\x2b\x00\x80\xb0\x02\x00\x20\xac\x00\x00\
\x08\x2b\x00\x80\xb0\x02\x00\x20\xac\x00\x00\xc2\x0a\x00\x80\xb0\
\x02\x00\x20\xac\x00\x00\xc2\x0a\x00\x80\xb0\x02\x00\xb0\xe2\xff\
\x0f\x00\xfd\x93\x38\x5b\x26\xb4\x72\xf3\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0f\x4c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\xc8\x00\x00\x00\xc8\x08\x06\x00\x00\x00\xad\x58\xae\x9e\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x0e\xee\x49\x44\x41\x54\x78\x9c\xed\
\x9d\x79\x8c\x5e\x55\x19\x87\x9f\xaf\xb3\xb4\x54\x28\x2d\x25\xb2\
\x55\x2c\x14\x0a\x08\x14\x8a\x7b\xd9\xb1\x20\x24\xca\x26\x20\x61\
\xab\x28\x86\x44\x54\x44\x50\x63\xd4\x28\x06\x14\x09\x6a\xdc\x08\
\x89\xec\x4b\x40\x76\xd9\x0a\x22\x88\x81\x42\xd9\x0a\xb4\xc8\x56\
\xa0\x83\x54\x68\x85\x52\xb6\x92\xce\x74\x19\xff\x78\x67\xe8\x48\
\x66\x3a\xf7\xdc\x7b\xcf\x7d\xef\xf2\x7b\x92\x5f\xbe\x7f\xbe\x7b\
\xcf\xf9\xce\x39\xef\x77\xb6\xf7\xbc\x07\x84\x10\x42\x08\x21\x84\
\x10\x42\x08\x21\x84\x10\x42\x08\x21\x84\x10\x42\x08\xb1\x86\x96\
\x77\x06\x6a\x4c\x3b\x30\x01\x98\x08\x6c\xd1\xf7\xb9\x31\x30\xbe\
\x4f\x1b\x02\xe3\x80\x91\x40\xe7\x00\x01\xf4\x0c\xd0\x72\xe0\x4d\
\xe0\x75\x60\x49\xdf\xe7\x22\xa0\x6b\x80\x16\x02\x2b\xe3\xfe\x9c\
\x66\x22\x03\xc9\x87\x4d\x80\x5d\x80\x29\x03\x34\x19\x33\x92\x22\
\x58\x09\x3c\x0b\xcc\xed\xd3\x3c\xe0\x51\xcc\x90\x44\x06\x64\x20\
\xe1\xb4\x80\xed\x81\xdd\x81\x69\xc0\xae\x58\x0f\x51\x46\x5e\x04\
\xee\x07\x66\x01\xf7\x02\x4f\x01\xbd\xae\x39\x12\xb5\x64\x7d\xe0\
\x30\xe0\x7c\x6c\x38\xd3\x5b\x51\xbd\x0c\xfc\x19\x38\x14\x18\x93\
\x6b\x09\x89\xc6\x31\x0e\x38\x1e\xb8\x1d\x58\x81\x7f\xe3\xce\x5b\
\x2b\x80\xdb\x80\xaf\x00\x63\xf3\x29\x32\x51\x77\x3a\x81\x23\x80\
\x5b\xb0\x09\xb2\x77\x23\x2e\x4a\xdd\xc0\x4d\x58\x2f\xd9\xbf\x50\
\x20\xc4\xfb\x6c\x03\x9c\x03\xbc\x86\x7f\x63\xf5\xd6\x62\xe0\x6c\
\x6c\x91\x41\x34\x98\x16\xb0\x3f\x70\x27\xfe\x8d\xb2\xac\xba\x03\
\xd8\x17\x2d\xe6\x34\x8a\x4e\x6c\x6e\x31\x0f\xff\x06\x58\x15\x3d\
\x01\xcc\x40\xc3\xaf\x5a\xd3\x01\x7c\x0d\x78\x09\xff\x06\x57\x55\
\x2d\xc0\x26\xf5\x45\xed\xef\x88\x02\x68\x03\x8e\x05\x9e\xc7\xbf\
\x81\xd5\x45\xcf\x02\x47\x01\x23\x02\xea\x41\x94\x90\x3d\x80\x39\
\xf8\x37\xa8\xba\xea\x61\x6c\xa3\x54\x54\x8c\x89\xc0\xd5\xf8\x37\
\xa0\xa6\xe8\x4a\x60\xf3\x24\x15\x23\x7c\x69\x07\xbe\x07\xbc\x87\
\x7f\xa3\x69\x9a\x96\x01\xdf\xc5\x86\xb4\xa2\x84\x4c\x45\xc3\xa9\
\x32\xe8\x21\xcc\x59\x53\x94\x84\x0e\xe0\x0c\xcc\xa3\xd5\xbb\x71\
\x48\xa6\x15\xc0\xe9\x68\xb5\xcb\x9d\xc9\xd8\x44\xd1\xbb\x41\x48\
\x83\x6b\x36\x30\x69\xc8\xda\x13\xd1\x68\x01\x27\x60\xe3\x5e\xef\
\x46\x20\xad\x5d\xef\x60\x7b\x27\xda\x8d\x2f\x88\xd1\xc0\x65\xf8\
\x57\xbc\x14\xa6\x0b\x81\x75\x06\xa9\x4f\x91\x23\x5b\x02\x8f\xe3\
\x5f\xd9\x52\x3a\x3d\x8a\x2d\xc1\x8b\x08\xec\x07\x2c\xc5\xbf\x92\
\xa5\x6c\x5a\x02\xec\x83\xc8\x95\x13\xd0\x2a\x55\x9d\xb4\x02\x9b\
\x97\x88\x8c\x8c\x00\xce\xc4\xbf\x42\xa5\x38\x3a\x1d\x4d\xde\x53\
\xd3\x81\x26\xe3\x4d\xd0\xc5\x68\xbf\x24\x98\x91\xc0\x0d\xf8\x57\
\x9e\x54\x8c\xae\x45\x67\x4d\x12\x33\x1a\x0b\x94\xe0\x5d\x69\x52\
\xb1\xba\x15\x2d\x03\x0f\xcb\x68\xe0\x6e\xfc\x2b\x4b\xf2\xd1\x5d\
\xc8\x48\x86\x64\x24\xea\x39\x24\xeb\x49\x34\xdc\xfa\x00\x1d\x68\
\xce\x21\xad\xd1\x35\x68\xe2\xfe\x3e\x23\x80\xcb\xf1\xaf\x14\xa9\
\x5c\xba\x88\x12\x2c\x01\x97\xe1\x70\xcb\x19\xc0\x49\xde\x99\x10\
\xa5\x63\xe7\xbe\xcf\x7b\x3c\x33\xe1\xcd\x09\xf8\xff\x53\x49\xe5\
\xd6\x0c\x1a\xca\x7e\xc8\x7d\x44\x1a\x5e\x2b\x70\xf4\xdd\xf2\x1a\
\xe3\x6d\x89\x79\x76\x2a\x68\xb2\x48\xc2\x12\xe0\xe3\x58\x4c\xb3\
\x42\xf1\x88\x6b\x34\x1a\xb8\x1e\x19\x87\x48\xce\x78\xe0\x3a\x60\
\x54\xd1\x09\x17\x3d\x49\x6f\x61\x07\x67\xf6\x2b\x38\x5d\x51\x7d\
\x36\xed\xd3\xcd\xde\x19\x89\x89\x26\xe5\x52\x56\xcd\xa0\x40\x8a\
\x9c\x83\x4c\x06\x1e\xc3\x86\x58\x42\xa4\xe5\x5d\x6c\x09\xf8\x85\
\x22\x12\x2b\x6a\x0e\xd2\x01\x5c\x81\x8c\x43\x64\x67\x5d\xac\x2d\
\x75\x14\x91\x58\x51\x73\x90\x9f\x03\x47\x16\x94\x96\xa8\x3f\x13\
\xb0\xe1\xd6\x3d\xb1\x13\x2a\x62\x88\x35\x15\x8b\x5d\x55\x86\x5d\
\x7b\x51\x1f\x56\x62\x4b\xbf\x73\x63\x26\x12\x7b\x88\xd5\x8e\xdd\
\x0c\x2b\xe3\x10\x79\x53\x48\xdb\x8a\xdd\x70\x4f\x05\x8e\x8b\x9c\
\x86\x68\x2e\x9b\x61\x91\x6e\x66\xc7\x4a\x20\xe6\x10\x6b\x22\x76\
\x71\xbd\x0e\xc0\x88\x98\xbc\x07\x6c\x8b\xdd\x01\x9f\x3b\x31\x87\
\x58\x67\x23\xe3\x10\xf1\x19\x0d\x9c\x15\xeb\xe5\xb1\x7a\x90\x3d\
\x80\x7f\x46\x7a\xb7\x10\x83\x31\x0d\x78\x20\xef\x97\xc6\x30\x90\
\x36\x6c\xd5\x6a\x6a\x84\x77\x0b\x31\x14\x0f\x03\x9f\x01\x56\xe7\
\xf9\xd2\x18\x43\xac\xa3\x90\x71\x88\xe2\xf9\x24\x70\x44\xde\x2f\
\xcd\xbb\x07\xe9\x00\x9e\x46\x77\x42\x08\x1f\x9e\x05\x76\xc0\xf6\
\x48\x72\x21\xef\x83\xf1\xc7\x51\x7f\xe3\x98\x0f\x5c\x85\x9d\x67\
\x59\x0a\x8c\x03\x3e\x81\xf5\x9c\x5b\x3a\xe6\x4b\xc0\x36\x58\x3d\
\x5c\xea\x9d\x91\xc1\xe8\x04\xba\xf0\xf7\xf6\x8c\xa5\x1e\xe0\x9b\
\x0c\xbd\x77\xd4\x0e\x9c\x0c\xbc\x5d\x82\xbc\x36\x59\x2f\x50\x90\
\x9f\x56\x28\xc7\xe3\x5f\x38\x31\x75\x78\xc2\x72\xd8\x18\xb8\xa4\
\x04\xf9\x6d\xb2\x8e\x19\xb6\x96\x0a\xa6\x05\x3c\x89\x7f\xc1\xc4\
\xd2\xd5\x29\xca\x64\x1a\xba\x75\xd7\x4b\x8f\x53\x82\x90\x41\x03\
\xd9\x1f\xff\x42\x89\xa9\xbd\x52\x96\x4b\x1b\x70\x22\xf0\x7a\x09\
\x7e\x43\xd3\x34\x3d\x41\xfd\x14\xc6\x9d\xf8\x17\x48\x2c\xad\x22\
\xfb\x59\xe8\x0d\x80\x3f\xf6\xbd\xcb\xfb\xf7\x34\x45\xb7\x27\xaa\
\x99\x02\xd8\x06\xff\xc2\x88\xa9\xd7\xf2\x2b\x2a\xa6\x60\x67\x18\
\xbc\x7f\x53\x53\xb4\x55\xa2\x5a\x59\x0b\x79\x6c\x14\x7e\x3d\x87\
\x77\x94\x99\xee\x1c\xdf\x35\x17\xd8\x1b\x3b\x3c\xb6\x30\xc7\xf7\
\x8a\xc1\x39\xc1\x3b\x03\x23\xb1\x7f\x58\xef\x7f\x8a\x98\x5a\x90\
\x5b\x69\xfd\x3f\x1f\xc2\xc2\xae\x76\x97\xe0\x37\xd6\x55\x8b\xc8\
\xb8\xe4\x9b\xb5\x07\x39\x08\xd8\x30\xe3\x3b\xca\x4e\x6e\xbb\xb2\
\x1f\x60\x19\xf0\x63\xe0\x63\xc0\x4d\x91\xd2\x68\x3a\x1b\x01\x5f\
\xcc\xf2\x82\xac\x06\xd2\x84\xc3\x50\xab\x22\xbf\xff\x05\xec\x8f\
\xe6\x00\xe0\xb9\xc8\x69\x35\x11\xb7\x36\x3a\x0e\xdb\x5d\xf6\xee\
\x46\x63\xeb\xa9\xbc\x0a\x2c\x01\x9d\xc0\x69\x68\x37\x3e\x4f\x75\
\x03\xeb\x87\x54\xc2\x40\xb2\xf4\x20\x07\x53\xd2\x2d\xfd\x9c\xc9\
\xd5\x7d\x7a\x18\x7a\x80\x73\xb0\x95\xc1\x52\xfa\x13\x55\x90\x4e\
\xe0\xc0\xb4\x0f\x67\x31\x90\x2f\x67\x78\xb6\x4a\xf4\x3a\xa4\xf9\
\x2a\x16\x41\x70\x1a\xe6\x14\x29\xb2\x51\x78\x5b\x1d\x8b\x85\xa5\
\xf7\xee\x3e\x8b\xd0\x13\x39\x95\x59\x5a\xda\xb0\xe5\xca\xba\xaf\
\x16\xc6\x54\x0f\xb0\x5e\x68\xc1\x43\xfa\x1e\x64\x3a\xcd\xb9\x43\
\xae\xd7\x39\xfd\x55\x58\x78\x9b\xc9\xc0\xef\x89\xbf\x68\x50\x47\
\x3a\x48\x79\xc7\x48\x5a\x03\xd9\x3f\xe5\x73\x55\xc4\xdb\x40\xfa\
\x59\x8a\xb9\xd3\x4f\xa5\xe1\xd7\x92\xa5\xe4\x80\xa2\x12\x6a\x61\
\xbb\xc0\xde\xdd\x66\x51\x9a\x93\x4f\xb1\xe5\x4a\x0b\x73\xbf\xff\
\x37\xfe\xe5\x53\x15\x75\x91\xc2\xc3\x37\x4d\x0f\xb2\x3d\x16\xb0\
\xab\x29\xf4\x7a\x67\x60\x10\x7a\xb1\xab\x92\xb7\xc5\xe2\x1e\xe7\
\xe9\x0e\x53\x57\x3e\x8a\x0d\x53\x83\x48\x63\x20\xbb\xa7\x78\xa6\
\xca\x94\xd1\x40\xfa\x79\x0f\xf8\x29\xb0\x1d\x70\xa3\x73\x5e\xaa\
\x40\x70\xdb\x4d\x63\x20\xd3\x52\x3c\x53\x65\xca\x6c\x20\xfd\x2c\
\x00\x0e\x01\x3e\x0f\x3c\xe3\x9c\x97\x32\xb3\x6b\xe8\x03\x69\x0c\
\x24\x38\x91\x8a\x53\x05\x03\xe9\xe7\x6f\x98\x4b\xfd\xa9\xc0\x3b\
\xce\x79\x29\x23\xbb\xc5\x4e\x60\x13\xfc\x27\x5b\x45\xeb\xa1\x5c\
\x4a\xae\x78\x36\x06\x2e\xc2\xbf\xfc\xca\xa6\x8d\x42\x0a\x31\xb4\
\x07\xd9\x25\xf0\xfb\xc2\x8f\x45\x58\x20\x8d\xcf\x02\x8f\x38\xe7\
\xa5\x4c\x04\x05\x35\x0c\x35\x90\x29\x81\xdf\x17\xfe\xcc\x06\x3e\
\xcd\x9a\xdd\xf8\xa6\x13\xd4\x86\x65\x20\xcd\x60\x35\x70\x01\xda\
\x8d\x07\xd8\x31\xe4\xcb\x32\x90\x66\xf1\x26\xb6\x1b\xbf\x33\xf0\
\x0f\xe7\xbc\x78\x11\xad\x0d\xb7\xd3\x1c\x07\xc5\x81\xaa\xea\x24\
\x7d\x38\x5a\x58\xb0\xe7\x97\xf1\x2f\xe3\x22\xd5\x4d\xc0\xcd\x6a\
\x21\x3d\xc8\x04\x9a\xe3\xa0\xd8\x04\x7a\xb1\x80\x78\xdb\x02\x67\
\x62\x1e\xaf\x4d\xa0\x13\x5b\x8d\x4d\x44\x88\x81\x4c\x0c\xce\x8a\
\xa8\x02\xfd\x67\xe3\xb7\x07\x6e\x71\xce\x4b\x51\x6c\x91\xf4\x8b\
\x21\x06\x92\xf8\xa5\xa2\x92\x3c\x8f\x05\x38\xf8\x02\x76\x4e\xbe\
\xce\x44\x31\x90\x89\xe1\xf9\x10\x15\xe4\x56\xec\x8e\x8d\x1f\x61\
\xbe\x5e\x75\x24\x8a\x81\x6c\x9c\x22\x23\xa2\x9a\x2c\x07\x7e\x81\
\x39\x41\x5e\xe3\x9c\x97\x18\x24\xde\x4d\x0f\x31\x90\xf1\x29\x32\
\x22\xaa\xcd\xbf\xb1\x95\xae\xe9\xd4\xcb\x09\x32\x71\x5b\x96\x81\
\x88\x24\xdc\x05\xec\x04\xfc\x80\x7a\x0c\xbb\xa2\x18\x48\xdd\x23\
\x28\x8a\xb5\xd3\x03\x9c\x8d\x45\x82\xbc\xcb\x39\x2f\x59\x49\xdc\
\x96\x43\x0c\x64\x5c\x8a\x8c\x88\xfa\xf1\x12\x76\xee\xe4\x32\xef\
\x8c\x64\x20\x71\x5b\x0e\x31\x90\x91\x29\x32\x22\xea\xc9\x2a\xcc\
\xf9\xf1\x71\xef\x8c\xa4\x24\x71\x5b\x0e\x31\x90\xce\x14\x19\xa9\
\x03\xa5\xba\xca\xab\x44\xf4\x60\x4b\xc1\x55\x44\x06\x92\x23\x32\
\x90\xa1\xb9\x03\x78\xcb\x3b\x13\x29\x90\x81\x88\x42\x58\x45\x35\
\x23\xd2\x47\x31\x90\xa6\xa2\x1e\x64\xed\xd4\xba\x7c\x42\x0c\xa4\
\x29\xde\x9e\x22\x39\x6d\xc0\xd6\xde\x99\x48\x41\xe2\x38\x62\x32\
\x10\x91\x85\x3d\xc8\x70\xf7\x86\x23\x32\x10\x11\x9d\x16\xf0\x43\
\xef\x4c\xa4\x24\x8a\x81\x2c\x4f\x91\x11\x51\x5f\xbe\x0a\xec\xeb\
\x9d\x89\x94\x44\x31\x90\x37\x53\x64\xa4\x0e\xd4\x7a\x12\x9a\x92\
\x03\x80\x73\xbd\x33\x91\x81\xa5\x49\xbf\x18\x62\x20\xaf\xa7\xc8\
\x48\x1d\x90\x81\xac\x61\x14\x76\x3c\xf7\x16\xaa\xbd\xec\x9f\xb8\
\x2d\x87\x9c\x31\x5f\x92\x22\x23\xa2\x3e\xec\x03\x9c\x47\x35\x57\
\xad\x3e\x48\xe2\xb6\xac\x1e\x44\x0c\xc7\xa6\xc0\x95\x98\x07\x6f\
\x1d\x8c\x03\x02\xda\x72\x88\x81\x2c\x4a\x91\x11\x51\x5d\x3a\x80\
\x53\xb0\x83\x52\x47\x3a\xe7\x25\x6f\x16\x27\xfd\x62\xc8\x10\xab\
\x2b\x3c\x1f\xb5\xa0\x89\x73\x90\xbd\x81\x3f\x60\x91\x4e\xea\xc8\
\x82\xa4\x5f\x0c\xe9\x41\xba\xc2\xf3\x51\x0b\x9a\x64\x20\x13\x80\
\xab\x80\xbb\xa9\xaf\x71\x40\x40\x5b\x56\x0f\x32\x3c\x4d\x30\x90\
\x51\xc0\x77\x31\xf7\xf5\xd1\xce\x79\x29\x82\xc4\x3d\x48\x88\x81\
\x2c\x04\x56\x06\x3e\x53\x07\xea\x6c\x20\x2d\x2c\x16\xd6\x6f\x81\
\x2d\x9d\xf3\x52\x14\x3d\xc0\x2b\x49\xbf\x1c\x32\xc4\x5a\x09\x3c\
\x1b\x9c\x9d\xea\x53\x57\x03\xd9\x16\x98\x09\xfc\x95\xe6\x18\x07\
\xc0\xd3\x58\xb4\xfb\x44\x84\xba\xbb\xcf\x0d\xfc\xbe\x28\x1f\xeb\
\x03\xbf\x06\xe6\x61\x67\xcb\x9b\xc6\xbc\x90\x2f\xcb\x40\x86\xa7\
\x2e\x67\x66\x46\x60\xfe\x53\xcf\x61\xf3\x8d\xa6\x0d\x95\xfb\x09\
\x6a\xc3\xa1\x85\x14\x64\x7d\x35\xa1\x0e\x43\xac\x69\xd8\xc5\x39\
\x1f\xf7\xce\x48\x09\x88\xda\x83\x3c\x1a\xf8\xfd\x3a\x50\x65\x03\
\xd9\x0c\xb8\x1c\x98\x85\x8c\xa3\x9f\x39\x21\x5f\x0e\x35\x90\x45\
\xc0\x8b\x81\xcf\x54\x9d\x2a\x1a\xc8\x28\x6c\xc9\xf6\x39\xe0\x68\
\xe7\xbc\x94\x89\xf9\xc0\x7f\x43\x1e\x48\x33\xbe\xbe\x3f\xc5\x33\
\x55\xa6\x4a\x06\xd2\x02\x0e\xc5\x56\x6a\xce\xa0\x19\x7b\x1a\x21\
\xcc\x0a\x7d\x20\x8d\x81\x04\x27\x52\x71\xaa\x62\x20\x3b\x02\x7f\
\x07\xae\x43\x57\x55\x0c\xc5\x7d\xa1\x0f\xa4\x31\x90\x7b\x53\x3c\
\x53\x65\xca\x6e\x20\xe3\x81\x3f\x61\x51\x0e\xf7\x71\xce\x4b\xd9\
\x29\xa4\xed\xb6\xb0\x5d\x75\xef\xcb\x18\x8b\x52\x59\x6f\x5b\xea\
\x00\xbe\x0d\xbc\x81\x7f\x19\x55\x41\x0b\x48\xf1\x67\x97\xa6\x07\
\xe9\xc5\x76\x60\x9b\x42\x19\x7b\x90\xfd\x81\x27\x80\xdf\xa1\xa0\
\xe2\x49\x99\x89\xb5\xdd\x20\xd2\x6e\x82\xc9\x40\x7c\xd8\x06\x3b\
\xee\x3a\x13\xbb\xfd\x49\x24\xe7\xf6\x22\x13\x1b\x43\x73\xee\x4c\
\xef\xca\xa7\xc8\x32\x31\x16\xf8\x0d\xcd\x29\xf3\xbc\xd5\x0d\xac\
\x1b\x5c\xea\xa4\xef\x41\xde\x06\xee\x4c\xf9\x6c\xd5\xf0\xec\x41\
\xda\x80\x13\xb1\xf5\xfb\x53\x68\xae\x7b\x48\x56\xee\x00\xde\x4d\
\xf3\x60\x16\x3f\xa3\xab\x33\x3c\x5b\x25\xbc\x7c\xb1\xf6\xc6\x3c\
\x17\xce\x43\xb7\x7b\x65\xc5\xa5\xad\x8e\xc5\x7c\xeb\xbd\xbb\xcf\
\xd8\x5a\x98\x57\x81\x25\x64\x12\x70\x7d\x4e\x79\x97\x2c\xe0\xe1\
\x98\xa0\x1a\x18\x40\x96\x7f\xc7\x37\xb1\xae\xab\xee\x14\xd5\x83\
\x8c\x01\x7e\x05\x3c\x05\x1c\x52\x50\x9a\x4d\x60\x26\x36\x25\x48\
\x45\xd6\xca\xbf\x24\xe3\xf3\x55\x20\xf6\x1c\xa4\x0d\xbb\xce\x6c\
\x3e\xf0\x7d\xaa\x1d\x90\xad\x8c\xb8\xb6\xd1\x4e\xcc\xf9\xcb\xbb\
\x1b\x8d\xa9\x20\xe7\xb6\x40\xf6\x04\x1e\x2b\xc1\x6f\xac\xab\x5e\
\xc5\x36\x54\x53\x93\xb5\x07\xe9\x01\x2e\xce\xf8\x8e\xb2\x13\xa3\
\x07\xd9\x02\xb8\x16\xb8\x07\xd8\x39\xc2\xfb\x85\x71\x31\xb6\x34\
\xee\xca\x64\xfc\xff\x29\x62\xea\x8d\xfc\x8a\x8a\x31\xc0\x59\xd8\
\xba\xbc\xf7\xef\x6a\x82\x26\x25\xab\x96\xf8\xdc\x81\x7f\x61\xc4\
\xd2\xb2\x1c\xca\xa7\x7f\x9e\xb1\xb8\x04\xbf\xa7\x29\x2a\x95\xb7\
\xc7\x7e\xf8\x17\x48\x4c\x4d\xc8\x50\x36\x7b\x63\x9e\xb6\xde\xbf\
\xa1\x69\x2a\x95\x67\x73\x0b\x3b\x0c\xef\x5d\x28\xb1\x74\x46\x8a\
\x32\xd9\x0a\xb8\xa1\x04\x79\x6f\xa2\xe6\x50\x2e\x1f\x3a\x00\x66\
\xe0\x5f\x30\xb1\xd4\x03\x7c\x2e\x61\x39\x8c\xc5\xc2\xea\x34\x61\
\x13\xb5\xac\x3a\x6a\xd8\x5a\x72\xa0\x13\xf3\xb9\xf7\x2e\x9c\x58\
\x5a\x8e\x9d\xbf\x18\x6a\xd9\x70\x34\x70\x32\x16\x5a\xdf\x3b\xaf\
\x4d\xd6\xfc\xb5\xd4\x51\x30\x79\x77\x43\xc7\x03\x17\xe6\xfc\xce\
\xb2\xb1\x10\x3b\xd6\xfa\x04\x36\x81\xff\x30\xf0\x29\x2c\x84\xe7\
\x58\xc7\x7c\x09\xe3\x58\x2c\x92\x4b\x2e\xe4\x6d\x20\xed\x98\xab\
\x44\x5d\x2e\x5a\x11\xd5\xe2\x69\xec\x6c\xfe\xaa\xbc\x5e\xd8\x96\
\xd7\x8b\xfa\x58\x8d\x5d\x6f\xf5\xa5\x9c\xdf\x2b\x44\x12\x4e\x02\
\x9e\xcc\xf3\x85\x31\x66\xfa\x23\x80\x07\x81\x4f\x44\x78\xb7\x10\
\x43\x31\x1b\x8b\x20\xd9\x9b\xe7\x4b\x63\x78\xaa\xae\x06\xbe\x13\
\xe1\xbd\x42\xac\x8d\x93\xc9\xd9\x38\x20\x9e\x2b\xf7\x2c\xec\xa6\
\x22\x21\x8a\xe0\x32\xe0\xa1\x18\x2f\x8e\xb9\x99\xb2\x39\x36\x69\
\x52\x74\x3f\x11\x93\x65\x58\x30\x8b\xff\xc4\x78\x79\xde\x93\xf4\
\x81\xbc\x85\xed\x1d\x34\xf1\x0e\x0a\x51\x1c\xa7\x61\x11\x25\xa3\
\x50\xc4\x61\xa0\x07\x80\x4f\x46\x4e\x47\x34\x93\xd9\xc0\x6e\xe4\
\xb8\xac\xfb\x41\x8a\xf0\x57\xd9\x09\x78\x04\x45\xe4\x10\xf9\xb2\
\x02\x98\x0a\xfc\x2b\x66\x22\x31\x87\x58\xfd\x2c\xc6\x8c\x63\xcf\
\x02\xd2\x12\xcd\xe1\x74\xec\xd0\x59\x54\x8a\xf2\x78\xec\xc0\x02\
\x07\x7f\xba\xa0\xf4\x44\xbd\x99\x05\xec\x85\x5d\x2c\x1b\x95\x22\
\x5d\x82\x27\x61\xe7\x22\x52\x45\xb8\x13\xa2\x8f\xb7\xb1\x61\x7b\
\x57\x11\x89\x15\x19\x14\xed\x05\xe0\x5b\x05\xa6\x27\xea\xc9\x37\
\x28\x47\x38\xd8\x28\xb4\x30\x6f\x5f\x6f\x97\x68\xa9\x9a\x3a\x8f\
\x82\xf1\x38\x75\xb5\x0e\x76\xd3\xcf\x2e\x0e\x69\x8b\xea\xf2\x20\
\xb6\xd0\xd3\x5d\x64\xa2\x5e\xc7\x12\x27\x62\x71\x67\x37\x70\x4a\
\x5f\x54\x8b\xd7\xb0\x3f\xd4\xa2\xc3\xc0\xba\x05\x66\xee\x02\x0e\
\xa7\x80\x55\x08\x51\x79\x7a\x80\xc3\x70\x30\x0e\x28\x66\x1f\x64\
\x28\x16\x00\x2f\x03\x07\x3b\xe6\x41\x94\x9f\x19\xd8\xa5\x41\x2e\
\x78\x1a\x08\xd8\xb2\xef\x08\xb4\x89\x28\x06\xe7\x27\xd8\x05\xa5\
\x8d\xa6\x85\x85\x88\xf4\x5e\x21\x91\xca\xa5\xf3\x29\x61\xe8\x1e\
\x2f\xda\x31\xb7\x01\xef\x4a\x91\xca\xa1\xab\xf0\x1f\xdd\x94\x8e\
\x4e\xe0\x36\xfc\x2b\x47\xf2\xd5\xcd\xe4\x18\xb6\xa7\x6e\xac\x03\
\xdc\x85\x7f\x25\x49\x3e\xba\x13\x18\x85\x58\x2b\xeb\x00\xb7\xe2\
\x5f\x59\x52\xb1\xba\x19\x19\x47\x62\x3a\xd1\x9c\xa4\x49\xfa\x0b\
\x1a\x56\x05\xd3\x0e\x5c\x84\x7f\xe5\x49\x71\x75\x3e\x9a\x90\xa7\
\xa6\x05\xfc\x0c\xff\x4a\x94\xe2\xe8\xc7\x68\x29\x37\x17\x66\x60\
\x47\x2c\xbd\x2b\x54\xca\x47\xdd\xc0\xd1\x88\x5c\xd9\x07\x45\x4e\
\xaf\x83\xfe\x0b\xec\x81\x88\xc2\x47\xb1\x00\x10\xde\x95\x2c\xa5\
\xd3\x83\x64\xbb\xad\x4b\x24\x60\x14\x70\x01\xfe\x95\x2d\x85\xe9\
\x3c\x60\xe4\x20\xf5\x29\x22\xd0\xc2\xe6\x25\xef\xe0\x5f\xf1\xd2\
\xda\xf5\x16\x9a\x6f\xb8\x31\x09\x0b\x1e\xe6\xdd\x08\xa4\xc1\x75\
\x1f\x76\x27\xbc\x70\xa4\x03\x8b\x91\xa4\x55\xae\xf2\xa8\x07\x73\
\x55\x57\xb0\xc0\x12\x31\x05\x8b\xf0\xed\xdd\x38\x9a\xae\x07\x80\
\x1d\x86\xa9\x2b\xe1\x44\x1b\x76\x37\xc9\x32\xfc\x1b\x4a\xd3\xf4\
\x2e\x16\xd6\x49\xbb\xe2\x15\xe0\x23\xc0\x15\xf8\x37\x9a\xa6\xe8\
\x52\x60\xb3\x44\x35\x23\x4a\xc5\x34\xe0\x61\xfc\x1b\x50\x5d\xf5\
\x00\x0a\x27\x5b\x79\x46\x00\x47\x02\xcf\xe0\xdf\xa0\xea\xa2\xa7\
\xb0\xa8\x34\x5e\x51\x71\x44\x04\xda\x81\xe3\xb0\x10\xa8\xde\x0d\
\xac\xaa\x9a\x8f\xed\x69\x68\x9e\x51\x63\x3a\x80\x63\xb0\x88\x2a\
\xde\x0d\xae\x2a\x9a\x03\x1c\x85\xce\x6c\x34\x8a\x16\x30\x1d\xb8\
\x1d\xff\x06\x58\x56\xcd\xc4\x9c\x44\xe5\x92\xde\x70\xb6\x06\xce\
\x02\x16\xe1\xdf\x28\xbd\xb5\x08\xf8\x25\xb0\x55\xa6\x12\x15\xb5\
\xa4\x03\x38\x14\xb8\x11\x3b\xb3\xe0\xdd\x58\x8b\xd2\x72\xe0\x06\
\x2c\xca\xa5\x86\x51\x22\x11\xeb\x03\xc7\x62\x61\x2f\x7b\xf0\x6f\
\xc4\x79\xab\x1b\xb8\x09\x9b\x8f\x8d\xc9\xa9\xcc\x44\x43\x59\x0f\
\x38\x08\x73\xd9\x7e\x09\xff\xc6\x9d\x56\x0b\x80\x73\x81\x03\xd1\
\x4d\x5f\x89\xd0\xe4\x2b\x9c\x16\x30\x19\xd8\x1d\xd8\x15\xbb\x86\
\xb8\xac\xe3\xf5\xf9\x98\x47\xed\x2c\xec\x8e\xc8\xf9\x98\xa1\x88\
\x84\xc8\x40\xf2\x61\x23\xec\x4a\xe2\x29\xc0\x8e\x7d\x9f\xdb\x51\
\xdc\x78\xbe\x07\x78\x1a\x98\x07\xcc\xed\xfb\x9c\x83\x1d\x6f\x15\
\x19\x90\x81\xc4\xa3\x0d\xd8\x04\x3b\x0f\xd1\xaf\x8d\x80\xf1\x7d\
\xda\x10\x18\x87\x9d\xb2\x1b\x28\xb0\xf9\xc1\x40\x2d\xc5\xce\xe3\
\x2f\xe9\xfb\x5c\x8c\x0d\x97\xba\xfa\x3e\x5f\x01\x56\xc7\xff\x49\
\x42\x08\x21\x84\x10\x42\x08\x21\x84\x10\x42\x08\x21\x84\x10\x42\
\x08\x21\x84\x68\x36\xff\x03\x82\xd8\xdf\x81\x6d\xe3\x62\x71\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x12\xeb\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\xc8\x00\x00\x00\xc8\x08\x06\x00\x00\x00\xad\x58\xae\x9e\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x12\x8d\x49\x44\x41\x54\x78\x9c\xed\
\x9d\x7b\x90\xde\x65\x75\xc7\x3f\xef\x66\x77\x43\x52\xc9\xc5\xa0\
\x01\xb9\x25\x80\x49\x29\x82\x92\x48\x1c\x40\x82\x63\x91\xa6\xd6\
\x2a\x66\x50\x6b\x28\x04\x46\x3a\x75\x74\x6c\x95\x5e\xec\x58\xd1\
\xcd\x0c\x08\x52\x3b\xf5\x5e\x5a\xb9\x38\x60\x47\x0b\xb4\x30\x04\
\x01\x61\x10\x15\x48\x02\x91\x10\x82\x40\x42\x08\x09\x25\x40\xb8\
\xc4\x10\xe4\xb6\x9b\x5b\xff\x38\xbb\x66\x59\xb2\xbb\xbf\xe7\x7a\
\x9e\xdf\xef\x3d\x9f\x99\xef\xbc\xff\xbc\xcf\xfb\x3b\xcf\xe5\xbc\
\xbf\xe7\x7a\x1e\x30\x0c\xc3\x30\x0c\xc3\x30\x0c\xc3\x30\x0c\xc3\
\x30\x0c\xc3\x30\x0c\xc3\x30\x0c\xc3\x30\x0c\x63\x37\x2d\x6d\x03\
\x1a\x4c\x27\x70\x00\x30\x0d\x98\xde\xff\xb9\x2f\x30\xa5\x5f\xfb\
\x00\x93\x81\xb1\x40\xf7\x20\x01\xf4\x0d\xd2\x6b\xc0\x0b\xc0\xf3\
\xc0\xe6\xfe\xcf\x4d\xc0\x86\x41\xda\x08\x6c\x4f\x9b\x9d\xf6\xc4\
\x1c\x24\x0e\xfb\x01\xb3\x80\xa3\x06\x69\x06\xe2\x24\x39\xd8\x0e\
\xac\x01\x56\xf5\xeb\x01\xe0\x5e\xc4\x91\x8c\x00\xcc\x41\xdc\x69\
\x01\x47\x00\x27\x00\xc7\x01\xc7\x23\x6f\x88\x12\x79\x0c\x58\x02\
\xdc\x05\xdc\x01\x3c\x04\xec\x52\xb5\xc8\x68\x24\x13\x81\x53\x81\
\x4b\x90\xee\xcc\xae\x9a\xea\x09\xe0\x07\xc0\x7c\x60\x42\xd4\x12\
\x32\xda\x8e\xc9\xc0\x59\xc0\xcd\xc0\x36\xf4\x1b\x77\x6c\x6d\x03\
\x6e\x04\xce\x04\x26\xc5\x29\x32\xa3\xe9\x74\x03\x1f\x07\x6e\x40\
\x06\xc8\xda\x8d\x38\x97\x7a\x81\xeb\x91\xb7\xe4\xc0\x44\x81\x61\
\xfc\x9e\x99\xc0\x37\x80\xe7\xd0\x6f\xac\xda\x7a\x06\xb8\x08\x99\
\x64\x30\xda\x98\x16\x30\x0f\xb8\x15\xfd\x46\x59\xaa\x7e\x06\x7c\
\x00\x9b\xcc\x69\x2b\xba\x91\xb1\xc5\x03\xe8\x37\xc0\xba\xe8\x7e\
\x60\x21\xd6\xfd\x6a\x34\x5d\xc0\xa7\x80\xc7\xd1\x6f\x70\x75\xd5\
\x7a\x64\x50\x9f\x6b\x7d\xc7\xc8\xc0\x18\xe0\x74\xe0\x51\xf4\x1b\
\x58\x53\xb4\x06\x58\x00\x74\x38\xd4\x83\x51\x20\x73\x81\x15\xe8\
\x37\xa8\xa6\x6a\x39\xb2\x50\x6a\xd4\x8c\x69\xc0\x55\xe8\x37\xa0\
\x76\xd1\x8f\x81\x83\xaa\x54\x8c\xa1\x4b\x27\xf0\x0f\xc0\x2b\xe8\
\x37\x9a\x76\xd3\xcb\xc0\x39\x48\x97\xd6\x28\x90\xa3\xb1\xee\x54\
\x09\xba\x07\xd9\xac\x69\x14\x42\x17\x70\x1e\xb2\xa3\x55\xbb\x71\
\x98\x44\xdb\x80\x45\xd8\x6c\x97\x3a\x33\x90\x81\xa2\x76\x83\x30\
\xed\x59\xcb\x80\x43\x87\xad\x3d\x23\x19\x2d\xe0\x6c\xa4\xdf\xab\
\xdd\x08\x4c\x23\xeb\x77\xc8\xda\x89\xad\xc6\x67\x62\x3c\x70\x25\
\xfa\x15\x6f\x72\xd3\x65\xc0\xb8\x3d\xd4\xa7\x11\x91\x43\x80\x95\
\xe8\x57\xb6\xc9\x4f\xf7\x22\x53\xf0\x46\x02\x4e\x06\xb6\xa0\x5f\
\xc9\xa6\x30\x6d\x06\xde\x8f\x11\x95\xb3\xb1\x59\xaa\x26\x69\x1b\
\x32\x2e\x31\x02\xe9\x00\xce\x47\xbf\x42\x4d\x69\xb4\x08\x1b\xbc\
\x7b\xd3\x85\x0d\xc6\xdb\x41\x3f\xc4\xd6\x4b\x9c\x19\x0b\x5c\x8b\
\x7e\xe5\x99\xf2\xe8\x1a\xec\xac\x49\x65\xc6\x23\x81\x12\xb4\x2b\
\xcd\x94\x57\x3f\xc5\xa6\x81\x47\x65\x3c\xf0\x73\xf4\x2b\xcb\xa4\
\xa3\xdb\x30\x27\x19\x96\xb1\xd8\x9b\xc3\x24\x6f\x12\xeb\x6e\x0d\
\xa1\x0b\x1b\x73\x98\x76\xeb\x6a\x6c\xe0\xfe\x7b\x3a\x80\x1f\xa1\
\x5f\x29\xa6\xb2\x74\x39\x05\x4c\x01\x97\x70\xb8\xe5\x3c\xe0\xb3\
\xda\x46\x18\xc5\xf1\xae\xfe\xcf\x5f\x68\x1a\xa1\xcd\xd9\xe8\xff\
\x53\x99\xca\xd6\x42\xda\x94\x93\xb1\xed\x23\xa6\xd1\xb5\x0d\xc5\
\xbd\x5b\x5a\x7d\xbc\x43\x90\x9d\x9d\x16\x34\xd9\xa8\xc2\x66\x60\
\x36\x12\xd3\x2c\x2b\x1a\x71\x8d\xc6\x03\xff\x8b\x39\xc7\x00\x6b\
\x81\xcf\x03\x87\x03\x7b\xf5\xeb\x8f\x90\x00\x08\xeb\x14\xed\x2a\
\x89\x29\xc0\xff\x20\x65\xd3\x68\x5a\xd8\xfe\xaa\xc1\x5a\x84\x4c\
\x71\x0f\x47\x37\x32\x89\xa1\x6d\x67\x29\xba\x94\x02\x66\xb6\x52\
\x62\x83\xf2\xdd\x72\x99\xb9\xfb\xdb\x02\xec\x2d\x45\x0b\x1d\xca\
\xad\x56\xcc\xc0\xce\x90\x0f\xe8\x27\x8e\x65\xd7\x42\xba\x18\xda\
\x76\x97\xa0\xdf\xd1\xc0\x40\x10\x5d\x58\xf4\x91\xc1\xf2\xa9\xe0\
\x19\x05\xd8\x5d\x8a\x96\x31\x72\xd7\xb4\x76\x58\x3f\x7a\xb7\x96\
\x06\x94\xe3\xdd\x05\xd8\x5f\x8a\x16\x05\x94\x63\x65\x72\xcc\x62\
\x1d\x0d\xfc\x53\x86\xe7\xd4\x85\x5f\x07\xa4\xbd\x37\x9a\x15\xf5\
\xe7\x4b\x64\x88\xe0\x98\xda\x41\x3a\x91\x9b\x61\x4b\xd8\xd2\x52\
\x0a\x5b\x02\xd2\x3e\x1f\xcd\x8a\xfa\x93\xa5\x6d\xa5\x76\x90\x2f\
\x00\xb3\x12\x3f\xa3\x6e\xec\x54\x4a\xdb\x44\x8e\x01\x3e\x97\xf2\
\x01\x29\x1d\x64\x1a\x99\xfa\x89\x46\x5b\x73\x3e\x70\x60\xaa\x1f\
\x4f\xe9\x20\x17\x61\xa7\xc3\x8c\xf4\x8c\x07\x2e\x4c\xf5\xe3\xa9\
\x1c\x64\x2e\xf0\xb1\x44\xbf\x6d\x18\x43\x59\x00\x1c\x9b\xe2\x87\
\x53\x38\xc8\x18\xe0\x9b\x09\x7e\xd7\x30\x46\xe2\x5b\x24\x68\xcf\
\x29\x1c\x64\x01\x32\xb5\x6b\x18\x39\x39\x06\xf8\x78\xec\x1f\x8d\
\xed\x20\x5d\xc0\x57\x23\xff\xa6\x61\x54\xa5\x87\xc8\x67\xd9\x63\
\x1f\x8c\x3f\x83\xe6\xec\x93\xf9\x3f\xe0\x0e\x64\xdd\x62\x2a\xf0\
\x3e\xe0\x2d\x9a\x06\x45\x64\x2b\x70\x3b\xb0\x11\x98\x00\x1c\x07\
\x1c\xa6\x6a\x51\x1c\x66\x22\x3d\x98\x2b\xb4\x0d\xd9\x13\xdd\xc0\
\x06\xf4\xb7\x20\x84\x6a\x13\x70\x2a\x6f\x7c\xbb\x76\x01\x9f\x46\
\x36\xcb\x85\xfc\x7e\x8f\x53\xa9\xbe\x9e\x9e\xc0\x67\xf7\x02\x5f\
\xe4\x8d\xb3\x8b\x2d\x60\x1e\xb0\x3e\xf0\xf7\x4b\xd0\x3a\x0a\xdd\
\xa7\x75\x16\xfa\x85\x13\xaa\xd5\xc0\xc1\xa3\xe4\x73\x16\xf0\x62\
\xc0\x33\x7a\x46\x2f\xca\x61\xe9\x09\x78\x6e\x1f\x70\xd2\x28\xbf\
\xff\x56\xe0\xc1\x80\x67\x94\xa2\xbf\x1c\xbd\x28\xf3\xd2\x02\x7e\
\x83\x7e\xc1\x84\xe8\x97\xc0\x9b\x2b\xe6\x77\x61\xc0\x73\x7a\x2a\
\x3e\x63\x4f\xf4\x04\x3c\xb7\xea\x7e\xb8\x7d\x90\x0d\x95\xda\xf5\
\x11\xa2\x95\x14\x76\xb0\x6a\x1e\xfa\x85\x12\xa2\xeb\x70\x3b\xce\
\x39\x06\xff\xee\x48\x8f\xc3\x73\x86\xd2\xe3\xf9\xcc\xad\xc8\x82\
\x5a\x55\xc6\x23\x11\x0e\xb5\xeb\x25\x44\xa3\xbd\x2d\x2b\x11\x6b\
\x16\xeb\xef\x22\xfd\x8e\x06\x57\x23\x8b\x9a\xaf\x39\xa4\xd9\x01\
\xdc\x92\xc6\x9c\x24\xfc\x12\x78\xc5\xe1\xfb\xaf\x00\xf3\x81\x1b\
\xd2\x98\x93\x85\xbf\x8f\xf1\x23\x31\x1c\x64\x26\x91\xbc\x55\x81\
\xeb\x90\x59\x8f\x6d\x1e\x69\x9f\x88\x6c\x4b\x4a\x9e\xf4\x48\xd3\
\x8b\x4c\x56\xdc\x14\xd9\x96\x5c\xfc\x09\x11\x66\xe6\x62\x38\xc8\
\x5f\x45\xf8\x0d\x0d\x6e\x07\x3e\x89\xc4\xe6\xf2\x61\x62\x44\x5b\
\x52\x33\xc1\x33\x5d\x2f\xf2\x76\x5d\x16\xd1\x96\x9c\x9c\x1d\xfa\
\x03\xa1\x0e\x32\x96\x7a\x1e\xa2\x5f\x0d\x7c\x14\xb7\x6e\xd5\x50\
\x8e\x8f\x64\x4b\x0e\x8e\xc5\x7f\xd0\xfa\x32\xf0\x21\xe0\xd1\x78\
\xe6\x64\xe3\x4c\x02\xa7\x7c\x43\x1d\xe4\x23\xc8\xac\x47\x9d\xd8\
\x02\xfc\x39\x32\x70\xf5\x65\x36\x89\x36\xc7\x25\x62\x3a\xf0\xc1\
\x80\xf4\x9b\x81\x0f\x23\x6b\x40\x75\x62\x2a\x52\xd7\xde\x84\x3a\
\xc8\x19\x81\xe9\x35\x58\x40\xd8\xbf\xe1\xde\xc0\x65\x91\x6c\xc9\
\xc9\xf7\x08\xdb\x09\xf0\x30\x05\xae\x2f\x54\x40\xad\x8d\x4e\x46\
\x16\x9f\xb4\xa7\xf3\x5c\x74\x41\x60\x9e\x0f\x40\xce\x94\x87\xd8\
\xd0\x13\xf0\xfc\x9e\xc0\x67\xdf\x03\xbc\x2d\xe0\xf9\x20\x3b\xb5\
\xb5\xeb\xd1\x45\xbd\x04\x8c\x17\x43\xde\x20\xa7\x50\xe8\x92\xfe\
\x30\x2c\x07\xbe\x12\x90\x7e\x56\xff\x6f\xcc\x8e\x63\x8e\x0a\xc7\
\x20\x79\x08\x09\x76\xf0\x45\xe0\xfe\x38\xe6\x64\xa1\x1b\xe9\x1e\
\x7a\x11\xe2\x20\x9f\x08\x48\x9b\x9b\x3e\x64\x2b\x8c\xcf\x74\x2e\
\xc0\x89\xc8\x3d\x15\xfb\xc6\x32\x48\x91\xb7\x01\xbf\xc2\x7f\x92\
\xa1\x17\x29\xcb\x1d\xd1\x2c\x4a\x8f\x77\x5b\xf5\x75\x90\x49\xc0\
\x1f\xfb\x3e\x54\x81\xf3\x90\x3d\x46\x3e\xcc\x05\x6e\x44\xc6\x1e\
\x4d\x61\x22\x72\x1f\xa4\xef\x44\xc3\x7d\xc0\xd7\xe3\x99\x93\x9c\
\x93\xf1\xac\x3f\x5f\x07\x39\x89\xfa\xdc\x21\xb7\x0e\xf8\x17\xcf\
\xb4\xb3\x91\x2d\x17\x2e\xdb\x34\xea\xc2\x9b\x10\x27\xf1\xed\x6e\
\x9d\x8f\x1c\x09\xa8\x03\x5d\x78\xde\x31\xe2\xeb\x20\xf3\x3c\xd3\
\x69\x70\x0e\x7e\xeb\x1d\x07\x21\x5b\x2d\xde\x14\xd7\x9c\xa2\x98\
\x80\xbc\x1d\xf7\xf7\x48\xfb\x0a\xf0\x8f\x71\xcd\x49\xca\x9f\xfa\
\x24\xf2\x71\x90\x81\xb3\x03\x75\xe0\x4e\x60\xb1\x47\xba\x71\xc8\
\x36\x94\x26\x8c\x39\x46\x63\x7f\xe4\xbe\x96\xb1\x1e\x69\xaf\x42\
\xc2\xa1\xd6\x81\x79\x78\x2c\x96\xfa\x38\xc8\x11\xf8\xfd\xe3\x68\
\x70\x2e\x32\xd5\xe7\xca\xb7\x69\xaf\x73\xf5\x73\x80\x7f\xf5\x48\
\xb7\x8b\xfa\x1c\xb1\x3e\x18\x09\x00\xee\x84\x8f\x83\x9c\xe0\x91\
\x46\x83\x5f\xe1\x77\x43\xea\x29\x44\xd8\xc3\x53\x43\x3e\x8b\xdf\
\x6a\xfb\x2d\x84\x05\xe4\xce\x89\x73\xdb\xf5\x71\x90\xe3\x3c\xd2\
\x68\xe0\x33\x30\x9f\x02\xfc\x67\x6c\x43\x6a\xc4\xa5\xb8\x5f\x8d\
\xb7\x0b\xbf\xb7\x8f\x06\xce\x53\xdb\x3e\x0e\x52\x87\x4d\x7a\x6b\
\x90\xc1\xa7\x2b\x17\xd2\x9c\xc0\x0c\x3e\xec\x8b\x4c\x89\xbb\x72\
\x1d\x12\x8f\xa0\x74\xde\xeb\x9a\xc0\xd5\x41\xf6\x43\x36\xbe\x95\
\xce\x7f\xe0\x1e\xe8\x79\x16\xed\xd9\xb5\x1a\xca\x67\x80\x23\x1d\
\xd3\xec\x40\xca\xbc\x74\x0e\x43\x36\x30\x56\xc6\xd5\x41\xea\x10\
\xa9\x7d\x3b\xf0\x23\x8f\x74\xa1\xfb\xb4\x9a\x42\x0b\xf8\x9a\x47\
\xba\x2b\xa9\x47\xf4\x79\xa7\xc9\x17\x57\x07\x49\x7e\x61\x49\x04\
\x6e\x04\x9e\x73\x4c\x73\x2c\xb2\xda\x6a\x08\x1f\x42\xf6\x6d\xb9\
\xf0\x24\x70\x6b\x02\x5b\x62\xe3\xd4\x86\x9b\xe8\x20\xd7\x78\xa4\
\xb1\x1b\xb0\xde\x88\x4f\x99\xf8\x94\x7d\x6e\x9c\xba\x8f\x4d\x73\
\x90\xed\xb8\x07\x1a\x38\x84\x80\xdd\x9e\x0d\xe6\x14\x64\x37\x81\
\x0b\x8b\xf1\x5b\x77\xca\x49\xb2\x37\x48\x27\x1e\x0b\x2d\x99\x59\
\x82\xfb\x15\x67\x36\x30\xdf\x33\x1d\xc0\xa7\x1c\xd3\x3c\x83\x9c\
\x39\x29\x99\x3f\xc4\xe1\xda\x36\x17\x07\x39\x80\xf2\x37\x28\xde\
\xe6\xf8\xfd\x0e\xea\x79\x4a\x2e\x17\xa7\xe3\xbe\x3d\xe3\xe7\x29\
\x0c\x89\x48\x37\x32\x1b\x5b\x09\x17\x07\x99\xe6\x6c\x4a\x7e\x6e\
\x77\xfc\xfe\x1c\x12\x5e\xdf\xd5\x00\xa6\xe3\x7e\x40\xac\x74\x07\
\x01\x87\xa5\x0a\x17\x07\x29\x7d\xfd\x63\x07\xee\x57\x2c\xdb\xd8\
\x63\x74\x5c\xcb\xa8\xf4\x2e\x16\x24\x72\x90\x69\xee\x76\x64\xe5\
\x41\xe0\x55\xc7\x34\x1f\x48\x61\x48\xc3\x70\x2d\xa3\x17\x81\xb5\
\x29\x0c\x89\x48\x12\x07\x29\x7d\xeb\xf7\x7d\x8e\xdf\xdf\x9b\x7a\
\x9f\x2f\xcf\xc5\x1c\xe0\x0f\x1c\xd3\xac\x48\x61\x48\x44\x2a\xaf\
\xa6\xbb\x38\xc8\x14\x0f\x43\x72\xf2\x88\xe3\xf7\x67\x53\x58\x04\
\xf0\x42\xe9\xc0\x7d\xeb\xbf\x6b\x5d\xe4\xa6\x72\x5b\x6e\x92\x83\
\xb8\xbe\xd6\xeb\xb0\x6d\xa6\x14\x5c\xdf\xb4\xa5\x77\xb1\x92\x38\
\x48\xe9\x11\x14\x37\x38\x7e\x5f\x6b\x4d\x27\x24\x92\x4c\xca\x7b\
\xed\x47\xc2\xb5\xac\xd6\x27\xb1\x22\x1e\x95\xdb\xb2\x4b\x81\x4f\
\xf6\x30\x24\x27\xcf\x3a\x7e\x5f\xeb\x4e\xbe\x90\x72\xd4\xfa\x93\
\x7a\xbb\xe3\xf7\x9f\x49\x62\x45\x3c\x2a\xd7\x81\x8b\x83\xf8\x9c\
\x59\xce\x89\xeb\x06\x45\xad\x49\x07\xd7\x4d\x80\x83\x79\x77\x34\
\x2b\xdc\x70\x2d\x2b\xd7\xba\xc8\x4d\xe5\xb6\xec\xe2\x20\xdd\x1e\
\x86\xe4\xa2\x0f\xb7\x0b\x62\x40\xee\xe3\xd3\xe0\x3d\xb8\xff\x23\
\x83\xdc\xc3\x12\xe2\x5c\x21\x38\x9d\xa1\x20\x2c\x30\x78\x0e\xda\
\xce\x41\x7a\x3d\xd2\x68\x86\xf3\xf9\x1a\x6e\x33\x68\x2d\xe4\xb4\
\xa3\x16\xae\xd3\xbc\x03\x31\x71\x4b\x25\x49\x6f\x68\x07\xfa\x81\
\x88\x87\x93\xcf\x2b\x5d\x3b\x3f\x7f\xe3\x60\xeb\x39\xca\xb6\xfa\
\x5c\x32\xf4\x82\xb2\xcd\x23\x29\x49\xd8\x54\xed\x06\x35\x92\x7e\
\xeb\x91\x9f\x12\x22\xd3\x9f\xc7\xc8\x6f\xe6\xb1\xc8\x9b\x43\xdb\
\x4e\x9f\xb7\x41\xe8\x7d\xf2\x29\x95\xc4\x41\x5e\x2d\x20\x63\xc3\
\x69\x1b\xee\x8b\x7e\xbf\x2d\xc0\xee\x5d\x48\x68\xd4\x2f\x20\xf1\
\xc6\xf6\x42\x82\xd6\xbd\x03\xb9\x18\xf5\xb1\x02\xec\xdb\x05\x3c\
\xef\x58\xb6\x1d\x05\xd8\x3c\x92\x2a\x8f\x57\x5d\x1a\xd5\x56\xfc\
\xef\xba\xcb\xc1\x38\xdc\x42\x8c\x3e\x8e\xfb\x81\xa0\x76\x65\x03\
\x6e\x9b\x55\x27\x50\xf6\x40\xfd\x05\x2a\x4e\xf5\xba\x0c\xd2\xfb\
\xfc\x6c\xc9\x86\xeb\xa0\xbb\x6e\xd7\x89\x69\xe2\x5a\x56\xa5\x47\
\xc2\xaf\xdc\x65\x74\x71\x90\x90\x0b\x2f\x73\xe0\x5a\x29\x75\xba\
\xc6\x59\x1b\xd7\xb2\x6a\x4b\x07\x79\xc1\xc3\x90\x9c\xb8\xce\xd5\
\xaf\x49\x62\x45\x33\x59\xed\xf8\x7d\xd7\xba\xc8\x4d\xe5\x63\xd9\
\x2e\x0e\xe2\x3a\x50\xcb\xcd\x4c\xc7\xef\x9b\x83\x54\xc7\xb5\xac\
\x5c\xeb\x22\x37\x95\xdb\xb2\x8b\x83\x6c\xf6\x30\x24\x27\xae\x95\
\xf2\x70\x12\x2b\x9a\x89\xeb\x1b\xa4\x74\x07\xa9\xdc\x96\xdb\xf9\
\x0d\xb2\x1c\xff\x3b\x0b\xdb\x89\x3e\xa4\xac\x5c\x28\xdd\x41\x92\
\xbc\x41\x36\x79\x18\x92\x13\xd7\x4a\x79\x19\x58\x96\xc2\x90\x86\
\xb1\x04\xf7\xa3\xcc\xa5\x3b\x48\xe5\xdd\xc6\x2e\x0e\xb2\xc1\xdd\
\x8e\xac\xcc\xc4\x7d\xaa\xb7\x0e\x11\x38\xb4\x71\x2d\xa3\x49\x48\
\x30\xbe\x92\x59\x5f\xf5\x8b\x4d\x72\x90\x4e\xdc\xaf\x66\xb8\x39\
\x85\x21\x0d\xc3\xb5\x8c\x4e\x44\xef\x60\x57\x55\x36\x54\xfd\x62\
\x93\x1c\x04\xdc\x6f\x32\xbd\x1b\x78\x34\x85\x21\x0d\x61\x0d\xee\
\xa1\x94\xbc\x6e\x93\xcd\x4c\x92\x37\xc8\x46\xfc\x76\x75\xe6\xc4\
\xb5\x72\x76\x01\x57\xa4\x30\xa4\x21\x5c\x81\x94\x91\x0b\xa5\x3b\
\x48\x1f\xf0\x54\xaa\x1f\xff\x0d\xfa\x1b\xcd\x46\xd2\x4e\xdc\x8f\
\xb4\x4e\x2f\xc0\xee\x12\xb5\x13\xb9\xf8\xd2\x85\xa9\x05\xd8\x3d\
\x9a\x56\xba\x64\xc8\xb5\xaf\xb8\xca\xf1\xfb\xb9\x69\x01\x1f\x71\
\x4c\xb3\x1e\xb9\x06\xd9\x78\x3d\xd7\x20\x1b\x3a\x5d\xf8\x68\x0a\
\x43\x22\xf3\x80\xcb\x97\x9b\xe6\x20\x00\x67\x78\xa4\xf1\xb9\x51\
\xa9\xe9\x9c\xef\x91\xc6\xa7\xec\x73\x93\xb4\x0d\xff\x19\xfa\xaf\
\xc8\x2a\x72\xed\x1a\x00\xdc\x54\x80\xdd\xa5\x68\xb1\x47\xf9\x1d\
\x56\x80\xdd\x55\x34\xcf\x25\x53\xae\x6f\x90\x7b\x1d\xbf\xaf\xc5\
\x69\x1e\x69\xfe\x99\x7a\xdc\xb1\x97\x9a\x9d\xc0\x97\x3d\xd2\xd5\
\xe5\x1a\x89\xe4\x61\x51\xd7\xa1\xff\x2f\x30\x9a\x1e\xc1\x6f\x2e\
\xfe\xbb\x05\xd8\xae\xad\x6f\x7a\x94\x5b\x27\x32\x96\xd3\xb6\xbd\
\x4a\xbb\x48\xce\x95\x4a\x99\x73\xd5\xa9\x1e\x79\x9b\x84\x6c\x43\
\xd0\xb6\x5d\x4b\x4f\xe1\x77\x6a\x74\x41\x01\xb6\x57\xd1\xe5\x1e\
\x79\x73\xe6\xd3\x4a\x99\x73\xd5\x0a\xfc\x82\x53\xcf\x2f\xc0\x76\
\x2d\xb9\xce\x00\x82\xbc\xa9\x4b\x9f\xfe\x1f\x90\xeb\x95\x72\x5e\
\x1c\xa1\x94\x39\x1f\x7d\xd0\x33\x8f\xed\xd8\xd5\xf2\xe9\x5a\x81\
\x4c\xed\x6a\xdb\x5e\x55\x59\xe2\x31\xb7\x90\x55\x75\xed\xcc\x56\
\xd1\x12\xfc\xde\x22\x7b\x21\x6f\x20\x6d\xfb\x73\x69\x39\x7e\x81\
\x01\x5b\xc8\xc4\x8d\xb6\xfd\x55\xb4\x9e\x8c\xd7\x5d\xfc\x20\x43\
\x86\x62\xe9\x63\x9e\x79\x9c\x06\x3c\x5d\x80\xfd\xa9\xf5\x24\x7e\
\xd3\xe2\x20\x97\x7c\x6a\xdb\x5f\x55\xdf\xf7\xcc\xa3\x17\x75\xea\
\xa7\x6f\xc4\x3f\x88\xc0\x3b\x91\xf0\x35\xda\x79\x48\xa5\x2d\x48\
\x0c\x2e\x1f\xea\x36\xa1\x91\xf5\x3e\xca\x09\xc8\x69\x3c\xed\x4c\
\x57\xd5\x37\x02\xf2\x3a\x17\x89\xe8\xa2\x9d\x87\xd8\x7a\x15\x38\
\x21\xa0\x5c\xbe\x53\x40\x1e\xaa\xaa\x17\x85\x58\xcc\x37\x46\x30\
\x3c\x97\xb6\x03\x47\x06\xe4\x75\x2e\x65\xc7\x9a\x75\xd5\x16\xc2\
\x9c\x63\x16\x65\x87\xa2\x1d\xaa\xeb\x03\xf2\xea\xcd\x99\x1e\x86\
\x6a\x6a\x05\x32\xf8\xf6\xe5\x28\x9a\x31\x26\x79\x92\xb0\x3f\x8b\
\xf1\xc8\x86\x3f\xed\x7c\xb8\x48\x65\x95\x7f\x12\x65\x04\x80\x76\
\xd1\xf7\x02\xf3\x3c\x8d\xfa\xcc\xda\xec\x49\xcb\xf1\x1f\x90\x0f\
\x50\xa7\x09\x9a\x5d\x48\xf7\x58\x2d\x64\xee\xe2\x0a\x06\x96\x26\
\x9f\x15\xf6\xc1\x8c\x05\xbe\x5d\x40\x3e\x5c\xf5\x6f\x84\xdf\xf1\
\x72\x5a\x01\xf9\x70\xd5\xb5\x81\x79\x0e\xe2\xd4\x61\x8c\x2a\x59\
\x5b\x89\x73\x3f\xe1\x7c\x24\xd2\x8b\x76\x7e\x46\xd3\x53\xc0\x29\
\x11\xf2\x3b\x13\x78\xa9\x80\xfc\xb8\x2a\x46\xde\xbd\xe9\x46\x2e\
\xcf\xd4\x2e\x04\x57\xad\x01\xde\x12\x21\xff\x13\x91\xb7\x49\x89\
\x03\xd6\x1d\xc8\x5b\x23\x46\xf7\x62\x2a\xf5\xd8\xa4\x3a\x54\x4f\
\x03\x5d\x11\xf2\x1f\xc4\x45\xe8\x17\x84\x8f\x96\x13\x6f\xea\xef\
\x68\xe0\x86\x02\xf2\x34\xa0\xc5\xc8\x1a\x4e\x0c\x26\x50\xdf\x5d\
\x05\x17\x44\x2a\x83\x20\x66\xa0\x5f\x10\xbe\xba\x85\xb8\x77\x2f\
\x1e\x0d\x5c\x8d\x9c\xa9\xc8\x9d\x97\x9d\xc0\x55\xc0\xbb\x22\xe6\
\x67\x2c\x12\x17\x4b\xbb\x9e\x7c\x75\x68\xc4\xb2\x08\xe2\x67\xe8\
\x17\x86\xaf\xae\x26\xfe\x05\xa5\xd3\x90\x03\x58\x6b\x32\xd8\xbf\
\x1a\xf8\x12\xe1\xb3\x53\x43\x19\x8b\x0c\x70\xb5\xeb\xc7\x57\x37\
\x45\x2e\x8f\x20\x4e\x46\xbf\x40\x42\x74\x0b\x69\xee\xb4\x68\x01\
\x73\x80\xaf\x00\xbf\x40\x56\x74\x43\x6d\xed\x05\x6e\x07\xce\x45\
\xae\x85\x4e\xb1\x01\x6f\x02\xf5\x7e\x73\xec\x22\x52\xf8\xa1\x58\
\x85\xdb\x02\xee\x27\x6c\x01\x4a\x9b\x5f\x23\xdb\xe3\x7d\x6e\xcc\
\xad\xca\x78\xe0\xdd\xc0\xe1\xc8\xac\xd0\x4c\xe0\x40\xc4\x39\x07\
\x04\x72\xa3\xd3\x80\x9e\x40\xde\x12\x6b\xfa\x3f\x97\xe3\x1e\x2b\
\xd7\x85\x7d\x91\x7f\xdf\x98\x5d\xb5\xdc\xdc\x07\xcc\x46\x1c\xa5\
\x18\x16\xa2\xff\xaf\x11\xaa\x47\x88\x33\x05\x5c\x57\x66\x52\xcf\
\xd9\xaa\xa1\x5a\x10\xbb\x60\x62\xd0\x4d\x3d\xce\x25\x8f\xa6\x17\
\x81\x4f\x44\x2e\x9b\x3a\x70\x1a\xf5\x5c\xe7\x18\xaa\xb5\x14\x30\
\xb5\x3b\x1c\x67\xa1\x5f\x40\xb1\xf4\xef\x84\xed\xdd\xaa\x0b\xe3\
\x81\x4b\xd0\x2f\xef\x58\x2a\x3a\xba\x4a\x27\xd2\x4d\xd1\x2e\xa4\
\x58\x5a\x49\xbc\xf5\x84\x12\x99\x45\xfd\x36\x1e\x8e\xa4\x87\x80\
\x31\x51\x4b\x28\x01\x75\x89\x70\x51\x55\x3b\x90\xf3\xda\x25\xdf\
\x11\xef\xca\x24\xe4\xdc\x7d\x89\x3b\x00\x42\xe4\x7b\x7a\x34\x2b\
\x1d\xc8\x4c\x8b\x76\x61\xc5\xd6\xd3\xc0\x27\xc9\x78\xae\x39\x01\
\x2d\xe4\x98\x6c\x9d\x4e\x02\x56\xd5\x52\x6a\x54\x37\xc7\xa3\x5f\
\x60\xa9\x74\x0f\x72\x7c\xb3\xf4\x4b\x62\x06\xd3\x81\x44\x1f\xa9\
\xf3\x56\xfd\xd1\x34\x27\x5a\x69\x65\xe2\xc7\xe8\x17\x5a\x4a\xad\
\x02\xfe\x82\xb2\xfb\xbc\x9d\x48\x97\xb7\x2e\x71\xab\x7c\x55\xcb\
\x3b\x5e\x0e\x42\x2e\xca\xd4\x2e\xbc\xd4\x5a\x07\x7c\x95\xb2\xee\
\xe5\x3b\x0c\xe8\xa1\x19\xd3\xee\xa3\xe9\x25\x60\xff\x28\xa5\xa6\
\xc0\x39\xe8\x17\x60\x4e\xdd\x09\xfc\x35\x71\xb6\xd2\xbb\xf2\x56\
\x24\xea\xe5\x12\x07\x7b\x9b\xa0\xcf\xc5\x28\x3c\x2d\xc6\x20\x7d\
\x76\xed\x42\xd4\xd0\x2a\x64\xf6\xeb\xc3\xc8\xac\x51\x6c\x26\x21\
\xa1\x42\xbf\x45\xb3\xa6\x6a\x5d\xb4\x94\xc4\x5d\xdc\x1c\xa3\xfe\
\x77\x22\xfb\x9c\x3a\x33\x3c\xab\x54\x76\x02\x8f\xb1\x7b\x3f\xd5\
\x9a\x7e\x3d\xcb\xee\x3d\x57\x2f\xb1\xfb\xfa\x85\x0e\xe4\xac\xca\
\xc0\xfe\xac\xa9\xec\xde\xbb\x35\xa0\x43\xa8\xd7\x44\x41\x6c\xb6\
\x21\xc7\x0b\x1e\xd4\x36\x24\x06\x8b\xd0\xff\xb7\xa9\x83\x06\x9c\
\x45\xdb\x8e\x3a\xe8\x5c\x32\x90\x6b\xde\xb8\x0b\xb8\x03\x78\x4f\
\xa6\xe7\x19\xcd\xe6\x2e\xe0\x7d\x64\xb8\x75\x39\xe7\xc2\xca\xa1\
\xc8\xd6\x8d\xec\x11\xee\x8c\x46\xf1\x22\xd2\x6d\xdf\x90\xe3\x61\
\x39\xfb\xb0\xeb\xa8\xf9\x8c\x83\x51\x04\x9f\x21\x93\x73\x68\xd0\
\x02\x2e\x43\xbf\xff\x6a\xaa\xa7\x2e\x26\x33\x1a\x7b\x57\xc6\x21\
\xeb\x05\xb3\x14\x9e\x6d\xd4\x97\xbb\x81\x13\x91\x23\xc7\xd9\xd0\
\xda\xdc\x35\x0d\xd9\x17\xf4\x66\xa5\xe7\x1b\xf5\xe2\x39\xe4\x0f\
\x75\x63\xee\x07\x6b\xcd\xa3\x6f\x40\xb6\x26\x27\x9f\x85\x30\x6a\
\x4f\x1f\x12\xc1\x33\xbb\x73\x80\xee\x46\xbb\xf5\x48\x40\x02\xd5\
\xd0\x90\x46\xf1\x2c\x44\x82\xf2\xa9\xa0\xbd\x13\x75\x25\xf2\x16\
\x3b\x51\xd9\x0e\xa3\x4c\xce\x25\x3c\x22\x7f\xed\x69\x01\x3f\x44\
\x7f\x86\xc4\x54\x96\x2e\xa1\x46\x07\xa0\x52\xd3\x09\x5c\x83\x7e\
\xa5\x98\xca\xd0\x4f\xd0\xef\xdd\x14\x47\x37\xf5\xba\xd6\xcd\x94\
\x46\x8b\x29\x38\x6c\x8f\x36\xe3\x80\xdb\xd0\xaf\x24\x93\x8e\x6e\
\xa5\x3d\x42\x2d\x05\x31\x0e\xf8\x29\xfa\x95\x65\xca\xab\xc5\x98\
\x73\x54\xa6\x1b\x1b\x93\xb4\x93\xfe\x1b\xeb\x56\x39\xd3\x09\x5c\
\x8e\x7e\xe5\x99\xd2\xea\x12\x6c\x40\xee\x4d\x0b\x09\x3e\xa0\x5d\
\x89\xa6\x34\xfa\x32\x36\x95\x1b\x85\x85\xc8\x11\x4b\xed\x0a\x35\
\xc5\x51\x2f\x12\x2c\xdb\x88\xc8\xfb\x81\xe7\xd1\xaf\x5c\x53\x98\
\x9e\x05\xe6\x62\x24\xe1\x60\x24\x00\x84\x76\x25\x9b\xfc\x74\x37\
\x70\xc0\x1b\x6a\xd5\x88\xca\x5e\xc0\xa5\xe8\x57\xb6\xc9\x4d\x17\
\x23\xf7\x1e\x1a\x19\x68\x21\xe3\x12\x8b\x00\x52\xbe\xb6\x62\xe3\
\x0d\x35\x0e\x05\x96\xa1\xdf\x08\x4c\x7b\xd6\x9d\xc0\xf4\x61\x6b\
\xcf\xc8\x42\x17\x12\x77\xcb\x66\xb9\xca\x51\x1f\xb2\x55\xbd\x9d\
\x83\x05\x16\xc7\x51\xb4\x6f\x98\xd3\x92\xb4\x14\x78\xc7\x28\x75\
\x65\x28\x31\x06\xf8\x3c\xed\x11\x55\xbe\x34\xbd\x84\x84\x75\xb2\
\x55\xf1\x1a\x70\x20\xf0\x5f\xe8\x37\x9a\x76\xd1\x15\xd4\xf8\x0a\
\x82\x76\xe6\x38\x9a\x79\x1d\x5c\x29\x5a\x8a\x85\x93\xad\x3d\x1d\
\xc8\x4d\x50\xab\xd1\x6f\x50\x4d\xd1\x43\x48\x54\x9a\x76\x8e\x2e\
\xdf\x38\x3a\x81\x33\x90\x10\xa8\xda\x0d\xac\xae\x5a\x8b\xac\x69\
\xd8\x38\xa3\xc1\x74\x21\x97\xcd\xaf\x44\xbf\xc1\xd5\x45\x2b\x90\
\xbb\x0e\xed\xcc\x46\x1b\xd1\x02\x4e\x02\x6e\x46\xbf\x01\x96\xaa\
\x9b\x90\x4d\xa2\xb6\x25\xbd\xcd\x79\x3b\x70\x21\xb0\x09\xfd\x46\
\xa9\xad\x4d\xc0\x05\xc8\x45\xa0\x86\xf1\x3a\xba\x80\xf9\xc0\x75\
\xc8\x99\x05\xed\xc6\x9a\x4b\xaf\x01\xd7\x22\x51\x2e\xad\x1b\x65\
\x54\x62\x22\x70\x3a\x12\xf6\xb2\x0f\xfd\x46\x1c\x5b\xbd\xc0\xf5\
\xc8\x78\x6c\x42\xa4\x32\x33\xda\x94\xbd\x91\x1b\x65\x2f\x06\x1e\
\x47\xbf\x71\xfb\x6a\x3d\xf0\x7d\xe4\xe6\x5d\xbb\xe9\xab\x02\x36\
\xf8\x72\xa7\x05\xcc\x00\x4e\x00\x8e\x07\xde\x4b\xb9\xfd\xf5\xb5\
\xc8\x8e\xda\xbb\x90\x3b\x22\xd7\x22\x8e\x62\x54\xc4\x1c\x24\x0e\
\x53\x91\x2b\x89\x8f\x02\x8e\xec\xff\x3c\x9c\x7c\xfd\xf9\x3e\xe0\
\x61\xe4\xbe\xf4\x55\xfd\x9f\x2b\x90\xe3\xad\x46\x00\xe6\x20\xe9\
\x18\x03\xec\x87\x9c\x87\x18\xd0\x54\x60\x4a\xbf\xf6\x01\x26\x23\
\xa7\xec\x06\x0b\x64\x7c\x30\x58\x5b\x90\xf3\xf8\x9b\xfb\x3f\x9f\
\x41\xba\x4b\x1b\xfa\x3f\x9f\x62\xf7\x1d\xeb\x86\x61\x18\x86\x61\
\x18\x86\x61\x18\x86\x61\x18\x86\x61\x18\x86\x61\x18\x86\x61\x18\
\x86\x61\x18\x89\xf8\x7f\xf3\x93\xe9\x76\xba\xcb\x3b\x14\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0c\x49\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\xc8\x00\x00\x00\xc8\x08\x06\x00\x00\x00\xad\x58\xae\x9e\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x0b\xeb\x49\x44\x41\x54\x78\x9c\xed\
\xdd\x6b\x8c\x95\xc5\x1d\xc7\xf1\xef\x02\x65\x17\x8d\x75\x01\x25\
\x62\xad\x01\x4d\x95\x4b\x16\xd3\x8a\x46\xc4\x0b\x44\x82\x60\x55\
\x94\xa4\x58\xaa\xbd\xc4\x68\x5f\xf4\x4d\x63\x1b\x8d\x6d\x63\xd3\
\xa6\x17\x8b\xad\xd1\x26\x26\x8d\x96\xe2\xa5\xc6\xb6\x2f\x5a\x63\
\x83\x62\xa5\xda\x40\x20\x25\x2d\xda\x8a\x59\x44\xdb\x06\xad\x17\
\xb4\x4a\x41\x2e\xba\x8b\x72\xb6\x2f\xe6\x58\x11\x76\xd9\x33\xf3\
\xcc\xcc\x7f\xce\x39\xbf\x4f\xf2\xcf\xbe\x79\x66\x9e\x39\x67\xe6\
\xbf\xcf\xe5\xcc\x33\x0f\x88\x88\x88\x88\x88\x88\x88\x88\x88\x88\
\x88\x88\x88\x88\x88\x88\x88\x34\xa4\xc3\xba\x01\x2d\x6c\x04\x70\
\x02\x30\x09\x98\x5c\xff\x3b\x11\x18\x7f\x40\x8c\x05\xba\x80\x4e\
\x60\x74\x3d\x00\xf6\x01\xfd\xf5\xbf\x7d\xc0\x0e\x60\x7b\x3d\xde\
\x04\xb6\x01\x2f\xd4\x63\x2b\xf0\x0a\x50\x4b\xfc\x79\xda\x92\x12\
\x24\x8e\x71\xc0\x4c\xe0\x34\xa0\x07\x98\x01\x4c\xc1\x0d\xfc\x1c\
\xfa\x81\x2d\xc0\xa6\x7a\x3c\x0d\x6c\xc4\x25\x96\x54\xa0\x04\x09\
\x73\x12\x70\x3e\x30\xbb\x1e\xa7\x52\xde\x77\x39\x80\x4b\x9a\xf5\
\xf5\x58\x83\x3b\xda\x88\x44\xd7\x05\x2c\x00\x7e\x0a\x3c\x8f\x1b\
\x7c\xcd\x18\x5b\x80\xdb\x80\x0b\xeb\x9f\x49\x24\x58\x17\xb0\x18\
\xf8\x35\xb0\x07\xfb\xc1\x1d\x3b\x76\x03\x0f\x00\x97\x91\xef\x54\
\x50\x5a\xc0\x5c\xe0\x97\xc0\x2e\xec\x07\x71\xae\x78\x0b\xb8\x17\
\x77\xda\x28\x72\x88\x09\xc0\x0d\x34\xf7\xe9\x53\xac\x78\x0e\xb8\
\x1e\x38\xb6\xd2\x37\x2a\x2d\xa1\x07\xb8\x07\x77\x17\xc8\x7a\x60\
\x96\x16\x7d\xc0\x0a\x60\x7a\xe8\x97\x2b\xcd\x6b\x1e\xf0\x07\xec\
\x07\x61\x33\x44\x0d\x58\x85\x3b\xf5\x94\x16\x37\x17\x58\x87\xfd\
\xa0\x6b\xd6\x58\x03\x9c\xe7\xfd\xad\x4b\xf1\x66\x01\x4f\x60\x3f\
\xc0\x5a\x25\x56\x03\x67\x7a\xf5\x80\x14\xe9\xe3\xc0\xaf\xb0\x1f\
\x50\xad\x18\x35\xe0\x7e\xe0\x63\x0d\xf7\x86\x14\xe3\x08\xe0\xbb\
\xc0\xdb\xd8\x0f\xa4\x56\x8f\xbd\xc0\xb7\xd1\x0f\x8f\x4d\xe3\x02\
\xe0\x5f\xd8\x0f\x9c\x76\x8b\x7f\x00\x73\x86\xef\x1e\xb1\x32\x16\
\xb8\x1b\xfb\x81\xd2\xce\x51\x03\x96\x03\xdd\xc3\xf4\x95\x64\xb6\
\x10\x37\x05\xdc\x7a\x80\x28\x5c\xbc\x8a\x9b\xef\x25\xc6\xba\x80\
\x3b\xb0\x1f\x10\x8a\x43\xa3\x06\xdc\x8e\xe6\x79\x99\xe9\x01\x7a\
\xb1\x1f\x08\x8a\xc3\xc7\x26\x60\xda\x10\x7d\x28\x89\x7c\x0e\x77\
\xf7\xc4\xba\xf3\x15\x8d\xc5\x1e\x60\xc9\xa0\x3d\x29\x51\x8d\xc2\
\x3d\x93\x61\xdd\xe1\x8a\xb0\xb8\x15\x18\x79\x48\xaf\x4a\x14\xe3\
\x70\x53\x1d\xac\x3b\x59\x51\x2d\x1e\x47\x77\xb9\xa2\x3b\x19\x37\
\x0d\xdb\xba\x73\x15\x71\x62\x33\x6e\x11\x0b\x89\x60\x16\xf0\x06\
\xf6\x9d\xaa\x88\x1b\xaf\xa1\xf9\x5c\x95\x2d\x44\xd3\x45\x5a\x39\
\xf6\x02\xf3\x91\x20\x8b\xd1\x83\x4c\xed\x10\x7d\xc0\x22\xc4\xcb\
\x55\xc0\x7b\xd8\x77\x9e\x22\x4f\xbc\x0b\x2c\x45\x1a\x72\x25\xb0\
\x1f\xfb\x4e\x53\xe4\x8d\xfd\x28\x49\x86\xb5\x18\x1d\x39\xda\x39\
\xde\x45\xa7\x5b\x43\x5a\x88\xae\x39\x14\xee\x9a\x44\x17\xee\x07\
\x99\x85\xee\x56\x29\x3e\x88\xbd\x14\x72\x0b\xb8\x84\xf5\x64\x4f\
\x06\x36\x00\xc7\x58\x37\x44\x8a\xf2\x1f\xe0\x2c\x8c\xd7\x13\x1e\
\x61\xb9\x73\xdc\xf4\x91\x47\x50\x72\xc8\xa1\x26\x00\x0f\x63\x3c\
\x2d\xc5\x32\x41\x3e\x02\x3c\x08\x9c\x62\xd8\x06\x29\xdb\x54\xe0\
\xb7\x18\x4e\x70\xb4\x9c\x59\x79\x1b\x9a\x02\x2d\xc3\x9b\x8c\x5b\
\x84\x63\xb5\x75\x43\x72\xba\x12\xfb\x0b\x41\x45\x73\xc5\x67\x30\
\x60\x71\x91\xde\x83\xbb\x28\x3f\xc2\x60\xdf\xd2\xbc\xf6\xe0\xee\
\x6c\x3d\x9b\x73\xa7\xb9\x13\x64\x0c\xee\xd5\x60\x7a\x04\x53\x42\
\x6c\xc2\x25\x49\x7f\xae\x1d\x8e\xca\xb5\xa3\xba\x5b\x28\x23\x39\
\xde\xc3\xdd\x20\x58\x0b\xec\x4c\xbc\xaf\x13\x70\x33\x04\xce\x88\
\x58\xe7\x8b\xb8\x15\x23\x9f\xc3\x7d\x96\x54\x46\xe3\xda\xfd\x59\
\xca\x78\xc8\x69\x06\xf0\x23\xe0\x3a\xeb\x86\xa4\xb0\x10\xfb\xf3\
\xd8\x01\xdc\x73\x08\x9f\x4c\xfc\x59\x07\x73\x69\x7d\xdf\x55\xdb\
\x7f\x37\x1f\xbc\x0d\x37\x97\x6e\xe0\xae\x08\x6d\x8f\x11\x35\x5a\
\xf0\x97\xf6\xb1\x94\xb3\x6e\xd5\x9c\xb4\x1f\xf5\xb0\x26\x01\xaf\
\x13\xde\xf6\x0d\xd8\xde\x79\xbc\x69\x88\x76\xe5\x8e\x57\x80\xa3\
\x13\x7f\xd6\xac\x56\x60\xff\xa5\x0e\xe0\x9e\x69\xb7\x76\x0d\xe1\
\xed\xbf\xc4\xa0\xbd\x07\x1a\x81\x7b\x5c\xd6\xba\x1f\x07\x70\x47\
\xb4\x96\x30\x0f\xfb\x2f\xf3\xfd\xf8\x4e\xda\x8f\xda\x90\x63\x09\
\x6f\xff\x51\x06\xed\x3d\xd8\x32\xec\xfb\x71\x00\x77\xaa\x75\x7e\
\xe2\xcf\x9a\xfc\x97\xf4\x31\xc0\x9d\x89\xf7\xe1\x23\xf5\x05\x79\
\x23\xde\x08\x2c\xb7\x1f\xf7\x66\x5a\x6b\xa1\xed\x8f\xad\x03\x77\
\x14\x49\xba\xaa\x7c\xea\x04\xb9\x11\x38\x29\xf1\x3e\xa4\x7d\x9d\
\x02\x7c\x3d\xe5\x0e\x52\x26\xc8\x89\xb8\xb7\xa5\x8a\xa4\xf4\x0d\
\xe0\xf8\x54\x95\xa7\x4c\x90\x65\xb8\x53\x2c\x91\x94\x8e\x04\x6e\
\x4e\x55\x79\xaa\x04\x99\x85\xfb\x71\x49\x24\x87\xcf\x03\x33\x53\
\x54\x9c\x2a\x41\x7e\x90\xa8\x5e\x91\xc1\x74\x00\xdf\x4f\x51\x71\
\x8a\x04\x99\x8b\xde\xa9\x2d\xf9\x5d\x08\xcc\x8e\x5d\x69\x8a\x04\
\xf9\x5e\x82\x3a\x45\x1a\x11\x7d\xec\xc5\x4e\x90\x79\x24\xc8\x62\
\x91\x06\xcd\x05\xce\x8b\x59\x61\xec\x04\xd1\x6d\x5d\xb1\x76\x43\
\xcc\xca\x62\x26\x48\x0f\x2d\x38\xcb\x52\x9a\xce\x45\xb8\x67\xd9\
\xa3\x88\x99\x20\x49\x7f\xd1\x14\x69\x50\x07\x11\xc7\x62\xac\x04\
\x99\x80\xd6\x55\x95\x72\x5c\x45\xa4\xa5\xa4\x62\x25\xc8\x97\xc8\
\xff\x10\x8f\xc8\x50\x3a\x81\x2f\xc4\xa8\x28\x56\x82\x5c\x13\xa9\
\x1e\x91\x58\xae\x8d\x51\x49\x8c\x04\x99\x0b\x7c\x22\x42\x3d\x22\
\x31\x4d\x01\xce\xa9\x5a\x49\x8c\x04\xb9\x3a\x42\x1d\x22\x29\x54\
\x1e\x9b\x55\x13\xa4\x0b\xbd\xcf\x41\xca\x75\x39\x15\xaf\x8d\xab\
\x26\xc8\x45\x94\xf1\x18\xa8\xc8\x60\xba\x71\x73\xb4\x82\x55\x4d\
\x10\xad\xad\x2b\xa5\xbb\xa2\x4a\xe1\x2a\x09\xd2\x05\x5c\x5c\x65\
\xe7\x22\x19\x5c\x8a\xbb\xed\x1b\xa4\x4a\x82\xcc\xc1\x3d\xcd\x25\
\x52\xb2\xa3\x80\x73\x43\x0b\x57\x49\x90\x85\x15\xca\x8a\xe4\x14\
\x3c\x56\x95\x20\xd2\x0e\x16\x84\x16\x0c\x4d\x90\x93\xd0\x8f\x83\
\xd2\x3c\xa6\xe1\x56\xd9\xf1\x16\x9a\x20\xc9\x57\xb4\x13\x89\x2c\
\xe8\x41\xaa\xd0\x04\xd1\x53\x83\xd2\x6c\x82\xa6\x9d\x28\x41\xa4\
\x5d\x04\x8d\xd9\x90\x04\x19\x07\x9c\x1a\xb2\x33\x11\x43\xd3\x09\
\x78\x09\x50\x48\x82\xcc\xc4\xe6\xdd\x86\x22\x55\x74\x00\xa7\xfb\
\x16\x0a\x49\x90\xd3\x02\xca\x88\x94\x60\x86\x6f\x81\x90\x04\xe9\
\x09\x28\x23\x52\x82\x2c\x09\xe2\xbd\x13\x91\x42\x78\xff\x73\xf7\
\x4d\x90\x11\x44\x5c\x52\x45\x24\xb3\x69\x78\x5e\x3f\xfb\x26\xc8\
\x09\x68\x71\x06\x69\x5e\x63\x80\x89\x3e\x05\x7c\x13\x64\x92\xe7\
\xf6\x22\xa5\x99\xec\xb3\xb1\x6f\x82\x78\x55\x2e\x52\xa0\x49\x3e\
\x1b\xeb\x08\x22\xed\x26\xe9\x11\xc4\xeb\xfc\x4d\xa4\x40\x49\xaf\
\x41\xc6\x7b\x6e\x2f\x52\x1a\xaf\x31\xac\x04\x91\x76\xe3\xb5\x66\
\xaf\x12\x44\xda\x4d\xd2\x23\xc8\x58\xcf\xed\x45\x4a\xe3\x35\x86\
\x7d\x13\xa4\xcb\x73\x7b\x91\xd2\x78\x8d\x61\xdf\x04\x09\x5e\x5f\
\x48\xa4\x10\x5e\x63\xd8\x37\x41\x34\xcd\x44\x9a\x9d\xd7\x18\x56\
\x82\x48\xbb\x49\x7a\x04\x11\x69\x2b\xbe\x09\xb2\x2f\x49\x2b\x44\
\xf2\xe9\xf7\xd9\xd8\x37\x41\xbc\x2a\x17\x29\x90\xd7\x3f\x79\x1d\
\x41\xa4\xdd\x24\x3d\x82\xf4\x79\x6e\x2f\x52\x1a\xaf\x31\xec\x9b\
\x20\x3b\x3c\xb7\x17\x29\x8d\xd7\x18\xf6\x4d\x90\xed\x9e\xdb\x8b\
\x94\xc6\x6b\x0c\x2b\x41\xa4\xdd\xbc\xe9\xb3\xb1\x6f\x82\x78\x55\
\x2e\x52\xa0\xa4\x47\x90\x6d\x9e\xdb\x8b\x94\xc6\x6b\x0c\xfb\x26\
\xc8\x0b\x9e\xdb\x8b\x94\x66\xab\xcf\xc6\x4a\x10\x69\x37\x49\x13\
\xc4\xab\x72\x91\x02\x25\x4d\x90\x57\xd0\x74\x13\x69\x5e\xef\x00\
\xaf\xf9\x14\xf0\x4d\x90\x1a\xb0\xc5\xb3\x8c\x48\x29\x36\x03\x03\
\x3e\x05\x42\xa6\xbb\x6f\x0a\x28\x23\x52\x02\xef\xb1\xab\x04\x91\
\x76\x92\x25\x41\x9e\x0e\x28\x23\x52\x82\x67\x7c\x0b\x84\x24\xc8\
\x93\x78\x9e\xc7\x89\x14\xa0\x06\x6c\xf4\x2d\x14\x92\x20\xff\x45\
\x17\xea\xd2\x7c\x7a\x81\xb7\x7c\x0b\x85\x3e\x93\xbe\x3e\xb0\x9c\
\x88\x95\xa0\x31\xab\x04\x91\x76\xb1\x2e\xa4\x50\x68\x82\xac\x09\
\x2c\x27\x62\x65\x6d\x48\xa1\xd0\x04\xd9\x0a\x3c\x17\x58\x56\x24\
\xb7\x5e\xe0\xa5\x90\x82\x55\xd6\xc5\x5a\x55\xa1\xac\x48\x4e\xc1\
\x63\xb5\x4a\x82\x3c\x5a\xa1\xac\x48\x4e\x26\x09\xb2\x06\xd8\x53\
\xa1\xbc\x48\x0e\xbb\x08\xbc\x40\x87\x6a\x09\xd2\x07\xac\xac\x50\
\x5e\x24\x87\x87\xa8\xb0\x9e\x5b\xd5\xb5\x79\x7f\x53\xb1\xbc\x48\
\x6a\x95\xc6\x68\xd5\x04\x59\x05\xec\xae\x58\x87\x48\x2a\x3b\x81\
\xc7\xaa\x54\x50\x35\x41\xfa\x81\x07\x2b\xd6\x21\x92\xca\xef\x80\
\x77\xab\x54\x10\xe3\xf5\x07\x2b\x22\xd4\x21\x92\x42\xe5\xb1\x19\
\x23\x41\xd6\x00\xcf\x47\xa8\x47\x24\xa6\xcd\x44\x98\x12\x15\xeb\
\x05\x3a\xcb\x23\xd5\x23\x12\x4b\x94\x31\x19\x2b\x41\xee\x41\x8b\
\x39\x48\x39\xfa\x81\xfb\x62\x54\x14\x2b\x41\xde\x00\x1e\x88\x54\
\x97\x48\x55\xf7\x11\x69\x1d\xe9\x98\xef\x28\xbc\x35\x62\x5d\x22\
\xa1\x06\x88\x38\x16\x63\x26\x48\x2f\x9a\x9f\x25\xf6\x56\x12\x71\
\xa6\x79\xec\xb7\xdc\xde\x12\xb9\x3e\x11\x5f\x51\xc7\x60\xec\x04\
\xf9\x13\x81\x0f\xa6\x88\x44\xf0\x47\x2a\x4c\x4c\x1c\x4c\x8a\xf7\
\xa4\xdf\x94\xa0\x4e\x91\x46\x44\x1f\x7b\x29\x12\x64\x2d\x2e\x93\
\x45\x72\x7a\x04\xd8\x10\xbb\xd2\x14\x09\x02\xf0\x2d\xb4\x76\x96\
\xe4\x33\x40\xa2\x33\x97\x54\x09\xf2\x17\xf4\xbb\x88\xe4\x73\x0f\
\xf0\x54\x8a\x8a\x53\x25\x08\xc0\x8d\xc0\xdb\x09\xeb\x17\x01\xf7\
\xb8\xc5\x37\x53\x55\x9e\x32\x41\x5e\x06\x96\x25\xac\x5f\x04\xe0\
\x87\x78\xbe\xf3\xc3\x47\xca\x04\x01\x77\x4f\xfa\x9f\x89\xf7\x21\
\xed\x6b\x0b\x70\x5b\xca\x1d\xa4\x4e\x90\x3e\xe0\xcb\x89\xf7\xe1\
\xe3\x48\xeb\x06\x00\x47\x07\x96\x1b\x09\x74\xc5\x6c\x48\xa0\xd0\
\xf6\xc7\x36\x00\x5c\x4b\xe2\x49\xb2\xa9\x13\x04\xdc\x8f\x87\xbf\
\xc8\xb0\x9f\x46\xcc\xb0\x6e\x00\x70\x7a\x85\xb2\x3d\xd1\x5a\x11\
\xae\x4a\xfb\x63\xba\x93\xc8\x3f\x0a\x5a\xea\x06\x5e\xc5\x65\xbd\
\x65\xec\x05\x26\x26\xfe\xac\xc3\x79\x88\xf0\xf6\xff\xdc\xa0\xbd\
\x07\x9a\x8a\x7b\x84\xd5\xba\x1f\x5f\x06\x3e\x9a\xf8\xb3\x66\x77\
\x21\xee\x1d\x0d\xd6\x5f\xee\x9f\x81\x09\x89\x3f\xeb\x60\x46\x03\
\x3f\x0e\x68\xef\x81\x51\x03\xbe\x92\xbb\xe1\x75\xd3\x71\x4f\x8e\
\x5a\xf7\x5f\x0d\xb8\x20\xf1\x67\xfd\xbf\x8e\x5c\x3b\xaa\xbb\x1d\
\xf8\x6a\xe6\x7d\x0e\x66\x37\xee\x81\xfe\xbf\xe3\xde\x19\x31\x90\
\x68\x3f\x1d\xc0\x51\xb8\xff\xbc\x97\x02\xc7\x47\xaa\xf7\x69\xe0\
\x61\xe0\x45\x2a\xac\xf9\xd4\x80\x51\xc0\x71\xc0\xd9\xc0\x7c\xdc\
\x75\x90\xb5\x9f\x00\xd7\xe7\xda\x59\xee\x04\xe9\x04\xfe\x4a\x19\
\xe7\xd2\xd2\x7c\xfe\x06\x9c\x45\xda\x7f\x0a\x1f\x92\x3b\x41\x00\
\xa6\xe1\x7e\x69\x2f\xe1\x8e\x92\x34\x8f\xdd\xc0\x19\x64\x7e\xab\
\x40\x8e\xbb\x58\x07\xdb\x0c\x5c\x6d\xb0\x5f\x69\x5e\x03\xc0\x17\
\x31\x78\xe5\x86\xd5\x39\x65\x2f\xee\x2e\xc4\x2c\xa3\xfd\x4b\x73\
\x59\x06\xdc\x61\xdd\x88\xdc\x46\x01\x8f\x63\x7f\x57\x44\x51\x76\
\x3c\x86\xe1\xcd\x01\x8b\x6b\x90\x03\x75\xe3\x6e\xbb\x4e\x31\x6e\
\x87\x94\xa9\x17\x98\x4d\xc0\xdb\x69\x63\xb1\x4e\x10\x80\xc9\xb8\
\x07\x5d\x2c\x7e\x9b\x90\x72\x6d\xc3\xdd\xb1\xfa\xb7\x65\x23\x2c\
\x2e\xd2\x0f\xb6\x15\xb8\x04\x4d\x8d\x97\x0f\xec\xc5\x8d\x09\xd3\
\xe4\x80\x32\x12\x04\xdc\x6d\xdf\xcb\xc9\x78\x7f\x5b\x8a\xd5\x0f\
\x2c\x02\x9e\xb4\x6e\x48\x89\x2e\xa3\x8c\xb9\x3e\x0a\x9b\xd8\x07\
\x5c\x8c\x1c\xd6\x52\x60\x3f\xf6\x9d\xa5\xc8\x1b\xfb\x81\x25\x48\
\x43\x96\xa2\x23\x49\x3b\xc5\x3e\xe0\x0a\xc4\xcb\x22\xdc\x03\x57\
\xd6\x9d\xa7\x48\x1b\x7d\xb8\x0b\x72\x09\x30\x1f\x77\x47\xc3\xba\
\x13\x15\x69\x62\x0f\x19\xa7\xae\xb7\xaa\x33\x81\xd7\xb1\xef\x4c\
\x45\xdc\x78\x15\xf8\x14\x12\xc5\x64\xe0\x59\xec\x3b\x55\x11\x27\
\x9e\x01\x4e\x44\xa2\x1a\x8b\xe6\x6e\xb5\x42\x3c\x46\x39\x0b\x3f\
\xb4\x9c\x91\xb8\x27\xca\xac\x3b\x59\x11\x16\x37\x53\xc6\x53\x89\
\x2d\x6f\x09\xee\x01\x1a\xeb\x0e\x57\x34\x16\xbb\x70\x33\x25\x24\
\xa3\x69\xb8\x67\xb3\xad\x3b\x5f\x71\xf8\x78\x0a\x38\x75\x88\x3e\
\x94\xc4\x3a\x71\x2b\xeb\x95\xb0\x5a\x8a\xe2\xc3\x51\xc3\xad\xe2\
\x32\x7a\xc8\xde\x93\x6c\xe6\x53\xc6\xba\x5b\x0a\x17\x2f\xa3\xdf\
\x37\x8a\xd3\x0d\xdc\x85\x8e\x26\x96\x51\x03\x7e\x46\x0b\x2e\xea\
\xd6\x4a\xe6\x50\xc6\x02\x67\xed\x16\xcf\x02\xe7\x0c\xdf\x3d\x52\
\x82\x2e\xdc\x5b\xae\xf6\x60\x3f\x70\x5a\x3d\x76\xe1\xde\x05\xd3\
\xd9\x50\xcf\x48\x51\x8e\x07\xee\x45\xa7\x5d\x29\x62\x3f\x6e\x51\
\xf2\xe3\x1a\xee\x0d\x29\xd6\x4c\xe0\x51\xec\x07\x55\xab\xc4\xc3\
\x68\x1e\x55\x4b\x9a\x0d\x3c\x81\xfd\x00\x6b\xd6\x58\x8d\x5b\x48\
\x41\x5a\xdc\x79\xc0\x4a\x74\xea\xd5\x48\xd4\x80\xdf\xa3\x0b\xf0\
\xb6\x34\x15\x58\x8e\x1e\xcc\x1a\x2c\xfa\x70\xb7\xcd\xf5\x2b\xb8\
\x70\x0c\xf0\x35\x34\xa5\x7e\x00\xb7\x58\xdb\x75\xc0\xf8\x4a\xdf\
\xa8\xb4\xac\x73\x80\x15\xc0\x4e\xec\x07\x6b\xae\xd8\x81\xbb\x23\
\x75\x76\x84\xef\x4f\xda\xc4\x68\xdc\xb3\xd2\xf7\xe3\xee\xf5\x5b\
\x0f\xe2\xd8\xf1\x16\x70\x1f\xf0\x69\x34\x5f\x6a\x48\x25\x2c\x3d\
\xda\x0c\x3a\x81\x73\x81\x85\xc0\x02\xdc\x4c\xe2\x66\xd4\x0b\xac\
\xaa\xc7\x3a\xb4\x50\xdf\xb0\x94\x20\x61\x4e\x04\xce\xc7\xdd\x36\
\x9e\x8d\x7b\x7f\x5f\x69\xdf\x65\x0d\x97\x10\xeb\x71\xc9\xb0\x16\
\x78\xc9\xb4\x45\x4d\xa8\xb4\x4e\x6d\x56\xdd\xb8\xd7\x23\xcf\xa8\
\x47\x0f\xee\x28\x33\x26\xd3\xfe\xdf\xc1\xbd\x98\x68\x53\x3d\x9e\
\x01\x36\x62\xb8\x2a\x7a\xab\x50\x82\xa4\xd3\x81\x7b\xe5\xf4\x64\
\x60\x52\xfd\xef\x44\xdc\xdd\xa1\x63\xea\x7f\xc7\xe2\xe6\x8d\x75\
\xe2\xae\x03\xde\x9f\xcf\xd4\x8f\x3b\xfd\xe9\xc7\xdd\x6e\xdd\x01\
\x6c\x07\xde\xac\xff\xdd\x86\x5b\xf4\xfb\xfd\x78\x0d\x77\x5d\x21\
\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\
\x22\x36\xfe\x07\x97\x61\xbb\x2d\xca\x25\x4e\x76\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x0c\
\x07\xb6\xd0\xa7\
\x00\x67\
\x00\x75\x00\x69\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x02\x8c\x59\xa7\
\x00\x70\
\x00\x6c\x00\x61\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x09\xc7\xa6\xc7\
\x00\x72\
\x00\x65\x00\x73\x00\x65\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x0c\x98\xba\x47\
\x00\x70\
\x00\x61\x00\x75\x00\x73\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x02\
\x00\x00\x00\x2e\x00\x00\x00\x00\x00\x01\x00\x00\x32\xdb\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x44\x00\x00\x00\x00\x00\x01\x00\x00\x42\x2b\
\x00\x00\x00\x5c\x00\x00\x00\x00\x00\x01\x00\x00\x55\x1a\
"
def qInitResources():
    """Register the embedded resource data with Qt's resource system."""
    # NOTE(review): QtCore is expected to be imported near the top of this
    # pyrcc-generated module -- not visible in this chunk; confirm.
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource data from Qt's resource system."""
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

# Register the resources as soon as this module is imported (standard
# behaviour for pyrcc-generated resource modules).
qInitResources()
| 64.334978 | 96 | 0.727042 |
19cd3375c40063ac5ed4cd8f161c233a36949249 | 8,671 | py | Python | osc_discoverer.py | gounau-dev/upload-scripts | c8a90926923795926d794ac7db15dea61d9df49b | [
"MIT"
] | null | null | null | osc_discoverer.py | gounau-dev/upload-scripts | c8a90926923795926d794ac7db15dea61d9df49b | [
"MIT"
] | null | null | null | osc_discoverer.py | gounau-dev/upload-scripts | c8a90926923795926d794ac7db15dea61d9df49b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""This script is used to discover video files, sensor info and return Sequences."""
import os
import json
import logging
from typing import List, cast
import constants
from common.models import GPS, OSCDevice
from io_storage.storage import Local
from parsers.exif import ExifParser
from parsers.osc_metadata.parser import metadata_parser
from visual_data_discover import VisualDataDiscoverer
from visual_data_discover import ExifPhotoDiscoverer
from visual_data_discover import PhotoMetadataDiscoverer
from visual_data_discover import VideoDiscoverer
from validators import SequenceValidator, SequenceMetadataValidator, SequenceFinishedValidator
from osc_utils import unzip_metadata
from osc_models import Sequence, Photo, VisualData
LOGGER = logging.getLogger('osc_tools.osc_discoverer')
class OSCUploadProgressDiscoverer:
    """This class is responsible with finding a upload progress file"""

    def __eq__(self, other):
        # Bug fix: the original returned ``self == other`` for matching
        # types, which re-invoked __eq__ and recursed until RecursionError.
        # The class is stateless, so any two instances compare equal.
        return isinstance(other, OSCUploadProgressDiscoverer)

    def __hash__(self):
        # Defining __eq__ would otherwise make instances unhashable;
        # keep hashing consistent with equality (all instances are equal).
        return hash(OSCUploadProgressDiscoverer)

    @classmethod
    def discover(cls, path: str) -> List[str]:
        """this method will discover a upload progress file and parse it to get a progress list."""
        LOGGER.debug("will read uploaded indexes")
        progress_file_path = path + "/" + constants.PROGRESS_FILE_NAME
        if not os.path.isfile(progress_file_path):
            # no progress file means nothing was uploaded yet
            return []
        with open(progress_file_path, 'r') as input_file:
            line = input_file.readline()
            # progress file is a single ";"-separated line of indexes;
            # filter(None, ...) drops empty fragments (e.g. trailing ";")
            indexes = list(filter(None, line.split(";")))
        return indexes
class OSCMetadataDiscoverer:
    """this class will discover a metadata file"""

    def __eq__(self, other):
        # Bug fix: the original returned ``self == other`` for matching
        # types, recursing forever.  The class is stateless, so any two
        # instances compare equal.
        return isinstance(other, OSCMetadataDiscoverer)

    def __hash__(self):
        # Keep hashing consistent with the equality defined above.
        return hash(OSCMetadataDiscoverer)

    @classmethod
    def discover(cls, path: str) -> str:
        """This method will discover osc metadata path"""
        files = os.listdir(path)
        for file_path in files:
            file_name, file_extension = os.path.splitext(file_path)
            if ".txt" in file_extension and "track" in file_name:
                # plain metadata file found -> return its path directly
                return path + "/" + file_path
            if ".gz" in file_extension and "track" in file_name:
                # compressed metadata -> delegate unpacking to the helper
                return unzip_metadata(path)
        return None
        # if no metadata found generate metadata from gpx or exif
class OnlineIDDiscoverer:
    """This class will discover online id of a sequence"""

    @classmethod
    def discover(cls, path: str):
        """Return the sequence's online id as an int, or None when unknown.

        Fixes vs. the original:
        * the return annotation said ``str`` although an ``int`` was returned;
        * ``str.isdigit(data["id"])`` raised TypeError when the stored id was
          already an int -- both forms are now accepted;
        * a malformed JSON file crashed the caller -- it is now treated the
          same as a missing file.
        """
        LOGGER.debug("searching for metadata %s", path)
        sequence_file_path = path + "/osc_sequence_id.txt"
        if not os.path.isfile(sequence_file_path):
            return None
        try:
            with open(sequence_file_path) as json_file:
                data = json.load(json_file)
        except (FileNotFoundError, json.JSONDecodeError):
            # unreadable or malformed file -> no id known
            return None
        if isinstance(data, dict):
            value = data.get("id")
            if isinstance(value, int):
                return value
            if isinstance(value, str) and value.isdigit():
                return int(value)
        return None

    @classmethod
    def discover_using_type(cls, path: str, osc_type: str):
        """this method is discovering the online id"""
        # NOTE(review): looks like an unimplemented stub -- it only prints
        # its arguments; confirm before relying on it.
        print(path)
        print(osc_type)
class SequenceDiscoverer:
    """Seq discoverer base class"""

    def __init__(self):
        # when True, sequences found by this discoverer are skipped at upload
        self.ignored_for_upload: bool = False
        # human-readable discoverer name, overridden by the factory
        self.name = "default"
        # sub-discoverers; subclasses/factory may replace any with None to
        # disable that discovery step
        self.online_id: OnlineIDDiscoverer = OnlineIDDiscoverer()
        self.visual_data: VisualDataDiscoverer = VisualDataDiscoverer()
        self.osc_metadata: OSCMetadataDiscoverer = OSCMetadataDiscoverer()
        self.upload_progress: OSCUploadProgressDiscoverer = OSCUploadProgressDiscoverer()
        self.validator: SequenceValidator = SequenceValidator()

    def discover(self, path: str) -> [Sequence]:
        """This method will discover a valid sequence"""
        files = os.listdir(path)
        sequences = []
        # recurse into every sub-directory first, collecting their sequences
        for file_path in files:
            full_path = os.path.join(path, file_path)
            if os.path.isdir(full_path):
                sequences = sequences + self.discover(full_path)
        # then try to interpret the current directory itself as a sequence
        sequence = self.create_sequence(path)
        if self.validator.validate(sequence):
            sequences.append(sequence)
        else:
            LOGGER.debug("This sequence (%s) does not conform to this discoverer %s.", path,
                         self.name)
        return sequences

    def create_sequence(self, path):
        """This method will discover all attributes af a sequence"""
        sequence = Sequence()
        # each discovery step runs only when its sub-discoverer is enabled
        if self.online_id:
            sequence.online_id = self.online_id.discover(path)
        if self.visual_data:
            (visual_data, data_type) = self.visual_data.discover(path)
            sequence.visual_items = visual_data
            sequence.visual_data_type = data_type
        if self.osc_metadata:
            sequence.osc_metadata = self.osc_metadata.discover(path)
        if self.upload_progress:
            sequence.progress = self.upload_progress.discover(path)
        sequence.path = path
        self._find_latitude_longitude_device_info(sequence)
        return sequence

    def _find_latitude_longitude_device_info(self, sequence: Sequence):
        # Only sequences without a known online id need local geo/device info.
        if not sequence.online_id:
            if sequence.osc_metadata and isinstance(self.validator, SequenceMetadataValidator):
                # metadata-based sequence: read GPS + device info from the
                # OSC metadata file
                parser = metadata_parser(sequence.osc_metadata, Local())
                gps = cast(GPS, parser.next_item_with_class(GPS))
                if gps:
                    sequence.latitude = gps.latitude
                    sequence.longitude = gps.longitude
                device_info: OSCDevice = cast(OSCDevice, parser.next_item_with_class(OSCDevice))
                if device_info:
                    sequence.device = device_info.device_raw_name
                    sequence.platform = device_info.platform_name
            elif sequence.visual_items:
                # no metadata: fall back to the first visual item
                visual_item: VisualData = sequence.visual_items[0]
                if isinstance(self.visual_data, ExifPhotoDiscoverer):
                    # EXIF-based photos carry device info in the image itself
                    parser = ExifParser(visual_item.path, Local())
                    device_info: OSCDevice = parser.next_item_with_class(OSCDevice)
                    if device_info:
                        sequence.device = device_info.device_raw_name
                        sequence.platform = device_info.platform_name
                if isinstance(visual_item, Photo):
                    sequence.latitude = visual_item.latitude
                    sequence.longitude = visual_item.longitude
class SequenceDiscovererFactory:
    """Class that builds a list of sequence discoverers ready to use."""

    @classmethod
    def discoverers(cls) -> [SequenceDiscoverer]:
        """Return every configured discoverer, finished-upload detector first."""
        return [
            cls.finished_discoverer(),
            cls.photo_metadata_discoverer(),
            cls.exif_discoverer(),
            cls.video_discoverer(),
        ]

    @classmethod
    def photo_metadata_discoverer(cls) -> SequenceDiscoverer:
        """Build a discoverer for photo sequences described by OSC metadata."""
        discoverer = SequenceDiscoverer()
        discoverer.name = "Metadata-Photo"
        discoverer.visual_data = PhotoMetadataDiscoverer()
        discoverer.validator = SequenceMetadataValidator()
        return discoverer

    @classmethod
    def exif_discoverer(cls) -> SequenceDiscoverer:
        """Build a discoverer for photo sequences whose geodata lives in EXIF."""
        discoverer = SequenceDiscoverer()
        discoverer.name = "Exif-Photo"
        discoverer.visual_data = ExifPhotoDiscoverer()
        discoverer.osc_metadata = None
        return discoverer

    @classmethod
    def video_discoverer(cls) -> SequenceDiscoverer:
        """Build a discoverer for video sequences described by OSC metadata."""
        discoverer = SequenceDiscoverer()
        discoverer.name = "Metadata-Video"
        discoverer.visual_data = VideoDiscoverer()
        discoverer.validator = SequenceMetadataValidator()
        return discoverer

    @classmethod
    def finished_discoverer(cls) -> SequenceDiscoverer:
        """Build a discoverer that only detects sequences already fully uploaded."""
        discoverer = SequenceDiscoverer()
        discoverer.name = "Done Uploading"
        discoverer.ignored_for_upload = True
        discoverer.visual_data = None
        discoverer.osc_metadata = None
        discoverer.validator = SequenceFinishedValidator()
        return discoverer
| 40.518692 | 100 | 0.663361 |
8dfeb4201c8da1714390d8be722dcc31d08bb987 | 397 | py | Python | leads/migrations/0003_auto_20190211_1142.py | sauravpanda/Django-CRM | c6b8cde02c9cf3d3f30f4e05b825f77d00734e87 | [
"MIT"
] | 1,334 | 2017-06-04T07:47:14.000Z | 2022-03-30T17:12:37.000Z | leads/migrations/0003_auto_20190211_1142.py | AhmedDoudou/Django-CRM-1 | 5faf22acb30aeb32f5830898fd5d8ecd1ac0bbd8 | [
"MIT"
] | 317 | 2017-06-04T07:48:13.000Z | 2022-03-29T19:24:26.000Z | leads/migrations/0003_auto_20190211_1142.py | AhmedDoudou/Django-CRM-1 | 5faf22acb30aeb32f5830898fd5d8ecd1ac0bbd8 | [
"MIT"
] | 786 | 2017-06-06T09:18:48.000Z | 2022-03-29T01:29:29.000Z | # Generated by Django 2.1.5 on 2019-02-11 06:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: relaxes leads.Lead.email so the
    # field may be left blank in forms and stored as NULL in the database.

    dependencies = [
        ("leads", "0002_lead_tags"),
    ]

    operations = [
        migrations.AlterField(
            model_name="lead",
            name="email",
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
    ]
| 20.894737 | 75 | 0.594458 |
68648b0b0bd547b469412fc6abbba1805d97a329 | 3,273 | py | Python | main.py | matsueushi/discogs-xml-audacity | 407781206980194462b25cc064a64f83fcf29067 | [
"MIT"
] | null | null | null | main.py | matsueushi/discogs-xml-audacity | 407781206980194462b25cc064a64f83fcf29067 | [
"MIT"
] | null | null | null | main.py | matsueushi/discogs-xml-audacity | 407781206980194462b25cc064a64f83fcf29067 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import re
import sys
import urllib
# "import urllib" alone does not make urllib.request available; the download
# code calls urllib.request.urlretrieve, so import the submodule explicitly.
import urllib.request
import xml.dom.minidom

import discogs_client
from dotenv import load_dotenv, find_dotenv
# Pull configuration from a .env file (SAVE_PATH, USER_TOKEN, FILE_NAME_TEMPLATE).
load_dotenv(find_dotenv())
# Root directory the per-album folders are written under.
SAVE_PATH = os.getenv('SAVE_PATH')
# Personal Discogs API token used to authenticate the client below.
USER_TOKEN = os.getenv('USER_TOKEN')
# Optional str.format template for track file names; discogs_info_toxml
# falls back to "NN title.xml" when it references unknown placeholders.
FILE_NAME_TEMPLATE = os.getenv('FILE_NAME_TEMPLATE')
# Shared Discogs API client; the first argument is this tool's user-agent.
d = discogs_client.Client('xml-for-audacity/0.1', user_token=USER_TOKEN)
def trim_artist_name(name):
    """Strip Discogs' disambiguation suffix: "Artist (2)" -> "Artist".

    Discogs appends " (N)" to artist names that collide; N can have more
    than one digit, so ``\\d+`` is used (the original ``\\d`` missed e.g.
    "Artist (12)"). A raw string avoids the invalid ``\\(`` escape warning.
    """
    return re.sub(r' \(\d+\)$', '', name)
def audacity_tag(name, val):
    """Return a ``<tag name="..." value="...">`` DOM element for Audacity.

    ``val`` is coerced to str so numeric values (e.g. track numbers) work.
    Element.setAttribute replaces the original createAttribute /
    setAttributeNode dance; the resulting DOM is identical.
    """
    doc = xml.dom.minidom.Document()
    node = doc.createElement('tag')
    node.setAttribute('name', name)
    node.setAttribute('value', str(val))
    return node
def discogs_info_toxml(release):
    """Build one Audacity tags XML document per track of *release*.

    Returns a dict mapping output file name -> xml.dom.minidom.Document.
    File names come from FILE_NAME_TEMPLATE when it is valid, otherwise
    from a fixed "NN title.xml" scheme; each name is printed as it is made.
    """
    # Tags shared by every track; insertion order is the order the <tag>
    # elements are written after TITLE and TRACKNUMBER.
    common_tags = {
        'YEAR': release.year,
        'GENRE': release.genres[0],
        # Drop the " (N)" disambiguation suffix Discogs adds to artist names.
        'ARTIST': trim_artist_name(release.artists[0].name),
        'ALBUM': release.title,
    }
    documents = {}
    for number, track in enumerate(release.tracklist, start=1):
        doc = xml.dom.minidom.Document()
        tags_node = doc.createElement('tags')
        doc.appendChild(tags_node)
        tags_node.appendChild(audacity_tag('TITLE', track.title))
        tags_node.appendChild(audacity_tag('TRACKNUMBER', number))
        for tag_name, tag_value in common_tags.items():
            tags_node.appendChild(audacity_tag(tag_name, tag_value))
        template_args = {
            "artist": trim_artist_name(release.artists[0].name),
            "album": release.title,
            "number": number,
            "song": track.title}
        try:
            file_name = FILE_NAME_TEMPLATE.format(**template_args)
        except KeyError:
            # Template referenced an unknown placeholder; use the fallback.
            file_name = "{number:02} {song}.xml".format(**template_args)
        # Keep the name a single path component.
        file_name = file_name.replace('/', '_')
        print(file_name)
        documents[file_name] = doc
    return documents
def download_album_artwork(release, save_path):
    """Download the release's first image to '<title>.jpg' under save_path.

    Exits the process with an error message when the download fails.
    """
    image_url = release.images[0]['uri']
    # Sanitize '/' like the track file names are sanitized, so the album
    # title cannot introduce extra path components.
    image_name = re.sub('/', '_', '%s.jpg' % release.title)
    try:
        urllib.request.urlretrieve(
            image_url, os.path.join(save_path, image_name))
    except Exception:
        # Was a bare "except:", which would also swallow KeyboardInterrupt;
        # only the name construction was moved out of the try block.
        sys.exit('Unable to download image')
def download_album_info(discogs_id):
    """Fetch a Discogs release and write per-track tag XML plus cover art."""
    release = d.release(discogs_id)
    artist = trim_artist_name(release.artists[0].name)
    target_dir = os.path.join(SAVE_PATH, artist, release.title)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # One pretty-printed XML file per track, keyed by its output file name.
    for file_name, document in discogs_info_toxml(release).items():
        with open(os.path.join(target_dir, file_name),
                  'w', encoding='utf-8') as out:
            out.write(document.toprettyxml())
    download_album_artwork(release, target_dir)
    print('complete!')
if __name__ == '__main__':
    # Expect exactly one argument: the numeric Discogs release id.
    if len(sys.argv) != 2:
        print('Usage: python %s release_id' % sys.argv[0])
        quit()
    download_album_info(int(sys.argv[1]))
| 31.171429 | 72 | 0.655362 |
30bd9dbdb5efe1e6f99d28d04bb787c09501e7f1 | 11,925 | py | Python | fortran/generate-code.py | mschollmeier/xraylib | abf5005cd2a9c8d42d7d70a0f1ef21b6c6bd7ccd | [
"BSD-3-Clause"
] | null | null | null | fortran/generate-code.py | mschollmeier/xraylib | abf5005cd2a9c8d42d7d70a0f1ef21b6c6bd7ccd | [
"BSD-3-Clause"
] | null | null | null | fortran/generate-code.py | mschollmeier/xraylib | abf5005cd2a9c8d42d7d70a0f1ef21b6c6bd7ccd | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys
# Dict insertion order must be preserved (CPython >= 3.6) because the
# generated Fortran argument order follows dict order. Compare the full
# version tuple: the original "minor < 6" check wrongly accepted
# Python 2.7 (minor == 7) and would reject e.g. a future 4.0.
if sys.version_info < (3, 6):
    raise Exception("Execute this script with at least python 3.6 to ensure all dicts are ordered!")
# Catalogue of the xraylib C functions to wrap: function name -> ordered
# mapping of argument name -> Python type (int/float/str), from which the
# Fortran declarations, conversions and call expressions are generated.
XRL_FUNCTIONS = {
    'AtomicWeight': {'Z': int},
    'ElementDensity': {'Z': int},
    'CS_Total': {'Z': int, 'E': float},
    'CS_Photo': {'Z': int, 'E': float},
    'CS_Rayl': {'Z': int, 'E': float},
    'CS_Compt': {'Z': int, 'E': float},
    'CS_Energy': {'Z': int, 'E': float},
    'CS_KN': {'E': float},
    'CSb_Total': {'Z': int, 'E': float},
    'CSb_Photo': {'Z': int, 'E': float},
    'CSb_Rayl': {'Z': int, 'E': float},
    'CSb_Compt': {'Z': int, 'E': float},
    'DCS_Thoms': {'theta': float},
    'DCS_KN': {'E':float, 'theta': float},
    'DCS_Rayl': {'Z': int, 'E':float, 'theta': float},
    'DCS_Compt': {'Z': int, 'E':float, 'theta': float},
    'DCSb_Rayl': {'Z': int, 'E':float, 'theta': float},
    'DCSb_Compt': {'Z': int, 'E':float, 'theta': float},
    'DCSP_Thoms': {'theta': float, 'phi': float},
    'DCSP_KN': {'E': float, 'theta': float, 'phi': float},
    'DCSP_Rayl': {'Z': int, 'E': float, 'theta': float, 'phi': float},
    'DCSP_Compt': {'Z': int, 'E': float, 'theta': float, 'phi': float},
    'DCSPb_Rayl': {'Z': int, 'E': float, 'theta': float, 'phi': float},
    'DCSPb_Compt': {'Z': int, 'E': float, 'theta': float, 'phi': float},
    'FF_Rayl': {'Z': int, 'q': float},
    'SF_Compt': {'Z': int, 'q': float},
    'MomentTransf': {'E': float, 'theta': float},
    'LineEnergy': {'Z': int, 'line': int},
    'FluorYield': {'Z': int, 'shell': int},
    'CosKronTransProb': {'Z': int, 'trans': int},
    'EdgeEnergy': {'Z': int, 'shell': int},
    'JumpFactor': {'Z': int, 'shell': int},
    'CS_FluorLine': {'Z': int, 'line': int, 'E': float},
    'CSb_FluorLine': {'Z': int, 'line': int, 'E': float},
    'RadRate': {'Z': int, 'line': int},
    'ComptonEnergy': {'E0': float, 'theta': float},
    'Fi': {'Z': int, 'E': float},
    'Fii': {'Z': int, 'E': float},
    'CS_Photo_Total': {'Z': int, 'E': float},
    'CSb_Photo_Total': {'Z': int, 'E': float},
    'CS_Photo_Partial': {'Z': int, 'shell': int, 'E': float},
    'CSb_Photo_Partial': {'Z': int, 'shell': int, 'E': float},
    'CS_Total_Kissel': {'Z': int, 'E': float},
    'CSb_Total_Kissel': {'Z': int, 'E': float},
    'ComptonProfile': {'Z': int, 'pz': float},
    'ComptonProfile_Partial': {'Z': int, 'shell': int, 'pz': float},
    'ElectronConfig': {'Z': int, 'shell': int},
    'ElectronConfig_Biggs': {'Z': int, 'shell': int},
    'AtomicLevelWidth': {'Z': int, 'shell': int},
    'AugerRate': {'Z': int, 'auger_trans': int},
    'AugerYield': {'Z': int, 'shell': int},
    'CS_FluorLine_Kissel': {'Z': int, 'line': int, 'E': float},
    'CSb_FluorLine_Kissel': {'Z': int, 'line': int, 'E': float},
    'CS_FluorLine_Kissel_Cascade': {'Z': int, 'line': int, 'E': float},
    'CSb_FluorLine_Kissel_Cascade': {'Z': int, 'line': int, 'E': float},
    'CS_FluorLine_Kissel_Nonradiative_Cascade': {'Z': int, 'line': int, 'E': float},
    'CSb_FluorLine_Kissel_Nonradiative_Cascade': {'Z': int, 'line': int, 'E': float},
    'CS_FluorLine_Kissel_Radiative_Cascade': {'Z': int, 'line': int, 'E': float},
    'CSb_FluorLine_Kissel_Radiative_Cascade': {'Z': int, 'line': int, 'E': float},
    'CS_FluorLine_Kissel_no_Cascade': {'Z': int, 'line': int, 'E': float},
    'CSb_FluorLine_Kissel_no_Cascade': {'Z': int, 'line': int, 'E': float},
    'PL1_pure_kissel': {'Z': int, 'E': float},
    'PL1_rad_cascade_kissel': {'Z': int, 'E': float, 'PK': float},
    'PL1_auger_cascade_kissel': {'Z': int, 'E': float, 'PK': float},
    'PL1_full_cascade_kissel': {'Z': int, 'E': float, 'PK': float},
    'PL2_pure_kissel': {'Z': int, 'E': float, 'PL1': float},
    'PL2_rad_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float},
    'PL2_auger_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float},
    'PL2_full_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float},
    'PL3_pure_kissel': {'Z': int, 'E': float, 'PL1': float, 'PL2': float},
    'PL3_rad_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float},
    'PL3_auger_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float},
    'PL3_full_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float},
    'PM1_pure_kissel': {'Z': int, 'E': float},
    'PM1_rad_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float},
    'PM1_auger_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float},
    'PM1_full_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float},
    'PM2_pure_kissel': {'Z': int, 'E': float, 'PM1': float},
    'PM2_rad_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float},
    'PM2_auger_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float},
    'PM2_full_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float},
    'PM3_pure_kissel': {'Z': int, 'E': float, 'PM1': float, 'PM2': float},
    'PM3_rad_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float, 'PM2': float},
    'PM3_auger_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float, 'PM2': float},
    'PM3_full_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float, 'PM2': float},
    'PM4_pure_kissel': {'Z': int, 'E': float, 'PM1': float, 'PM2': float, 'PM3': float},
    'PM4_rad_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float, 'PM2': float, 'PM3': float},
    'PM4_auger_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float, 'PM2': float, 'PM3': float},
    'PM4_full_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float, 'PM2': float, 'PM3': float},
    'PM5_pure_kissel': {'Z': int, 'E': float, 'PM1': float, 'PM2': float, 'PM3': float, 'PM4': float},
    'PM5_rad_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float, 'PM2': float, 'PM3': float, 'PM4': float},
    'PM5_auger_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float, 'PM2': float, 'PM3': float, 'PM4': float},
    'PM5_full_cascade_kissel': {'Z': int, 'E': float, 'PK': float, 'PL1': float, 'PL2': float, 'PL3': float, 'PM1': float, 'PM2': float, 'PM3': float, 'PM4': float},
    # The '_CP' and refractive-index variants take a chemical compound
    # string instead of an atomic number Z.
    'CS_Total_CP': {'compound': str, 'E': float},
    'CS_Photo_CP': {'compound': str, 'E': float},
    'CS_Rayl_CP': {'compound': str, 'E': float},
    'CS_Compt_CP': {'compound': str, 'E': float},
    'CS_Energy_CP': {'compound': str, 'E': float},
    'CSb_Total_CP': {'compound': str, 'E': float},
    'CSb_Photo_CP': {'compound': str, 'E': float},
    'CSb_Rayl_CP': {'compound': str, 'E': float},
    'CSb_Compt_CP': {'compound': str, 'E': float},
    'DCS_Rayl_CP': {'compound': str, 'E': float, 'theta': float},
    'DCS_Compt_CP': {'compound': str, 'E': float, 'theta': float},
    'DCSb_Rayl_CP': {'compound': str, 'E': float, 'theta': float},
    'DCSb_Compt_CP': {'compound': str, 'E': float, 'theta': float},
    'DCSP_Rayl_CP': {'compound': str, 'E': float, 'theta': float, 'phi': float},
    'DCSP_Compt_CP': {'compound': str, 'E': float, 'theta': float, 'phi': float},
    'DCSPb_Rayl_CP': {'compound': str, 'E': float, 'theta': float, 'phi': float},
    'DCSPb_Compt_CP': {'compound': str, 'E': float, 'theta': float, 'phi': float},
    'CS_Photo_Total_CP': {'compound': str, 'E': float},
    'CS_Total_Kissel_CP': {'compound': str, 'E': float},
    'CSb_Photo_Total_CP': {'compound': str, 'E': float},
    'CSb_Total_Kissel_CP': {'compound': str, 'E': float},
    'Refractive_Index_Re': {'compound': str, 'E': float, 'density': float},
    'Refractive_Index_Im': {'compound': str, 'E': float, 'density': float}
}
def generate_declaration_for_str(arg_name: str) -> str:
    # Two declarations: the caller-facing assumed-length string, and an
    # allocatable C-char work array ("<arg>_F") filled by stringF2C before
    # the BIND(C) call (see generate_preprocess_for_str). The literal's
    # whitespace is emitted verbatim into the generated Fortran source.
    return f'''CHARACTER (KIND=C_CHAR,LEN=*), INTENT(IN) :: {arg_name}
        CHARACTER (KIND=C_CHAR), DIMENSION(:), ALLOCATABLE, TARGET :: &
        {arg_name}_F'''
def generate_declaration_for_float(arg_name: str) -> str:
    """Fortran declaration of a double-precision input dummy argument."""
    return 'REAL (C_DOUBLE), INTENT(IN) :: ' + arg_name
def generate_declaration_for_int(arg_name: str) -> str:
    """Fortran declaration of an integer input dummy argument."""
    return 'INTEGER (C_INT), INTENT(IN) :: ' + arg_name
def generate_iface_declaration_for_str(arg_name: str) -> str:
    """C-interface declaration: strings cross the boundary as C pointers."""
    return 'TYPE (C_PTR), INTENT(IN), VALUE :: %s' % arg_name
def generate_iface_declaration_for_float(arg_name: str) -> str:
    """C-interface declaration: doubles are passed to C by value."""
    return 'REAL (C_DOUBLE), INTENT(IN), VALUE :: %s' % arg_name
def generate_iface_declaration_for_int(arg_name: str) -> str:
    """C-interface declaration: integers are passed to C by value."""
    return 'INTEGER (C_INT), INTENT(IN), VALUE :: %s' % arg_name
# Argument Python type -> generator of its wrapper-side Fortran declaration.
GENERATE_DECLARATION = {
    str: generate_declaration_for_str,
    float: generate_declaration_for_float,
    int: generate_declaration_for_int
}
# Argument Python type -> generator of its BIND(C) interface declaration.
GENERATE_IFACE_DECLARATION = {
    str: generate_iface_declaration_for_str,
    float: generate_iface_declaration_for_float,
    int: generate_iface_declaration_for_int
}
def generate_preprocess_for_str(arg_name: str) -> str:
    # Fortran statement converting the string into its C work copy; the
    # trailing newline + indent in the literal keeps the generated code
    # aligned where {arg_preprocess} is interpolated in process_function.
    return f'''CALL stringF2C({arg_name}, {arg_name}_F)
    '''
# Argument Python type -> generator of the pre-call conversion snippet.
# Numeric arguments need no conversion, hence the empty-string lambdas.
GENERATE_PREPROCESS = {
    str: generate_preprocess_for_str,
    float: lambda arg_name: "",
    int: lambda arg_name: ""
}
# Argument Python type -> generator of the expression used in the actual
# C call: strings pass the address of their work copy, numbers pass as-is.
GENERATE_CALL = {
    str: lambda arg_name: f"C_LOC({arg_name}_F)",
    float: lambda arg_name: f"{arg_name}",
    int: lambda arg_name: f"{arg_name}"
}
def process_function(name: str) -> str:
    """Return the Fortran source of the wrapper for xraylib function *name*.

    Looks up the argument names/types in XRL_FUNCTIONS and assembles, from
    the per-type snippet generators, a wrapper that converts strings,
    forwards the call through a BIND(C) interface, and maps a C-side error
    pointer onto the optional *error* argument.
    """
    args = XRL_FUNCTIONS[name]
    arg_list = ', '.join(args.keys())
    # Per-argument snippet lists; each join separator carries the
    # indentation of the template spot where the list is interpolated.
    arg_declarations = '\n        '.join([GENERATE_DECLARATION[arg_type](arg_name) for arg_name, arg_type in args.items()])
    arg_preprocess = ''.join([GENERATE_PREPROCESS[arg_type](arg_name) for arg_name, arg_type in args.items()])
    arg_calls = ', '.join([GENERATE_CALL[arg_type](arg_name) for arg_name, arg_type in args.items()])
    arg_iface_declarations = '\n            '.join([GENERATE_IFACE_DECLARATION[arg_type](arg_name) for arg_name, arg_type in args.items()])
    # NOTE: no comments inside the template below -- every character of the
    # f-string ends up verbatim in the generated Fortran file.
    return f'''
FUNCTION {name}({arg_list}, error) RESULT(rv)
        USE, INTRINSIC :: ISO_C_BINDING
        USE, INTRINSIC :: ISO_FORTRAN_ENV
        IMPLICIT NONE
        {arg_declarations}
        REAL (C_DOUBLE) :: rv
        TYPE(xrl_error), POINTER, OPTIONAL :: error
        TYPE (C_PTR) :: errorPtr, errorPtrLoc
        TARGET :: errorPtr
        INTERFACE
            FUNCTION {name}C({arg_list}, error) &
            BIND(C,NAME='{name}')
                USE, INTRINSIC :: ISO_C_BINDING
                IMPLICIT NONE
                {arg_iface_declarations}
                REAL (C_DOUBLE) :: {name}C
                TYPE (C_PTR),INTENT(IN),VALUE :: error
            ENDFUNCTION {name}C
        ENDINTERFACE
        errorPtr = C_NULL_PTR
        errorPtrLoc = C_NULL_PTR
        IF (PRESENT(error)) THEN
            IF (.NOT. ASSOCIATED(error)) THEN
                errorPtrLoc = C_LOC(errorPtr)
            ELSE
                ! print warning
                WRITE (error_unit, '(A)') &
                'error POINTER must be disassociated!'
            ENDIF
        ENDIF
        {arg_preprocess}
        rv = {name}C({arg_calls}, errorPtrLoc)
        IF (C_ASSOCIATED(errorPtr)) THEN
            CALL process_error(errorPtr, error)
        ENDIF
ENDFUNCTION {name}
'''
# Module-level side effect: running this script regenerates the Fortran
# wrapper source, one wrapper function per XRL_FUNCTIONS entry.
with open('xraylib_wrap_generated.F90', 'w') as f:
    f.write('! This file has been generated automatically using generate-code.py\n\n')
    f.write('\n'.join([process_function(function) for function in XRL_FUNCTIONS.keys()]))
| 49.6875 | 166 | 0.58826 |
1b3ba5cf4dba23873eefa7a1335803905d8eb02e | 12,305 | py | Python | tests/utils/test_mol_to_graph.py | seqRep/dgl-lifesci | c4bd45be6dbb59dc270957ed90bb19d9ed6dc157 | [
"Apache-2.0"
] | 1 | 2020-06-22T19:19:24.000Z | 2020-06-22T19:19:24.000Z | tests/utils/test_mol_to_graph.py | seqRep/dgl-lifesci | c4bd45be6dbb59dc270957ed90bb19d9ed6dc157 | [
"Apache-2.0"
] | null | null | null | tests/utils/test_mol_to_graph.py | seqRep/dgl-lifesci | c4bd45be6dbb59dc270957ed90bb19d9ed6dc157 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import torch
from dgllife.utils.featurizers import *
from dgllife.utils.mol_to_graph import *
from rdkit import Chem
from rdkit.Chem import AllChem
# SMILES used across the tests: ethanol, fluorobenzene, and an atom-mapped
# two-fragment string exercising the "keep original atom order" cases.
test_smiles1 = 'CCO'
test_smiles2 = 'Fc1ccccc1'
test_smiles3 = '[CH2:1]([CH3:2])[N:3]1[CH2:4][CH2:5][C:6]([CH3:16])' \
               '([CH3:17])[c:7]2[cH:8][cH:9][c:10]([N+:13]([O-:14])=[O:15])' \
               '[cH:11][c:12]21.[CH3:18][CH2:19][O:20][C:21]([CH3:22])=[O:23]'
class TestAtomFeaturizer(BaseAtomFeaturizer):
    """Atom featurizer with one field, 'hv', holding the atomic number."""

    def __init__(self):
        # Zero-argument super() is equivalent to super(TestAtomFeaturizer, self).
        super().__init__(
            featurizer_funcs={'hv': ConcatFeaturizer([atomic_number])})
class TestBondFeaturizer(BaseBondFeaturizer):
    """Bond featurizer with one field, 'he': whether the bond is in a ring."""

    def __init__(self):
        # Zero-argument super() is equivalent to super(TestBondFeaturizer, self).
        super().__init__(
            featurizer_funcs={'he': ConcatFeaturizer([bond_is_in_ring])})
def test_smiles_to_bigraph():
    """smiles_to_bigraph: self loops, node/edge featurizers, atom-order flag."""
    # Test the case with self loops added.
    g1 = smiles_to_bigraph(test_smiles1, add_self_loop=True)
    src, dst = g1.edges()
    assert torch.allclose(src, torch.LongTensor([0, 2, 2, 1, 0, 1, 2]))
    assert torch.allclose(dst, torch.LongTensor([2, 0, 1, 2, 0, 1, 2]))
    # Test the case without self loops.
    test_node_featurizer = TestAtomFeaturizer()
    test_edge_featurizer = TestBondFeaturizer()
    g2 = smiles_to_bigraph(test_smiles2, add_self_loop=False,
                           node_featurizer=test_node_featurizer,
                           edge_featurizer=test_edge_featurizer)
    assert torch.allclose(g2.ndata['hv'], torch.tensor([[9.], [6.], [6.], [6.],
                                                        [6.], [6.], [6.]]))
    assert torch.allclose(g2.edata['he'], torch.tensor([[0.], [0.], [1.], [1.], [1.],
                                                        [1.], [1.], [1.], [1.], [1.],
                                                        [1.], [1.], [1.], [1.]]))
    # Test the case where atoms come with a default order and we do not
    # want to change the order, which is related to the application of
    # reaction center prediction.
    g3 = smiles_to_bigraph(test_smiles3, node_featurizer=test_node_featurizer,
                           canonical_atom_order=False)
    assert torch.allclose(g3.ndata['hv'], torch.tensor([[6.], [6.], [7.], [6.], [6.], [6.],
                                                        [6.], [6.], [6.], [6.], [6.], [6.],
                                                        [7.], [8.], [8.], [6.], [6.], [6.],
                                                        [6.], [8.], [6.], [6.], [8.]]))
def test_mol_to_bigraph():
    """mol_to_bigraph: same cases as test_smiles_to_bigraph, from RDKit mols."""
    mol1 = Chem.MolFromSmiles(test_smiles1)
    g1 = mol_to_bigraph(mol1, add_self_loop=True)
    src, dst = g1.edges()
    assert torch.allclose(src, torch.LongTensor([0, 2, 2, 1, 0, 1, 2]))
    assert torch.allclose(dst, torch.LongTensor([2, 0, 1, 2, 0, 1, 2]))
    # Test the case without self loops.
    mol2 = Chem.MolFromSmiles(test_smiles2)
    test_node_featurizer = TestAtomFeaturizer()
    test_edge_featurizer = TestBondFeaturizer()
    g2 = mol_to_bigraph(mol2, add_self_loop=False,
                        node_featurizer=test_node_featurizer,
                        edge_featurizer=test_edge_featurizer)
    assert torch.allclose(g2.ndata['hv'], torch.tensor([[9.], [6.], [6.], [6.],
                                                        [6.], [6.], [6.]]))
    assert torch.allclose(g2.edata['he'], torch.tensor([[0.], [0.], [1.], [1.], [1.],
                                                        [1.], [1.], [1.], [1.], [1.],
                                                        [1.], [1.], [1.], [1.]]))
    # Test the case where atoms come with a default order and we do not
    # want to change the order, which is related to the application of
    # reaction center prediction.
    mol3 = Chem.MolFromSmiles(test_smiles3)
    g3 = mol_to_bigraph(mol3, node_featurizer=test_node_featurizer,
                        canonical_atom_order=False)
    assert torch.allclose(g3.ndata['hv'], torch.tensor([[6.], [6.], [7.], [6.], [6.], [6.],
                                                        [6.], [6.], [6.], [6.], [6.], [6.],
                                                        [7.], [8.], [8.], [6.], [6.], [6.],
                                                        [6.], [8.], [6.], [6.], [8.]]))
def test_smiles_to_complete_graph():
    """smiles_to_complete_graph: all-pairs edges, featurizer, atom-order flag."""
    test_node_featurizer = TestAtomFeaturizer()
    g1 = smiles_to_complete_graph(test_smiles1, add_self_loop=False,
                                  node_featurizer=test_node_featurizer)
    src, dst = g1.edges()
    assert torch.allclose(src, torch.LongTensor([0, 0, 1, 1, 2, 2]))
    assert torch.allclose(dst, torch.LongTensor([1, 2, 0, 2, 0, 1]))
    assert torch.allclose(g1.ndata['hv'], torch.tensor([[6.], [8.], [6.]]))
    # Test the case where atoms come with a default order and we do not
    # want to change the order, which is related to the application of
    # reaction center prediction.
    g2 = smiles_to_complete_graph(test_smiles3, node_featurizer=test_node_featurizer,
                                  canonical_atom_order=False)
    assert torch.allclose(g2.ndata['hv'], torch.tensor([[6.], [6.], [7.], [6.], [6.], [6.],
                                                        [6.], [6.], [6.], [6.], [6.], [6.],
                                                        [7.], [8.], [8.], [6.], [6.], [6.],
                                                        [6.], [8.], [6.], [6.], [8.]]))
def test_mol_to_complete_graph():
    """mol_to_complete_graph: same cases as the SMILES variant, from mols."""
    test_node_featurizer = TestAtomFeaturizer()
    mol1 = Chem.MolFromSmiles(test_smiles1)
    g1 = mol_to_complete_graph(mol1, add_self_loop=False,
                               node_featurizer=test_node_featurizer)
    src, dst = g1.edges()
    assert torch.allclose(src, torch.LongTensor([0, 0, 1, 1, 2, 2]))
    assert torch.allclose(dst, torch.LongTensor([1, 2, 0, 2, 0, 1]))
    assert torch.allclose(g1.ndata['hv'], torch.tensor([[6.], [8.], [6.]]))
    # Test the case where atoms come with a default order and we do not
    # want to change the order, which is related to the application of
    # reaction center prediction.
    mol2 = Chem.MolFromSmiles(test_smiles3)
    g2 = mol_to_complete_graph(mol2, node_featurizer=test_node_featurizer,
                               canonical_atom_order=False)
    assert torch.allclose(g2.ndata['hv'], torch.tensor([[6.], [6.], [7.], [6.], [6.], [6.],
                                                        [6.], [6.], [6.], [6.], [6.], [6.],
                                                        [7.], [8.], [8.], [6.], [6.], [6.],
                                                        [6.], [8.], [6.], [6.], [8.]]))
def test_k_nearest_neighbors():
    """k_nearest_neighbors: cutoff + max-neighbor cap, self loops, no cap."""
    # Two spatial clusters: points 0-3 close together, 4-5 far away.
    coordinates = np.array([[0.1, 0.1, 0.1],
                            [0.2, 0.1, 0.1],
                            [0.15, 0.15, 0.1],
                            [0.1, 0.15, 0.16],
                            [1.2, 0.1, 0.1],
                            [1.3, 0.2, 0.1]])
    neighbor_cutoff = 1.
    max_num_neighbors = 2
    srcs, dsts, dists = k_nearest_neighbors(coordinates, neighbor_cutoff, max_num_neighbors)
    assert srcs == [2, 3, 2, 0, 0, 1, 0, 2, 1, 5, 4]
    assert dsts == [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5]
    assert dists == [0.07071067811865478, 0.0781024967590666, 0.07071067811865483,
                     0.1, 0.07071067811865478, 0.07071067811865483, 0.0781024967590666,
                     0.0781024967590666, 1.0, 0.14142135623730956, 0.14142135623730956]
    # Test the case where self loops are included
    srcs, dsts, dists = k_nearest_neighbors(coordinates, neighbor_cutoff,
                                            max_num_neighbors, self_loops=True)
    assert srcs == [0, 2, 1, 2, 2, 0, 3, 0, 4, 5, 4, 5]
    assert dsts == [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]
    assert dists == [0.0, 0.07071067811865478, 0.0, 0.07071067811865483, 0.0,
                     0.07071067811865478, 0.0, 0.0781024967590666, 0.0,
                     0.14142135623730956, 0.14142135623730956, 0.0]
    # Test the case where max_num_neighbors is not given
    srcs, dsts, dists = k_nearest_neighbors(coordinates, neighbor_cutoff=10.)
    assert srcs == [1, 2, 3, 4, 5, 0, 2, 3, 4, 5, 0, 1, 3, 4, 5,
                    0, 1, 2, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4]
    assert dsts == [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
                    3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5]
    assert dists == [0.1, 0.07071067811865478, 0.0781024967590666, 1.1,
                     1.2041594578792296, 0.1, 0.07071067811865483,
                     0.12688577540449525, 1.0, 1.104536101718726,
                     0.07071067811865478, 0.07071067811865483,
                     0.0781024967590666, 1.0511898020814319, 1.151086443322134,
                     0.0781024967590666, 0.12688577540449525, 0.0781024967590666,
                     1.1027692415006867, 1.202538980657176, 1.1, 1.0,
                     1.0511898020814319, 1.1027692415006867, 0.14142135623730956,
                     1.2041594578792296, 1.104536101718726, 1.151086443322134,
                     1.202538980657176, 0.14142135623730956]
def test_smiles_to_nearest_neighbor_graph():
    """smiles_to_nearest_neighbor_graph: featurizer, loops, cap, distances."""
    # Coordinates come from an RDKit-embedded conformer of ethanol.
    mol = Chem.MolFromSmiles(test_smiles1)
    AllChem.EmbedMolecule(mol)
    coordinates = mol.GetConformers()[0].GetPositions()
    # Test node featurizer
    test_node_featurizer = TestAtomFeaturizer()
    g = smiles_to_nearest_neighbor_graph(test_smiles1, coordinates, neighbor_cutoff=10,
                                         node_featurizer=test_node_featurizer)
    assert torch.allclose(g.ndata['hv'], torch.tensor([[6.], [8.], [6.]]))
    assert g.number_of_edges() == 6
    assert 'dist' not in g.edata
    # Test self loops
    g = smiles_to_nearest_neighbor_graph(test_smiles1, coordinates, neighbor_cutoff=10,
                                         add_self_loop=True)
    assert g.number_of_edges() == 9
    # Test max_num_neighbors
    g = smiles_to_nearest_neighbor_graph(test_smiles1, coordinates, neighbor_cutoff=10,
                                         max_num_neighbors=1, add_self_loop=True)
    assert g.number_of_edges() == 3
    # Test pairwise distances
    g = smiles_to_nearest_neighbor_graph(test_smiles1, coordinates,
                                         neighbor_cutoff=10, keep_dists=True)
    assert 'dist' in g.edata
    # Recompute Euclidean edge lengths and compare with the stored 'dist'.
    coordinates = torch.from_numpy(coordinates)
    srcs, dsts = g.edges()
    dist = torch.norm(
        coordinates[srcs] - coordinates[dsts], dim=1, p=2).float().reshape(-1, 1)
    assert torch.allclose(dist, g.edata['dist'])
def test_mol_to_nearest_neighbor_graph():
    """mol_to_nearest_neighbor_graph: same cases as the SMILES variant."""
    mol = Chem.MolFromSmiles(test_smiles1)
    AllChem.EmbedMolecule(mol)
    coordinates = mol.GetConformers()[0].GetPositions()
    # Test node featurizer
    test_node_featurizer = TestAtomFeaturizer()
    g = mol_to_nearest_neighbor_graph(mol, coordinates, neighbor_cutoff=10,
                                      node_featurizer=test_node_featurizer)
    assert torch.allclose(g.ndata['hv'], torch.tensor([[6.], [8.], [6.]]))
    assert g.number_of_edges() == 6
    assert 'dist' not in g.edata
    # Test self loops
    g = mol_to_nearest_neighbor_graph(mol, coordinates, neighbor_cutoff=10, add_self_loop=True)
    assert g.number_of_edges() == 9
    # Test max_num_neighbors
    g = mol_to_nearest_neighbor_graph(mol, coordinates, neighbor_cutoff=10,
                                      max_num_neighbors=1, add_self_loop=True)
    assert g.number_of_edges() == 3
    # Test pairwise distances
    g = mol_to_nearest_neighbor_graph(mol, coordinates, neighbor_cutoff=10, keep_dists=True)
    assert 'dist' in g.edata
    # Recompute Euclidean edge lengths and compare with the stored 'dist'.
    coordinates = torch.from_numpy(coordinates)
    srcs, dsts = g.edges()
    dist = torch.norm(
        coordinates[srcs] - coordinates[dsts], dim=1, p=2).float().reshape(-1, 1)
    assert torch.allclose(dist, g.edata['dist'])
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    test_smiles_to_bigraph()
    test_mol_to_bigraph()
    test_smiles_to_complete_graph()
    test_mol_to_complete_graph()
    test_k_nearest_neighbors()
    test_smiles_to_nearest_neighbor_graph()
    test_mol_to_nearest_neighbor_graph()
| 50.430328 | 95 | 0.557416 |
59c6ea9ae107800ff9f0b9bc3a9dee45e28e9ace | 2,975 | py | Python | unified/models/fs.py | LoganCook/reporting-unified | 9d2c7e083c5c400e9120bb8552348e41406a1bc1 | [
"Apache-2.0"
] | null | null | null | unified/models/fs.py | LoganCook/reporting-unified | 9d2c7e083c5c400e9120bb8552348e41406a1bc1 | [
"Apache-2.0"
] | null | null | null | unified/models/fs.py | LoganCook/reporting-unified | 9d2c7e083c5c400e9120bb8552348e41406a1bc1 | [
"Apache-2.0"
] | null | null | null | from . import db, id_column, to_dict
class Owner(db.Model):
    """Storage owner (account) that Usage records are attributed to."""
    id = id_column()
    # Unique natural key for the owner.
    name = db.Column(db.String(64), unique=True, nullable=False)
    usage = db.relationship("Usage", backref="owner")
    def json(self):
        """Return a JSON-serializable dict exposing only the owner name."""
        return to_dict(self, ["name"])
class Project(db.Model):
    """Storage group/project that Usage records are attributed to."""
    id = id_column()
    # Unique natural key for the group/project.
    name = db.Column(db.String(64), unique=True, nullable=False)
    usage = db.relationship("Usage", backref="project")
    def json(self):
        """Return a JSON-serializable dict exposing only the project name."""
        return to_dict(self, ["name"])
class Host(db.Model):
    """Storage host machine; owns a set of filesystems."""
    id = id_column()
    # Unique host name (256 chars; presumably an FQDN -- confirm with data).
    name = db.Column(db.String(256), unique=True, nullable=False)
    filesystems = db.relationship("Filesystem", backref="host")
    def json(self):
        """Return a JSON-serializable dict exposing only the host name."""
        return to_dict(self, ["name"])
class Snapshot(db.Model):
    """Point-in-time capacity snapshot of one filesystem."""
    id = id_column()
    # Snapshot timestamp as an integer (presumably epoch seconds -- confirm
    # with the ingest code).
    ts = db.Column(db.Integer, nullable=False)
    usage = db.relationship("Usage", backref="snapshot")
    # The column names below match the POSIX statvfs members (f_bavail,
    # f_bfree, f_blocks, ...), so these are presumably raw statvfs readings
    # -- confirm with the collector.
    bavail = db.Column(db.BigInteger)
    bfree = db.Column(db.BigInteger)
    blocks = db.Column(db.BigInteger)
    bsize = db.Column(db.Integer)
    favail = db.Column(db.BigInteger)
    ffree = db.Column(db.BigInteger)
    files = db.Column(db.BigInteger)
    frsize = db.Column(db.Integer)
    filesystem_id = db.Column(None,
                              db.ForeignKey("filesystem.id"),
                              nullable=False)
    def json(self):
        """Return a JSON-serializable dict of timestamp, owning filesystem
        and all capacity counters."""
        return to_dict(self,
                       ["ts", "filesystem_id", "bavail", "bfree", "blocks",
                        "bsize", "favail", "ffree", "files", "frsize"])
class Filesystem(db.Model):
    """A filesystem on a host; the unit that snapshots are taken for."""
    id = id_column()
    # Unique filesystem identifier (presumably a mount path -- confirm).
    name = db.Column(db.String(256), unique=True, nullable=False)
    snapshots = db.relationship("Snapshot", backref="filesystem")
    host_id = db.Column(None, db.ForeignKey("host.id"), nullable=False)
    def json(self):
        """Return a JSON-serializable dict with the name and owning host id."""
        return to_dict(self, ["name", "host_id"])
class Usage(db.Model):
    """Per-owner, per-project usage figures attached to one snapshot."""
    id = id_column()
    blocks = db.Column(db.BigInteger, nullable=False)
    bytes = db.Column(db.BigInteger, nullable=False)
    files = db.Column(db.BigInteger, nullable=False)
    # Indexed foreign keys to the attributing owner/project and snapshot.
    owner_id = db.Column(None,
                         db.ForeignKey("owner.id"),
                         nullable=False,
                         index=True)
    project_id = db.Column(None,
                           db.ForeignKey("project.id"),
                           nullable=False,
                           index=True)
    snapshot_id = db.Column(None,
                            db.ForeignKey("snapshot.id"),
                            nullable=False,
                            index=True)
    def json(self):
        """Return a JSON-serializable dict of counters and foreign keys."""
        return to_dict(self, ["blocks", "bytes", "files", "owner_id",
                              "project_id", "snapshot_id"])
| 30.989583 | 75 | 0.552941 |
661b0c64f78e0fec331216cbda51fce168b0bb03 | 5,773 | py | Python | io-control/configuration.py | wozio/home-system | ac73bec6e5050246f0e3debfbdfa43d4762bb144 | [
"MIT"
] | null | null | null | io-control/configuration.py | wozio/home-system | ac73bec6e5050246f0e3debfbdfa43d4762bb144 | [
"MIT"
] | null | null | null | io-control/configuration.py | wozio/home-system | ac73bec6e5050246f0e3debfbdfa43d4762bb144 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# This Python file uses the following encoding: utf-8
#import logging
import inputs
import outputs
# Input definitions: each entry binds a logical input name (used by the
# rules below) to the providing io service and the sensor id within it.
defined_inputs = [
    {
        'name': "Temperatura salon",
        'service': "io.1wire",
        'id': -6052829097502393072
    },
    {
        'name': "Temperatura pole",
        'service': "io.1wire",
        'id': 8358689710083321104
    },
    {
        'name': "Test temperature",
        'service': "io.test",
        'id': 1
    }
]
# Output definitions: logical output name -> relay-board service and the
# relay channel id driven for that output.
defined_outputs = [
    {
        'name': "Kocioł grzanie",
        'service': 'io.relay-board',
        'id': 0
    },
    {
        'name': "Pompa cyrkulacji CWU",
        'service': 'io.relay-board',
        'id': 3
    }
]
# rules callbacks
def heating_auto():
    """Thermostat rule with hysteresis: heat below 21, stop above 21.5.

    Heating is also stopped whenever the outdoor ("pole") reading exceeds
    the indoor ("salon") one. Between the thresholds the current relay
    state is kept (hysteresis band).
    """
    # Read each sensor exactly once so the decision is made on a consistent
    # snapshot (the original re-read the salon sensor up to three times).
    indoor = inputs.inputs["Temperatura salon"].get()[1]
    outdoor = inputs.inputs["Temperatura pole"].get()[1]
    if indoor > 21.5 or outdoor > indoor:
        outputs.outputs["Kocioł grzanie"].set(0)
    elif indoor < 21 and outdoor < 21:
        outputs.outputs["Kocioł grzanie"].set(1)
def heating_off():
    """Rule callback: force the boiler heating relay off (manual mode)."""
    outputs.outputs["Kocioł grzanie"].set(0)
def heating_on():
    """Rule callback: force the boiler heating relay on (manual mode)."""
    outputs.outputs["Kocioł grzanie"].set(1)
def circulation_auto():
    """Schedule the domestic hot water (CWU) circulation pump.

    Mon-Fri: pump on 05:45-07:00 (345-420 min) and 16:00-23:00 (960-1380).
    Sat-Sun: pump on 07:00-23:00 (420-1380).
    """
    now = inputs.inputs["Timer"].get()
    minutes = now.hour * 60 + now.minute
    if 0 <= now.weekday() <= 4:
        # Monday .. Friday
        pump_on = (345 <= minutes < 420) or (960 <= minutes < 1380)
    else:
        # Saturday / Sunday
        pump_on = 420 <= minutes < 1380
    # Single set() call replaces the four duplicated branches; the dead
    # trailing "pass" of the original is removed.
    outputs.outputs["Pompa cyrkulacji CWU"].set(1 if pump_on else 0)
def circulation_off():
    """Rule callback: force the DHW circulation pump off (manual mode)."""
    outputs.outputs["Pompa cyrkulacji CWU"].set(0)
def circulation_on():
    """Rule callback: force the DHW circulation pump on (manual mode)."""
    outputs.outputs["Pompa cyrkulacji CWU"].set(1)
# Rule definitions: each entry binds a callback to the inputs whose changes
# trigger it and the outputs it drives. NOTE: the "name" strings are
# referenced verbatim from the services list in this file ("rule": "..."),
# so renaming a rule here requires updating services as well.
rules = [
    {
        "name": "Ogrzewanie automatyczne",
        "rule": heating_auto,
        "inputs": [
            "Temperatura salon",
            "Temperatura pole"
        ],
        "outputs": [
            "Kocioł grzanie"
        ]
    },
    {
        "name": "Ogrzewanie wylaczone",
        "rule": heating_off,
        "inputs": [
        ],
        "outputs": [
            "Kocioł grzanie"
        ]
    },
    {
        "name": "Ogrzewanie włączone",
        "rule": heating_on,
        "inputs": [
        ],
        "outputs": [
            "Kocioł grzanie"
        ]
    },
    {
        "name": "Cyrkulacja automatyczna",
        "rule": circulation_auto,
        "inputs": [
            "Timer"
        ],
        "outputs": [
            "Pompa cyrkulacji CWU"
        ]
    },
    {
        "name": "Cyrkulacja wylaczona",
        "rule": circulation_off,
        "inputs": [
        ],
        "outputs": [
            "Pompa cyrkulacji CWU"
        ]
    },
    {
        "name": "Cyrkulacja włączona",
        "rule": circulation_on,
        "inputs": [
        ],
        "outputs": [
            "Pompa cyrkulacji CWU"
        ]
    }
]
# Service definitions: each service groups read-only "displays" (bound by
# name to the inputs/outputs above) and "settings" switches whose selected
# value activates the rule named in its "rule" field. The value strings of
# "rule" must match entries in the rules list above exactly.
services = [
    {
        "name": "Ogrzewanie",
        "displays": [
            {
                "name": "Temperatura na zewnątrz",
                "data": {
                    "type": "temperature",
                    "from": "Temperatura pole"
                }
            },
            {
                "name": "Temperatura w salonie",
                "data": {
                    "type": "temperature",
                    "from": "Temperatura salon"
                }
            },
            {
                "name": "Ogrzewanie",
                "data": {
                    "type": "state",
                    "from": "Kocioł grzanie"
                }
            }
        ],
        "settings": [
            {
                "name": "Tryb ogrzewania",
                "data": {
                    "type": "switch",
                    "default": "automatyczne",
                    "values": {
                        "wyłączone": {
                            "rule": "Ogrzewanie wylaczone"
                        },
                        "automatyczne": {
                            "rule": "Ogrzewanie automatyczne"
                        },
                        "włączone": {
                            "rule": "Ogrzewanie włączone"
                        }
                    }
                }
            }
        ]
    },
    {
        "name": "Cyrkulacja CWU",
        "displays": [
            {
                "name": "Cyrkulacja",
                "data": {
                    "type": "state",
                    "from": "Pompa cyrkulacji CWU"
                }
            }
        ],
        "settings": [
            {
                "name": "Tryb cyrkulacji",
                "data": {
                    "type": "switch",
                    "default": "automatyczna",
                    "values": {
                        "wyłączona": {
                            "rule": "Cyrkulacja wylaczona"
                        },
                        "automatyczna": {
                            "rule": "Cyrkulacja automatyczna"
                        },
                        "włączona": {
                            "rule": "Cyrkulacja włączona"
                        }
                    }
                }
            }
        ]
    },
    {
        "name": "Test service",
        "displays": [
            {
                "name": "Testowe wejście temperatury",
                "data": {
                    "type": "temperature",
                    "from": "Test temperature"
                }
            }
        ],
        "settings": [
        ]
    }
]
| 24.883621 | 150 | 0.401005 |
1f8191dcd5cf8ada2a017d89611a991a15449f8b | 38,034 | py | Python | autotest/gcore/tiff_read.py | HongqiangWei/gdal | f7c427926438cc39d31e4459fa6401321f8e62f0 | [
"MIT"
] | null | null | null | autotest/gcore/tiff_read.py | HongqiangWei/gdal | f7c427926438cc39d31e4459fa6401321f8e62f0 | [
"MIT"
] | null | null | null | autotest/gcore/tiff_read.py | HongqiangWei/gdal | f7c427926438cc39d31e4459fa6401321f8e62f0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test basic read support for a all datatypes from a TIFF file.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
import os
import sys
import string
import shutil
sys.path.append( '../pymod' )
import gdaltest
from osgeo import gdal, osr
###############################################################################
# When imported build a list of units based on the files available.
# Master list of (test_function, name) entries collected below and executed
# by the gdaltest framework when this script runs.
gdaltest_list = []

# Generic open/checksum cases, one per file:
# (filename under data/, band number, expected checksum, expected nodata or None).
init_list = [ \
    ('byte.tif', 1, 4672, None),
    ('int10.tif', 1, 4672, None),
    ('int12.tif', 1, 4672, None),
    ('int16.tif', 1, 4672, None),
    ('uint16.tif', 1, 4672, None),
    ('int24.tif', 1, 4672, None),
    ('int32.tif', 1, 4672, None),
    ('uint32.tif', 1, 4672, None),
    ('float16.tif', 1, 4672, None),
    ('float24.tif', 1, 4672, None),
    ('float32.tif', 1, 4672, None),
    ('float32_minwhite.tif', 1, 1, None),
    ('float64.tif', 1, 4672, None),
    ('cint16.tif', 1, 5028, None),
    ('cint32.tif', 1, 5028, None),
    ('cfloat32.tif', 1, 5028, None),
    ('cfloat64.tif', 1, 5028, None),
# The following four related partial final strip/tiles (#1179)
    ('separate_tiled.tif', 2, 15234, None), 
    ('seperate_strip.tif', 2, 15234, None),
    ('contig_tiled.tif', 2, 15234, None),
    ('contig_strip.tif', 2, 15234, None),
    ('empty1bit.tif', 1, 0, None)]
###############################################################################
# Test absolute/offset && index directory access
def tiff_read_off():
    """Check GTIFF_DIR subdataset access, both by absolute offset and by index."""

    # Absolute-offset form of the subdataset syntax.
    ds = gdal.Open('GTIFF_DIR:off:408:data/byte.tif')
    if ds.GetRasterBand(1).Checksum() != 4672:
        return 'fail'

    # Index form of the subdataset syntax.
    ds = gdal.Open('GTIFF_DIR:1:data/byte.tif')
    if ds.GetRasterBand(1).Checksum() != 4672:
        return 'fail'

    # Georeferencing must survive "GTIFF_DIR" subdataset access too (#3478).
    expected_gt = (440720.0, 60.0, 0.0, 3751320.0, 0.0, -60.0)
    gt = ds.GetGeoTransform()
    if gt != expected_gt:
        gdaltest.post_reason('did not get expected geotransform')
        print(gt)
        return 'fail'

    return 'success'
###############################################################################
# Confirm we interpret bands as alpha when we should, and not when we
# should not.
def tiff_check_alpha():
    """Verify per-band color interpretation for grey+alpha, RGBA and RGB+undefined files."""

    # (filename, band to probe, expected interpretation, label used in the message)
    cases = [('data/stefan_full_greyalpha.tif', 2, gdal.GCI_AlphaBand,
              'stefan_full_greyalpha'),
             ('data/stefan_full_rgba.tif', 4, gdal.GCI_AlphaBand,
              'stefan_full_rgba'),
             ('data/stefan_full_rgba_photometric_rgb.tif', 4, gdal.GCI_Undefined,
              'stefan_full_rgba_photometric_rgb')]

    for (filename, band, expected_interp, label) in cases:
        ds = gdal.Open(filename)
        interp = ds.GetRasterBand(band).GetRasterColorInterpretation()
        if interp != expected_interp:
            gdaltest.post_reason( 'Wrong color interpretation (%s).' % label)
            print(interp)
            return 'fail'
        ds = None

    return 'success'
###############################################################################
# Test reading a CMYK tiff as RGBA image
def tiff_read_cmyk_rgba():
    """A CMYK TIFF opened normally must be presented as RGBA."""

    ds = gdal.Open('data/rgbsmall_cmyk.tif')

    # The original color space must still be advertised in IMAGE_STRUCTURE.
    md = ds.GetMetadata('IMAGE_STRUCTURE')
    if md.get('SOURCE_COLOR_SPACE') != 'CMYK':
        print('bad value for IMAGE_STRUCTURE[SOURCE_COLOR_SPACE]')
        return 'fail'

    interp_first = ds.GetRasterBand(1).GetRasterColorInterpretation()
    if interp_first != gdal.GCI_RedBand:
        gdaltest.post_reason( 'Wrong color interpretation.')
        print(interp_first)
        return 'fail'

    interp_last = ds.GetRasterBand(4).GetRasterColorInterpretation()
    if interp_last != gdal.GCI_AlphaBand:
        gdaltest.post_reason( 'Wrong color interpretation (alpha).')
        print(interp_last)
        return 'fail'

    cs = ds.GetRasterBand(1).Checksum()
    if cs != 23303:
        print('Expected checksum = %d. Got = %d' % (23303, cs))
        return 'fail'

    return 'success'
###############################################################################
# Test reading a CMYK tiff as a raw image
def tiff_read_cmyk_raw():
    """A CMYK TIFF opened through GTIFF_RAW: must expose the raw CMYK bands."""

    ds = gdal.Open('GTIFF_RAW:data/rgbsmall_cmyk.tif')

    interp = ds.GetRasterBand(1).GetRasterColorInterpretation()
    if interp != gdal.GCI_CyanBand:
        gdaltest.post_reason( 'Wrong color interpretation.')
        print(interp)
        return 'fail'

    cs = ds.GetRasterBand(1).Checksum()
    if cs != 29430:
        print('Expected checksum = %d. Got = %d' % (29430, cs))
        return 'fail'

    return 'success'
###############################################################################
# Test reading a OJPEG image
def tiff_read_ojpeg():
    """Read an old-style (OJPEG, TIFF 6.0 draft) JPEG-in-TIFF file.

    Skips when the GTiff driver was built without JPEG support, or when the
    OJPEG codec is missing from the underlying libtiff.
    """
    # JPEG support is optional at build time; detect it from the creation options.
    md = gdal.GetDriverByName('GTiff').GetMetadata()
    if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:
        return 'skip'

    # Opening may legitimately fail when the OJPEG codec is absent; silence
    # the error and inspect the message to distinguish skip from failure.
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.Open('data/zackthecat.tif')
    gdal.PopErrorHandler()
    if ds is None:
        if gdal.GetLastErrorMsg().find('Cannot open TIFF file due to missing codec') == 0:
            return 'skip'
        else:
            print(gdal.GetLastErrorMsg())
            return 'fail'

    # Decoding OJPEG may emit warnings; keep them quiet while checksumming.
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    got_cs = ds.GetRasterBand(1).Checksum()
    gdal.PopErrorHandler()
    expected_cs = 61570
    if got_cs != expected_cs:
        print('Expected checksum = %d. Got = %d' % (expected_cs, got_cs))
        return 'fail'

    return 'success'
###############################################################################
# Read a .tif.gz file
def tiff_read_gzip():
    """Read a .tif.gz through /vsigzip/ and ensure no .properties side-car appears."""

    # Drop any stale gzip index side-car from a previous run.
    try:
        os.remove('data/byte.tif.gz.properties')
    except:
        pass

    ds = gdal.Open('/vsigzip/./data/byte.tif.gz')
    cs = ds.GetRasterBand(1).Checksum()
    if cs != 4672:
        print('Expected checksum = %d. Got = %d' % (4672, cs))
        return 'fail'
    ds = None

    # Reading must not have (re)created the side-car file.
    try:
        os.stat('data/byte.tif.gz.properties')
    except:
        return 'success'
    gdaltest.post_reason('did not expect data/byte.tif.gz.properties')
    return 'fail'
###############################################################################
# Read a .tif.zip file (with explicit filename)
def _tiff_read_expect_4672(filename):
    """Open *filename* and return 'success' iff band 1 checksums to 4672."""
    ds = gdal.Open(filename)
    cs = ds.GetRasterBand(1).Checksum()
    if cs != 4672:
        print('Expected checksum = %d. Got = %d' % (4672, cs))
        return 'fail'
    ds = None
    return 'success'

###############################################################################
# Read a .tif.zip file (with explicit filename)

def tiff_read_zip_1():
    return _tiff_read_expect_4672('/vsizip/./data/byte.tif.zip/byte.tif')

###############################################################################
# Read a .tif.zip file (with implicit filename)

def tiff_read_zip_2():
    return _tiff_read_expect_4672('/vsizip/./data/byte.tif.zip')

###############################################################################
# Read a .tif.zip file with a single file in a subdirectory (with explicit filename)

def tiff_read_zip_3():
    return _tiff_read_expect_4672('/vsizip/./data/onefileinsubdir.zip/onefileinsubdir/byte.tif')

###############################################################################
# Read a .tif.zip file with a single file in a subdirectory (with implicit filename)

def tiff_read_zip_4():
    return _tiff_read_expect_4672('/vsizip/./data/onefileinsubdir.zip')

###############################################################################
# Read a .tif.zip file with 2 files in a subdirectory

def tiff_read_zip_5():
    return _tiff_read_expect_4672('/vsizip/./data/twofileinsubdir.zip/twofileinsubdir/byte.tif')

###############################################################################
# Read a .tar file (with explicit filename)

def tiff_read_tar_1():
    return _tiff_read_expect_4672('/vsitar/./data/byte.tar/byte.tif')

###############################################################################
# Read a .tar file (with implicit filename)

def tiff_read_tar_2():
    return _tiff_read_expect_4672('/vsitar/./data/byte.tar')

###############################################################################
# Read a .tgz file (with explicit filename)

def tiff_read_tgz_1():
    return _tiff_read_expect_4672('/vsitar/./data/byte.tgz/byte.tif')

###############################################################################
# Read a .tgz file (with implicit filename)

def tiff_read_tgz_2():
    return _tiff_read_expect_4672('/vsitar/./data/byte.tgz')
###############################################################################
# Check handling of non-degree angular units (#601)
def tiff_grads():
    """Non-degree angular units (#601): latitude_of_origin must come back converted to degrees."""
    ds = gdal.Open('data/test_gf.tif')
    wkt = ds.GetProjectionRef()

    if wkt.find('PARAMETER["latitude_of_origin",46.8]') == -1:
        print(wkt)
        gdaltest.post_reason( 'Did not get expected latitude of origin.' )
        return 'fail'

    return 'success'
###############################################################################
# Check Erdas Citation Parsing for coordinate system.
def tiff_citation():
    """Check Erdas-style citation parsing of the coordinate system.

    Only meaningful for ESRI builds of GDAL, which interpret the Erdas
    citation; other builds skip.
    """
    # The expected WKT is ESRI-flavored; this test is only valid on ESRI builds.
    build_info = gdal.VersionInfo('BUILD_INFO')
    if build_info.find('ESRI_BUILD=YES') == -1:
        return 'skip'

    ds = gdal.Open('data/citation_mixedcase.tif')
    wkt = ds.GetProjectionRef()

    expected_wkt = """PROJCS["NAD_1983_HARN_StatePlane_Oregon_North_FIPS_3601_Feet_Intl",GEOGCS["GCS_North_American_1983_HARN",DATUM["NAD83_High_Accuracy_Reference_Network",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["False_Easting",8202099.737532808],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",-120.5],PARAMETER["Standard_Parallel_1",44.33333333333334],PARAMETER["Standard_Parallel_2",46.0],PARAMETER["Latitude_Of_Origin",43.66666666666666],UNIT["Foot",0.3048]]"""

    if wkt != expected_wkt:
        print('got: ', wkt)
        gdaltest.post_reason( 'Erdas citation processing failing?' )
        return 'fail'

    return 'success'
###############################################################################
# Check that we can read linear projection parameters properly (#3901)
def tiff_linearparmunits():
    """Linear projection parameters must be read back in meters (#3901)."""

    def false_easting_of(filename):
        # Open the file, pull its WKT and extract the false easting.
        ds = gdal.Open(filename)
        wkt = ds.GetProjectionRef()
        ds = None
        srs = osr.SpatialReference( wkt )
        return srs.GetProjParm(osr.SRS_PP_FALSE_EASTING)

    # File with the correct formulation.
    if abs(false_easting_of('data/spaf27_correct.tif') - 2000000.0) > 0.001:
        gdaltest.post_reason( 'did not get expected false easting (1)' )
        return 'fail'

    # File with the old (broken) GDAL formulation.
    if abs(false_easting_of('data/spaf27_brokengdal.tif') - 609601.219202438) > 0.001:
        gdaltest.post_reason( 'did not get expected false easting (2)' )
        return 'fail'

    # File using an EPSG code.
    if abs(false_easting_of('data/spaf27_epsg.tif') - 2000000.0) > 0.001:
        gdaltest.post_reason( 'did not get expected false easting (3)' )
        return 'fail'

    return 'success'
###############################################################################
# Check that the GTIFF_LINEAR_UNITS handling works properly (#3901)
def tiff_linearparmunits2():
    """GTIFF_LINEAR_UNITS=BROKEN backward-compatibility mode (#3901)."""

    gdal.SetConfigOption( 'GTIFF_LINEAR_UNITS', 'BROKEN' )

    def false_easting_of(filename):
        # Open the file, pull its WKT and extract the false easting.
        ds = gdal.Open(filename)
        wkt = ds.GetProjectionRef()
        ds = None
        srs = osr.SpatialReference( wkt )
        return srs.GetProjParm(osr.SRS_PP_FALSE_EASTING)

    # (filename, expected false easting, tag used in the failure message)
    checks = [('data/spaf27_correct.tif', 6561666.66667, '1'),
              ('data/spaf27_markedcorrect.tif', 2000000.0, '2'),
              ('data/spaf27_brokengdal.tif', 2000000.0, '3')]

    for (filename, expected_fe, tag) in checks:
        if abs(false_easting_of(filename) - expected_fe) > 0.001:
            gdaltest.post_reason( 'did not get expected false easting (%s)' % tag )
            return 'fail'

    gdal.SetConfigOption( 'GTIFF_LINEAR_UNITS', 'DEFAULT' )

    return 'success'
###############################################################################
# Test GTiffSplitBitmapBand to treat one row 1bit files as scanline blocks (#2622)
def tiff_g4_split():
    """Test GTiffSplitBitmapBand: one-row 1bit files must expose scanline blocks (#2622).

    Skips when the bindings are too old to expose Band.GetBlockSize().
    """
    # Idiomatic membership test ('x not in y' rather than 'not x in y').
    if 'GetBlockSize' not in dir(gdal.Band):
        return 'skip'

    ds = gdal.Open('data/slim_g4.tif')

    (blockx, blocky) = ds.GetRasterBand(1).GetBlockSize()

    if blocky != 1:
        gdaltest.post_reason( 'Did not get scanline sized blocks.' )
        return 'fail'

    cs = ds.GetRasterBand(1).Checksum()
    if cs != 3322:
        print(cs)
        gdaltest.post_reason( 'Got wrong checksum' )
        return 'fail'

    return 'success'
###############################################################################
# Test reading a tiff with multiple images in it
def tiff_multi_images():
    """Read a TIFF holding two images, implicitly and via GTIFF_DIR subdatasets."""

    # An implicit open must give the first image (backward compatibility)
    # and advertise both directories as subdatasets.
    ds = gdal.Open('data/twoimages.tif')
    if ds.GetRasterBand(1).Checksum() != 4672:
            print('Expected checksum = %d. Got = %d' % (4672, ds.GetRasterBand(1).Checksum()))
            return 'fail'

    md = ds.GetMetadata('SUBDATASETS')
    if md['SUBDATASET_1_NAME'] != 'GTIFF_DIR:1:data/twoimages.tif':
        print(md)
        gdaltest.post_reason( 'did not get expected subdatasets metadata.' )
        return 'fail'

    ds = None

    # Explicit access to each directory.
    for subdataset in ('GTIFF_DIR:1:data/twoimages.tif',
                       'GTIFF_DIR:2:data/twoimages.tif'):
        ds = gdal.Open(subdataset)
        if ds.GetRasterBand(1).Checksum() != 4672:
            print('Expected checksum = %d. Got = %d' % (4672, ds.GetRasterBand(1).Checksum()))
            return 'fail'
        ds = None

    return 'success'
###############################################################################
# Test reading a tiff from a memory buffer (#2931)
def tiff_vsimem():
    """Read and update a TIFF held in a /vsimem/ memory buffer (#2931).

    Also checks that a backslash-separated /vsimem path resolves.
    """
    try:
        gdal.FileFromMemBuffer
    except:
        return 'skip'

    # BUGFIX: use a context manager so the source file handle is not leaked
    # (the original did open(...).read() and never closed the handle).
    with open('data/byte.tif', mode='rb') as f:
        content = f.read()

    # Create in-memory file
    gdal.FileFromMemBuffer('/vsimem/tiffinmem', content)

    ds = gdal.Open('/vsimem/tiffinmem', gdal.GA_Update)
    if ds.GetRasterBand(1).Checksum() != 4672:
        print('Expected checksum = %d. Got = %d' % (4672, ds.GetRasterBand(1).Checksum()))
        return 'fail'
    ds.GetRasterBand(1).Fill(0)
    ds = None

    # Re-open: the Fill(0) above must have been committed to the memory file.
    ds = gdal.Open('/vsimem/tiffinmem')
    if ds.GetRasterBand(1).Checksum() != 0:
        print('Expected checksum = %d. Got = %d' % (0, ds.GetRasterBand(1).Checksum()))
        return 'fail'
    ds = None

    # Also test with anti-slash
    ds = gdal.Open('/vsimem\\tiffinmem')
    if ds.GetRasterBand(1).Checksum() != 0:
        print('Expected checksum = %d. Got = %d' % (0, ds.GetRasterBand(1).Checksum()))
        return 'fail'
    ds = None

    # Release memory associated to the in-memory file
    gdal.Unlink('/vsimem/tiffinmem')

    return 'success'
###############################################################################
# Test reading a tiff from inside a zip in a memory buffer !
def tiff_vsizip_and_mem():
    """Read a TIFF from inside a zip that itself lives in a /vsimem/ buffer."""
    try:
        gdal.FileFromMemBuffer
    except:
        return 'skip'

    # BUGFIX: use a context manager so the zip file handle is not leaked
    # (the original did open(...).read() and never closed the handle).
    with open('data/byte.tif.zip', mode='rb') as f:
        content = f.read()

    # Create in-memory file
    gdal.FileFromMemBuffer('/vsimem/tiffinmem.zip', content)

    ds = gdal.Open('/vsizip/vsimem/tiffinmem.zip/byte.tif')
    if ds.GetRasterBand(1).Checksum() != 4672:
        print('Expected checksum = %d. Got = %d' % (4672, ds.GetRasterBand(1).Checksum()))
        return 'fail'

    # Release memory associated to the in-memory file
    gdal.Unlink('/vsimem/tiffinmem.zip')

    return 'success'
###############################################################################
# Test reading a GeoTIFF with only ProjectedCSTypeGeoKey defined (ticket #3019)
def tiff_ProjectedCSTypeGeoKey_only():
    """A GeoTIFF with only ProjectedCSTypeGeoKey must still resolve its SRS (#3019)."""
    ds = gdal.Open('data/ticket3019.tif')
    wkt = ds.GetProjectionRef()
    ds = None

    if wkt.find('WGS 84 / UTM zone 31N') == -1:
        print(wkt)
        return 'fail'

    return 'success'
###############################################################################
# Test reading a 12bit jpeg compressed geotiff.
def tiff_12bitjpeg():
    """Read a 12-bit JPEG-compressed GeoTIFF and sanity-check band statistics.

    Skips when the JPEG library lacks 12-bit precision support.
    """
    old_accum = gdal.GetConfigOption( 'CPL_ACCUM_ERROR_MSG', 'OFF' )
    gdal.SetConfigOption( 'CPL_ACCUM_ERROR_MSG', 'ON' )
    gdal.ErrorReset()
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )

    # Remove stale cached statistics so GetStatistics() below really computes them.
    try:
        os.unlink('data/mandrilmini_12bitjpeg.tif.aux.xml')
    except:
        pass

    # Opening or reading may fail (or raise) when 12-bit JPEG is not compiled in.
    try:
        ds = gdal.Open('data/mandrilmini_12bitjpeg.tif')
        ds.GetRasterBand(1).ReadRaster(0,0,1,1)
    except:
        ds = None

    gdal.PopErrorHandler()
    gdal.SetConfigOption( 'CPL_ACCUM_ERROR_MSG', old_accum )

    if gdal.GetLastErrorMsg().find(
                   'Unsupported JPEG data precision 12') != -1:
        sys.stdout.write('(12bit jpeg not available) ... ')
        return 'skip'
    elif ds is None:
        gdaltest.post_reason( 'failed to open 12bit jpeg file with unexpected error' )
        return 'fail'

    # BUGFIX: 'stats' used to be referenced even when GetStatistics() raised,
    # turning a statistics failure into a NameError.  Fail cleanly instead.
    stats = None
    try:
        stats = ds.GetRasterBand(1).GetStatistics( 0, 1 )
    except:
        pass
    if stats is None:
        gdaltest.post_reason( 'GetStatistics() failed on band1.' )
        return 'fail'

    if stats[2] < 2150 or stats[2] > 2180 or str(stats[2]) == 'nan':
        gdaltest.post_reason( 'did not get expected mean for band1.')
        print(stats)
        return 'fail'
    ds = None

    os.unlink('data/mandrilmini_12bitjpeg.tif.aux.xml')

    return 'success'
###############################################################################
# Test that statistics for TIFF files are stored and correctly read from .aux.xml
def tiff_read_stats_from_pam():
    """Statistics must be stored to and correctly re-read from the .aux.xml PAM file."""

    def remove_pam():
        # Best-effort removal of the PAM side-car.
        try:
            os.remove('data/byte.tif.aux.xml')
        except:
            pass

    remove_pam()

    ds = gdal.Open('data/byte.tif')
    md = ds.GetRasterBand(1).GetMetadata()
    if 'STATISTICS_MINIMUM' in md:
        gdaltest.post_reason('Unexpected presence of STATISTICS_MINIMUM')
        return 'fail'

    # Force statistics computation
    stats = ds.GetRasterBand(1).GetStatistics(0, 1)
    if stats[0] != 74.0 or stats[1] != 255.0:
        print(stats)
        return 'fail'
    ds = None

    # Computing the statistics must have produced the PAM file.
    try:
        os.stat('data/byte.tif.aux.xml')
    except:
        gdaltest.post_reason('Expected generation of data/byte.tif.aux.xml')
        return 'fail'

    ds = gdal.Open('data/byte.tif')
    # Just read statistics (from PAM) without forcing their computation
    stats = ds.GetRasterBand(1).GetStatistics(0, 0)
    if stats[0] != 74.0 or stats[1] != 255.0:
        print(stats)
        return 'fail'
    ds = None

    remove_pam()

    return 'success'
###############################################################################
# Test extracting georeferencing from a .TAB file
def tiff_read_from_tab():
    """Georeferencing must be picked up from a MapInfo .TAB side-car file."""

    ds = gdal.GetDriverByName('GTiff').Create('tmp/tiff_read_from_tab.tif', 1, 1)
    ds = None

    # BUGFIX: write the .tab through a context manager so the handle is
    # closed even if the write fails (the original never closed on error).
    with open('tmp/tiff_read_from_tab.tab', 'wt') as f:
        f.write("""!table
!version 300
!charset WindowsLatin1

Definition Table
  File "HP.TIF"
  Type "RASTER"
  (400000,1200000) (0,4000) Label "Pt 1",
  (500000,1200000) (4000,4000) Label "Pt 2",
  (500000,1300000) (4000,0) Label "Pt 3",
  (400000,1300000) (0,0) Label "Pt 4"
  CoordSys Earth Projection 8, 79, "m", -2, 49, 0.9996012717, 400000, -100000
  Units "m"
""")

    ds = gdal.Open('tmp/tiff_read_from_tab.tif')
    gt = ds.GetGeoTransform()
    wkt = ds.GetProjectionRef()
    ds = None

    # Delete() must remove the .tab side-car along with the .tif.
    gdal.GetDriverByName('GTiff').Delete('tmp/tiff_read_from_tab.tif')

    try:
        os.stat('tmp/tiff_read_from_tab.tab')
        gdaltest.post_reason('did not expect to find .tab file at that point')
        return 'fail'
    except:
        pass

    if gt != (400000.0, 25.0, 0.0, 1300000.0, 0.0, -25.0):
        gdaltest.post_reason('did not get expected geotransform')
        print(gt)
        return 'fail'

    if wkt.find('OSGB_1936') == -1:
        gdaltest.post_reason('did not get expected SRS')
        print(wkt)
        return 'fail'

    return 'success'
###############################################################################
# Test reading PixelIsPoint file.
def tiff_read_pixelispoint():
    """Geotransform of a PixelIsPoint file, with and without GTIFF_POINT_GEO_IGNORE."""

    # (config option value, expected geotransform, failure message)
    checks = [('FALSE',
               (440690.0, 60.0, 0.0, 3751350.0, 0.0, -60.0),
               'did not get expected geotransform'),
              ('TRUE',
               (440720.0, 60.0, 0.0, 3751320.0, 0.0, -60.0),
               'did not get expected geotransform with GTIFF_POINT_GEO_IGNORE TRUE')]

    for (option_value, gt_expected, message) in checks:
        gdal.SetConfigOption( 'GTIFF_POINT_GEO_IGNORE', option_value )
        ds = gdal.Open( 'data/byte_point.tif' )
        gt = ds.GetGeoTransform()
        ds = None
        if gt != gt_expected:
            print(gt)
            gdaltest.post_reason( message )
            return 'fail'

    gdal.SetConfigOption( 'GTIFF_POINT_GEO_IGNORE', 'FALSE' )

    return 'success'
###############################################################################
# Test reading a GeoTIFF file with a geomatrix in PixelIsPoint format.
def tiff_read_geomatrix():
    """Geotransform of a geomatrix-encoded PixelIsPoint file, with and without GTIFF_POINT_GEO_IGNORE."""

    # (config option value, expected geotransform, failure message)
    checks = [('FALSE',
               (1841001.75, 1.5, -5.0, 1144003.25, -5.0, -1.5),
               'did not get expected geotransform'),
              ('TRUE',
               (1841000.0, 1.5, -5.0, 1144000.0, -5.0, -1.5),
               'did not get expected geotransform with GTIFF_POINT_GEO_IGNORE TRUE')]

    for (option_value, gt_expected, message) in checks:
        gdal.SetConfigOption( 'GTIFF_POINT_GEO_IGNORE', option_value )
        ds = gdal.Open( 'data/geomatrix.tif' )
        gt = ds.GetGeoTransform()
        ds = None
        if gt != gt_expected:
            print(gt)
            gdaltest.post_reason( message )
            return 'fail'

    gdal.SetConfigOption( 'GTIFF_POINT_GEO_IGNORE', 'FALSE' )

    return 'success'
###############################################################################
# Test that we don't crash when reading a TIFF with corrupted GeoTIFF tags
def tiff_read_corrupted_gtiff():
    """Opening a TIFF with corrupted GeoTIFF tags must error out, not crash."""
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    ds = gdal.Open('data/corrupted_gtiff_tags.tif')
    gdal.PopErrorHandler()
    ds = None

    err_msg = gdal.GetLastErrorMsg()
    expected_fragments = ('IO error during', 'Error fetching data for field')
    if not any(err_msg.find(fragment) != -1 for fragment in expected_fragments):
        gdaltest.post_reason( 'did not get expected error message' )
        print(err_msg)
        return 'fail'

    return 'success'
###############################################################################
# Test that we don't crash when reading a TIFF with corrupted GeoTIFF tags
def tiff_read_tag_without_null_byte():
    """A tag missing its NUL terminator must only trigger a CPLDebug message, not a warning."""
    ds = gdal.Open('data/tag_without_null_byte.tif')
    error_type = gdal.GetLastErrorType()
    ds = None

    if error_type != 0:
        gdaltest.post_reason( 'should have not emitted a warning, but only a CPLDebug() message' )
        return 'fail'

    return 'success'
###############################################################################
# Test the effect of the GTIFF_IGNORE_READ_ERRORS configuration option (#3994)
def tiff_read_buggy_packbits():
    """Test the GTIFF_IGNORE_READ_ERRORS configuration option (#3994).

    Reading a file with corrupted PackBits data must fail by default, and
    succeed (best effort) when GTIFF_IGNORE_READ_ERRORS=YES.
    """
    # The option is only read at open time, so re-open for each setting and
    # restore the caller's original value right after each Open().
    old_val = gdal.GetConfigOption('GTIFF_IGNORE_READ_ERRORS')
    gdal.SetConfigOption('GTIFF_IGNORE_READ_ERRORS', None)
    ds = gdal.Open('data/byte_buggy_packbits.tif')
    gdal.SetConfigOption('GTIFF_IGNORE_READ_ERRORS', old_val)
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ret = ds.ReadRaster(0,0,20,20)
    gdal.PopErrorHandler()
    # Default behaviour: the corrupted data must make the read fail.
    if ret is not None:
        gdaltest.post_reason('did not expected a valid result')
        return 'fail'
    ds = None

    gdal.SetConfigOption('GTIFF_IGNORE_READ_ERRORS', 'YES')
    ds = gdal.Open('data/byte_buggy_packbits.tif')
    gdal.SetConfigOption('GTIFF_IGNORE_READ_ERRORS', old_val)
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ret = ds.ReadRaster(0,0,20,20)
    gdal.PopErrorHandler()
    # With errors ignored, a (possibly partial) result must come back.
    if ret is None:
        gdaltest.post_reason('expected a valid result')
        return 'fail'
    ds = None

    return 'success'
###############################################################################
# Test reading a GeoEye _rpc.txt (#3639)
def tiff_read_rpc_txt():
    """RPC metadata must be loaded from a GeoEye-style _rpc.txt side-car (#3639)."""
    shutil.copy('data/byte.tif', 'tmp/test.tif')
    shutil.copy('data/test_rpc.txt', 'tmp/test_rpc.txt')

    ds = gdal.Open('tmp/test.tif')
    rpc_md = ds.GetMetadata('RPC')
    ds = None

    # Clean up the scratch copies before judging the result.
    os.remove('tmp/test.tif')
    os.remove('tmp/test_rpc.txt')

    if 'HEIGHT_OFF' not in rpc_md:
        return 'fail'

    return 'success'
###############################################################################
# Test a very small TIFF with only 4 tags :
# Magic: 0x4949 <little-endian> Version: 0x2a
# Directory 0: offset 8 (0x8) next 0 (0)
# ImageWidth (256) SHORT (3) 1<1>
# ImageLength (257) SHORT (3) 1<1>
# StripOffsets (273) LONG (4) 1<0>
# StripByteCounts (279) LONG (4) 1<1>
def tiff_small():
    """Open a minimal 1x1 TIFF holding only the 4 mandatory tags.

    The raw bytes below encode a little-endian TIFF with ImageWidth,
    ImageLength, StripOffsets and StripByteCounts only.
    """
    content = '\x49\x49\x2A\x00\x08\x00\x00\x00\x04\x00\x00\x01\x03\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01\x01\x03\x00\x01\x00\x00\x00\x01\x00\x00\x00\x11\x01\x04\x00\x01\x00\x00\x00\x00\x00\x00\x00\x17\x01\x04\x00\x01\x00\x00\x00\x01\x00\x00\x00'

    # Create in-memory file
    gdal.FileFromMemBuffer('/vsimem/small.tif', content)

    ds = gdal.Open('/vsimem/small.tif')
    if ds.GetRasterBand(1).Checksum() != 0:
        print('Expected checksum = %d. Got = %d' % (0, ds.GetRasterBand(1).Checksum()))
        return 'fail'

    # Release memory associated to the in-memory file
    gdal.Unlink('/vsimem/small.tif')

    return 'success'
###############################################################################
# Test that we can work around a denial-of-service caused by strip chopping
# on a crafted file with a pathological strip layout
def tiff_dos_strip_chop():
    """Open a file crafted to trigger excessive strip chopping and check it
    neither crashes nor hangs; any errors are deliberately silenced."""
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.Open('data/tiff_dos_strip_chop.tif')
    gdal.PopErrorHandler()
    ds = None

    return 'success'
###############################################################################
# Test reading EXIF and GPS metadata
def tiff_read_exif_and_gps():
    """EXIF and GPS tags must be exposed through the EXIF metadata domain."""

    ds = gdal.Open('data/exif_and_gps.tif')
    exif_md = ds.GetMetadata('EXIF')
    ds = None

    if exif_md is None or len(exif_md) == 0:
        gdaltest.post_reason('failed')
        return 'fail'

    # Single-item lookup must work as well.
    ds = gdal.Open('data/exif_and_gps.tif')
    EXIF_GPSVersionID = ds.GetMetadataItem('EXIF_GPSVersionID', 'EXIF')
    ds = None

    if EXIF_GPSVersionID is None:
        gdaltest.post_reason('failed')
        return 'fail'

    # A file without EXIF must report an empty EXIF domain.
    ds = gdal.Open('data/byte.tif')
    exif_md = ds.GetMetadata('EXIF')
    ds = None

    if exif_md is not None and len(exif_md) != 0:
        gdaltest.post_reason('failed')
        return 'fail'

    return 'success'
###############################################################################
# Test reading a pixel interleaved RGBA JPEG-compressed TIFF
def tiff_jpeg_rgba_pixel_interleaved():
    """Checksums and color interpretation of a pixel-interleaved RGBA JPEG-in-TIFF."""
    md = gdal.GetDriverByName('GTiff').GetMetadata()
    if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:
        return 'skip'

    ds = gdal.Open('data/stefan_full_rgba_jpeg_contig.tif')
    md = ds.GetMetadata('IMAGE_STRUCTURE')
    if md['INTERLEAVE'] != 'PIXEL':
        gdaltest.post_reason('failed')
        return 'fail'

    expected_cs = [16404, 62700, 37913, 14174]
    for i, cs_expected in enumerate(expected_cs):
        band = ds.GetRasterBand(i + 1)
        if band.Checksum() != cs_expected:
            gdaltest.post_reason('failed')
            return 'fail'
        if band.GetRasterColorInterpretation() != gdal.GCI_RedBand + i:
            gdaltest.post_reason('failed')
            return 'fail'
    ds = None

    return 'success'
###############################################################################
# Test reading a band interleaved RGBA JPEG-compressed TIFF
def tiff_jpeg_rgba_band_interleaved():
    """Checksums and color interpretation of a band-interleaved RGBA JPEG-in-TIFF."""
    md = gdal.GetDriverByName('GTiff').GetMetadata()
    if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:
        return 'skip'

    ds = gdal.Open('data/stefan_full_rgba_jpeg_separate.tif')
    md = ds.GetMetadata('IMAGE_STRUCTURE')
    if md['INTERLEAVE'] != 'BAND':
        gdaltest.post_reason('failed')
        return 'fail'

    expected_cs = [16404, 62700, 37913, 14174]
    for i, cs_expected in enumerate(expected_cs):
        band = ds.GetRasterBand(i + 1)
        if band.Checksum() != cs_expected:
            gdaltest.post_reason('failed')
            return 'fail'
        if band.GetRasterColorInterpretation() != gdal.GCI_RedBand + i:
            gdaltest.post_reason('failed')
            return 'fail'
    ds = None

    return 'success'
###############################################################################
# Test reading a YCbCr JPEG all-in-one-strip multiband TIFF (#3259, #3894)
def tiff_read_online_1():
    """Read a YCbCr JPEG all-in-one-strip multiband TIFF (#3259, #3894).

    Skips when JPEG support is absent or the remote test file cannot be
    downloaded.
    """
    md = gdal.GetDriverByName('GTiff').GetMetadata()
    if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:
        return 'skip'

    if not gdaltest.download_file('http://trac.osgeo.org/gdal/raw-attachment/ticket/3259/imgpb17.tif', 'imgpb17.tif'):
        return 'skip'

    ds = gdal.Open('tmp/cache/imgpb17.tif')
    gdal.ErrorReset()
    cs = ds.GetRasterBand(1).Checksum()
    ds = None

    # Checksumming must not have raised any error.
    if gdal.GetLastErrorMsg() != '':
        return 'fail'

    # Two acceptable checksums, depending on the libjpeg version in use.
    if cs != 62628 and cs != 28554:
        print(cs)
        return 'fail'

    return 'success'
###############################################################################
# Use GTIFF_DIRECT_IO=YES option combined with /vsicurl to test for multi-range
# support
def tiff_read_online_2():
    """Exercise multi-range reads: GTIFF_DIRECT_IO=YES combined with /vsicurl.

    Skips when HTTP support is absent or the remote file is unreachable.
    """
    if gdal.GetDriverByName('HTTP') is None:
        return 'skip'

    if gdaltest.gdalurlopen('http://download.osgeo.org/gdal/data/gtiff/utm.tif') is None:
        print('cannot open URL')
        return 'skip'

    # Options are read at open time; restore them immediately after Open().
    gdal.SetConfigOption('GTIFF_DIRECT_IO', 'YES')
    gdal.SetConfigOption('CPL_VSIL_CURL_ALLOWED_EXTENSIONS', '.tif')
    gdal.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', 'EMPTY_DIR')
    ds = gdal.Open('/vsicurl/http://download.osgeo.org/gdal/data/gtiff/utm.tif')
    gdal.SetConfigOption('GTIFF_DIRECT_IO', None)
    gdal.SetConfigOption('CPL_VSIL_CURL_ALLOWED_EXTENSIONS', None)
    gdal.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', None)
    if ds is None:
        gdaltest.post_reason('could not open dataset')
        return 'fail'

    # Read subsampled data
    subsampled_data = ds.ReadRaster(0, 0, 512, 512, 128, 128)
    ds = None

    # Checksum the subsampled window through an in-memory dataset.
    ds = gdal.GetDriverByName('MEM').Create('', 128,128)
    ds.WriteRaster(0, 0, 128, 128, subsampled_data)
    cs = ds.GetRasterBand(1).Checksum()
    ds = None

    if cs != 54935:
        gdaltest.post_reason('wrong checksum')
        print(cs)
        return 'fail'

    return 'success'
###############################################################################
# Test reading a TIFF made of a single-strip that is more than 2GB (#5403)
def tiff_read_huge4GB():
    """Read a TIFF whose single strip is more than 2GB (#5403).

    When the filesystem supports sparse files, the test file is extended to
    its nominal size so the strip can actually be addressed; otherwise only
    the truncated file is opened.
    """
    if not gdaltest.filesystem_supports_sparse_files('tmp'):
        # No sparse-file support: just check the truncated file still opens.
        ds = gdal.Open('data/huge4GB.tif')
        if ds is None:
            return 'fail'
    else:
        shutil.copy('data/huge4GB.tif', 'tmp/huge4GB.tif')
        # Extend the copy by seeking past 4GB and writing a single byte.
        # BUGFIX: the file is open in binary mode, so bytes must be written;
        # the original wrote the str ' ', a TypeError under Python 3.
        f = open('tmp/huge4GB.tif', 'rb+')
        try:
            f.seek(65535 * 65535 + 401)
            f.write(b' ')
        finally:
            f.close()
        ds = gdal.Open('tmp/huge4GB.tif')
        if ds is None:
            os.remove('tmp/huge4GB.tif')
            return 'fail'
        ds = None
        os.remove('tmp/huge4GB.tif')

    return 'success'
###############################################################################################
# Register one generic open/checksum test per entry of init_list:
# (filename, band, expected checksum, expected nodata).
for item in init_list:
    ut = gdaltest.GDALTest( 'GTiff', item[0], item[1], item[2] )
    if ut is None:
        print( 'GTiff tests skipped' )
        sys.exit()
    gdaltest_list.append( (ut.testOpen, item[0]) )
# Register the hand-written test functions, in execution order.
gdaltest_list.extend([
    tiff_read_off,
    tiff_check_alpha,
    tiff_read_cmyk_rgba,
    tiff_read_cmyk_raw,
    tiff_read_ojpeg,
    tiff_read_gzip,
    tiff_read_zip_1,
    tiff_read_zip_2,
    tiff_read_zip_3,
    tiff_read_zip_4,
    tiff_read_zip_5,
    tiff_read_tar_1,
    tiff_read_tar_2,
    tiff_read_tgz_1,
    tiff_read_tgz_2,
    tiff_grads,
    tiff_citation,
    tiff_linearparmunits,
    tiff_linearparmunits2,
    tiff_g4_split,
    tiff_multi_images,
    tiff_vsimem,
    tiff_vsizip_and_mem,
    tiff_ProjectedCSTypeGeoKey_only,
    tiff_12bitjpeg,
    tiff_read_stats_from_pam,
    tiff_read_from_tab,
    tiff_read_pixelispoint,
    tiff_read_geomatrix,
    tiff_read_corrupted_gtiff,
    tiff_read_tag_without_null_byte,
    tiff_read_buggy_packbits,
    tiff_read_rpc_txt,
    tiff_small,
    tiff_dos_strip_chop,
    tiff_read_exif_and_gps,
    tiff_jpeg_rgba_pixel_interleaved,
    tiff_jpeg_rgba_band_interleaved,
    tiff_read_online_1,
    tiff_read_online_2,
    tiff_read_huge4GB,
])
# Standalone invocation: run the full suite and print a pass/fail summary.
if __name__ == '__main__':

    gdaltest.setup_run( 'tiff_read' )

    gdaltest.run_tests( gdaltest_list )

    gdaltest.summarize()
| 31.961345 | 596 | 0.589788 |
aa68b019b1129c135b0facd3ceddb81eb1dd901c | 2,725 | py | Python | cowait/cli/app/notebook.py | backtick-se/cowa | 760ddb3ded1b3995bc68f4b74cf28af0c094481f | [
"Apache-2.0"
] | 51 | 2020-06-04T06:08:14.000Z | 2022-03-28T06:59:53.000Z | cowait/cli/app/notebook.py | backtick-se/cowa | 760ddb3ded1b3995bc68f4b74cf28af0c094481f | [
"Apache-2.0"
] | 121 | 2020-06-01T12:09:32.000Z | 2022-03-31T20:47:57.000Z | cowait/cli/app/notebook.py | backtick-se/cowa | 760ddb3ded1b3995bc68f4b74cf28af0c094481f | [
"Apache-2.0"
] | 6 | 2020-06-11T16:05:20.000Z | 2022-03-23T06:30:17.000Z | import yaml
import click
import cowait.cli.commands
from cowait.cli import CliError
from .utils import parse_input_list
@click.group(help='start notebook', invoke_without_command=True)
@click.option('-c', '--cluster',
default=None,
type=str,
help='cluster name')
@click.option('-i', '--image',
type=str,
default=None,
help='remote image')
@click.pass_context
def notebook(ctx, cluster, image):
if ctx.invoked_subcommand is None:
cowait.cli.notebook(ctx.obj, image, cluster_name=cluster)
@notebook.command(help='run notebook as a task')
@click.argument('path', type=str)
@click.option('--image',
default=None,
type=str,
help='image name')
@click.option('-c', '--cluster',
default=None,
type=str,
help='cluster name')
@click.option('-n', '--name',
type=str,
default=None,
help='specific task name')
@click.option('-i', '--input',
type=str,
multiple=True,
help='specify task input')
@click.option('-e', '--env',
type=str,
multiple=True,
help='define enviornment variable')
@click.option('-b', '--build',
type=bool, is_flag=True,
help='build and push first',
default=False)
@click.option('-d', '--detach',
type=bool, is_flag=True,
help='run in detached mode',
default=False)
@click.option('-f', '--json', '--yml', '--yaml', 'file',
help='yaml/json file with inputs',
type=str,
default=None)
@click.option('-q', '--quiet',
type=bool, is_flag=True,
help='no output except result',
default=False)
@click.pass_context
def run(
ctx, path: str, image: str, cluster: str, name: str,
input, env, build: bool, detach: bool, file: str, quiet: bool
):
file_inputs = {}
if file is not None:
try:
with open(file, 'r') as f:
file_inputs = yaml.load(f, Loader=yaml.FullLoader)
except yaml.parser.ParserError as e:
raise CliError(f'Error in {file}: {e}')
if not isinstance(file_inputs, dict):
raise CliError('Error: Expected input file to contain a dictionary')
cowait.cli.run_notebook(
ctx.obj,
path,
image=image,
cluster=cluster,
name=name,
inputs={
**file_inputs,
**parse_input_list(input),
},
env=parse_input_list(env),
build=build,
detach=detach,
quiet=quiet)
| 29.945055 | 76 | 0.54055 |
04c3b2b7714ef37296e4fa9bcdeca2179a19a8bc | 104,304 | py | Python | pandas/core/internals/blocks.py | jsignell/pandas | 83eb75bf04e9020a13b1109ac714207159d7c11a | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/internals/blocks.py | jsignell/pandas | 83eb75bf04e9020a13b1109ac714207159d7c11a | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/internals/blocks.py | jsignell/pandas | 83eb75bf04e9020a13b1109ac714207159d7c11a | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | from datetime import date, datetime, timedelta
import functools
import inspect
import re
from typing import Any, List
import warnings
import numpy as np
from pandas._libs import NaT, Timestamp, lib, tslib, writers
import pandas._libs.internals as libinternals
from pandas._libs.tslibs import Timedelta, conversion
from pandas._libs.tslibs.timezones import tz_compare
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
astype_nansafe,
find_common_type,
infer_dtype_from,
infer_dtype_from_scalar,
maybe_downcast_numeric,
maybe_downcast_to_dtype,
maybe_infer_dtype_type,
maybe_promote,
maybe_upcast,
soft_convert_objects,
)
from pandas.core.dtypes.common import (
_NS_DTYPE,
_TD_DTYPE,
ensure_platform_int,
is_bool_dtype,
is_categorical,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_extension_type,
is_float_dtype,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_re,
is_re_compilable,
is_sparse,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.concat import concat_categorical, concat_datetime
from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.missing import (
_isna_compat,
array_equivalent,
is_valid_nat_for_dtype,
isna,
notna,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical, DatetimeArray, PandasDtype, TimedeltaArray
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.core.indexers import (
check_setitem_lengths,
is_empty_indexer,
is_scalar_indexer,
)
import pandas.core.missing as missing
from pandas.core.nanops import nanpercentile
from pandas.io.formats.printing import pprint_thing
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ["_mgr_locs", "values", "ndim"]
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_datetimetz = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_extension = False
_can_hold_na = False
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = "dense"
_concatenator = staticmethod(np.concatenate)
def __init__(self, values, placement, ndim=None):
self.ndim = self._check_ndim(values, ndim)
self.mgr_locs = placement
self.values = values
if self._validate_ndim and self.ndim and len(self.mgr_locs) != len(self.values):
raise ValueError(
"Wrong number of items passed {val}, placement implies "
"{mgr}".format(val=len(self.values), mgr=len(self.mgr_locs))
)
def _check_ndim(self, values, ndim):
"""
ndim inference and validation.
Infers ndim from 'values' if not provided to __init__.
Validates that values.ndim and ndim are consistent if and only if
the class variable '_validate_ndim' is True.
Parameters
----------
values : array-like
ndim : int or None
Returns
-------
ndim : int
Raises
------
ValueError : the number of dimensions do not match
"""
if ndim is None:
ndim = values.ndim
if self._validate_ndim and values.ndim != ndim:
msg = "Wrong number of dimensions. values.ndim != ndim [{} != {}]"
raise ValueError(msg.format(values.ndim, ndim))
return ndim
@property
def _holder(self):
"""The array-like that can hold the underlying values.
None for 'Block', overridden by subclasses that don't
use an ndarray.
"""
return None
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if dtype is Categorical or dtype is CategoricalDtype:
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
elif is_categorical_dtype(dtype):
return True
return False
def external_values(self, dtype=None):
""" return an outside world format, currently just the ndarray """
return self.values
def internal_values(self, dtype=None):
""" return an internal format, currently just the ndarray
this should be the pure internal API format
"""
return self.values
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
this is often overridden to handle to_dense like operations
"""
if is_object_dtype(dtype):
return self.values.astype(object)
return self.values
def get_block_values(self, dtype=None):
"""
This is used in the JSON C code
"""
return self.get_values(dtype=dtype)
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, libinternals.BlockPlacement):
new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return self.dtype
def make_block(self, values, placement=None):
"""
Create a new block, with type inference propagate any values that are
not specified
"""
if placement is None:
placement = self.mgr_locs
return make_block(values, placement=placement, ndim=self.ndim)
def make_block_same_class(self, values, placement=None, ndim=None, dtype=None):
""" Wrap given values in a block of same type as self. """
if dtype is not None:
# issue 19431 fastparquet is passing this
warnings.warn(
"dtype argument is deprecated, will be removed in a future release.",
FutureWarning,
)
if placement is None:
placement = self.mgr_locs
if ndim is None:
ndim = self.ndim
return make_block(
values, placement=placement, ndim=ndim, klass=self.__class__, dtype=dtype
)
def __repr__(self):
# don't want to print out all of the items here
name = pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = "{name}: {len} dtype: {dtype}".format(
name=name, len=len(self), dtype=self.dtype
)
else:
shape = " x ".join(pprint_thing(s) for s in self.shape)
result = "{name}: {index}, {shape}, dtype: {dtype}".format(
name=name,
index=pprint_thing(self.mgr_locs.indexer),
shape=shape,
dtype=self.dtype,
)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = libinternals.BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
if getattr(self.values, "_pandas_ftype", False):
dtype = self.dtype.subtype
else:
dtype = self.dtype
return "{dtype}:{ftype}".format(dtype=dtype, ftype=self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
"""
values = self._concatenator(
[blk.values for blk in to_concat], axis=self.ndim - 1
)
return self.make_block_same_class(
values, placement=placement or slice(0, len(values), 1)
)
def iget(self, i):
return self.values[i]
def set(self, locs, values):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not
one
"""
with np.errstate(all="ignore"):
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = self.make_block(values=_block_shape(result, ndim=self.ndim))
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
""" fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
"""
inplace = validate_bool_kwarg(inplace, "inplace")
mask = isna(self.values)
if limit is not None:
if not is_integer(limit):
raise ValueError("Limit must be an integer")
if limit < 1:
raise ValueError("Limit must be greater than 0")
mask[mask.cumsum(self.ndim - 1) > limit] = False
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
if self._can_hold_element(value):
# equivalent: _try_coerce_args(value) would not raise
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
# operate column-by-column
def f(mask, val, idx):
block = self.coerce_to_target_dtype(value)
# slice out our block
if idx is not None:
# i.e. self.ndim == 2
block = block.getitem_block(slice(idx, idx + 1))
return block.fillna(value, limit=limit, inplace=inplace, downcast=None)
return self.split_and_operate(None, f, inplace)
def split_and_operate(self, mask, f, inplace: bool):
"""
split the block per-column, and apply the callable f
per-column, return a new block for each. Handle
masking which will not change a block unless needed.
Parameters
----------
mask : 2-d boolean mask
f : callable accepting (1d-mask, 1d values, indexer)
inplace : boolean
Returns
-------
list of blocks
"""
if mask is None:
mask = np.broadcast_to(True, shape=self.shape)
new_values = self.values
def make_a_block(nv, ref_loc):
if isinstance(nv, list):
assert len(nv) == 1, nv
assert isinstance(nv[0], Block)
block = nv[0]
else:
# Put back the dimension that was taken from it and make
# a block out of the result.
nv = _block_shape(nv, ndim=self.ndim)
block = self.make_block(values=nv, placement=ref_loc)
return block
# ndim == 1
if self.ndim == 1:
if mask.any():
nv = f(mask, new_values, None)
else:
nv = new_values if inplace else new_values.copy()
block = make_a_block(nv, self.mgr_locs)
return [block]
# ndim > 1
new_blocks = []
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
nv = f(m, v, i)
else:
nv = v if inplace else v.copy()
block = make_a_block(nv, [ref_loc])
new_blocks.append(block)
return new_blocks
def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]:
# no need to downcast our float
# unless indicated
if downcast is None and (
self.is_float or self.is_timedelta or self.is_datetime
):
return blocks
return _extend_blocks([b.downcast(downcast) for b in blocks])
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return self
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = "infer"
nv = maybe_downcast_to_dtype(values, dtypes)
return self.make_block(nv)
# ndim > 1
if dtypes is None:
return self
if not (dtypes == "infer" or isinstance(dtypes, dict)):
raise ValueError(
"downcast must have a dictionary or 'infer' as its argument"
)
elif dtypes != "infer":
raise AssertionError("dtypes as dict is not supported yet")
# operate column-by-column
# this is expensive as it splits the blocks items-by-item
def f(mask, val, idx):
val = maybe_downcast_to_dtype(val, dtype="infer")
return val
return self.split_and_operate(None, f, False)
def astype(self, dtype, copy=False, errors="raise", **kwargs):
return self._astype(dtype, copy=copy, errors=errors, **kwargs)
def _astype(self, dtype, copy=False, errors="raise", **kwargs):
"""Coerce to the new type
Parameters
----------
dtype : str, dtype convertible
copy : boolean, default False
copy if indicated
errors : str, {'raise', 'ignore'}, default 'ignore'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
Returns
-------
Block
"""
errors_legal_values = ("raise", "ignore")
if errors not in errors_legal_values:
invalid_arg = (
"Expected value of kwarg 'errors' to be one of {}. "
"Supplied value is '{}'".format(list(errors_legal_values), errors)
)
raise ValueError(invalid_arg)
if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):
msg = (
"Expected an instance of {}, but got the class instead. "
"Try instantiating 'dtype'.".format(dtype.__name__)
)
raise TypeError(msg)
# may need to convert to categorical
if self.is_categorical_astype(dtype):
# deprecated 17636
for deprecated_arg in ("categories", "ordered"):
if deprecated_arg in kwargs:
raise ValueError(
"Got an unexpected argument: {}".format(deprecated_arg)
)
categories = kwargs.get("categories", None)
ordered = kwargs.get("ordered", None)
if com.any_not_none(categories, ordered):
dtype = CategoricalDtype(categories, ordered)
if is_categorical_dtype(self.values):
# GH 10696/18593: update an existing categorical efficiently
return self.make_block(self.values.astype(dtype, copy=copy))
return self.make_block(Categorical(self.values, dtype=dtype))
dtype = pandas_dtype(dtype)
# astype processing
if is_dtype_equal(self.dtype, dtype):
if copy:
return self.copy()
return self
# force the copy here
if self.is_extension:
# TODO: Should we try/except this astype?
values = self.values.astype(dtype)
else:
if issubclass(dtype.type, str):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.get_values()
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
vals1d = values.ravel()
try:
values = astype_nansafe(vals1d, dtype, copy=True, **kwargs)
except (ValueError, TypeError):
# e.g. astype_nansafe can fail on object-dtype of strings
# trying to convert to float
if errors == "raise":
raise
newb = self.copy() if copy else self
return newb
# TODO(extension)
# should we make this attribute?
if isinstance(values, np.ndarray):
values = values.reshape(self.shape)
newb = make_block(values, placement=self.mgr_locs, ndim=self.ndim)
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError(
"cannot set astype for copy = [{copy}] for dtype "
"({dtype} [{shape}]) to different shape "
"({newb_dtype} [{newb_shape}])".format(
copy=copy,
dtype=self.dtype.name,
shape=self.shape,
newb_dtype=newb.dtype.name,
newb_shape=newb.shape,
)
)
return newb
def convert(
self,
copy: bool = True,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
coerce: bool = False,
):
""" attempt to coerce any object types to better types return a copy
of the block (if copy = True) by definition we are not an ObjectBlock
here!
"""
return self.copy() if copy else self
def _can_hold_element(self, element: Any) -> bool:
""" require the same dtype as ourselves """
dtype = self.values.dtype.type
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, dtype)
return isinstance(element, dtype)
def _try_coerce_args(self, other):
""" provide coercion to our input arguments """
if np.any(notna(other)) and not self._can_hold_element(other):
# coercion issues
# let higher levels handle
raise TypeError(
"cannot convert {} to an {}".format(
type(other).__name__,
type(self).__name__.lower().replace("Block", ""),
)
)
if np.any(isna(other)) and not self._can_hold_na:
raise TypeError(
"cannot convert {} to an {}".format(
type(other).__name__,
type(self).__name__.lower().replace("Block", ""),
)
)
return other
def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.get_values()
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
if not self.is_object and not quoting:
itemsize = writers.word_len(na_rep)
values = values.astype("<U{size}".format(size=itemsize))
else:
values = np.array(values, dtype="object")
values[mask] = na_rep
return values
# block actions #
def copy(self, deep=True):
""" copy constructor """
values = self.values
if deep:
values = values.copy()
return self.make_block_same_class(values, ndim=self.ndim)
def replace(
self, to_replace, value, inplace=False, filter=None, regex=False, convert=True
):
"""replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API compatibility.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
original_to_replace = to_replace
# If we cannot replace with own dtype, convert to ObjectBlock and
# retry
if not self._can_hold_element(to_replace):
if not isinstance(to_replace, list):
if inplace:
return [self]
return [self.copy()]
to_replace = [x for x in to_replace if self._can_hold_element(x)]
if not len(to_replace):
# GH#28084 avoid costly checks since we can infer
# that there is nothing to replace in this block
if inplace:
return [self]
return [self.copy()]
if len(to_replace) == 1:
# _can_hold_element checks have reduced this back to the
# scalar case and we can avoid a costly object cast
return self.replace(
to_replace[0],
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
# GH 22083, TypeError or ValueError occurred within error handling
# causes infinite loop. Cast and retry only if not objectblock.
if is_object_dtype(self):
raise AssertionError
# try again with a compatible block
block = self.astype(object)
return block.replace(
to_replace=to_replace,
value=value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
values = self.values
to_replace = self._try_coerce_args(to_replace)
mask = missing.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
try:
blocks = self.putmask(mask, value, inplace=inplace)
# Note: it is _not_ the case that self._can_hold_element(value)
# is always true at this point. In particular, that can fail
# for:
# "2u" with bool-dtype, float-dtype
# 0.5 with int64-dtype
# np.nan with int64-dtype
except (TypeError, ValueError):
# GH 22083, TypeError or ValueError occurred within error handling
# causes infinite loop. Cast and retry only if not objectblock.
if is_object_dtype(self):
raise
assert not self._can_hold_element(value), value
# try again with a compatible block
block = self.astype(object)
return block.replace(
to_replace=original_to_replace,
value=value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
if convert:
blocks = [b.convert(numeric=False, copy=not inplace) for b in blocks]
return blocks
def _replace_single(self, *args, **kwargs):
""" no-op on a non-ObjectBlock """
return self if kwargs["inplace"] else self.copy()
def setitem(self, indexer, value):
"""Set the value inplace, returning a a maybe different typed block.
Parameters
----------
indexer : tuple, list-like, array-like, slice
The subset of self.values to set
value : object
The value being set
Returns
-------
Block
Notes
-----
`indexer` is a direct slice/positional indexer. `value` must
be a compatible shape.
"""
transpose = self.ndim == 2
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce if block dtype can store value
values = self.values
if self._can_hold_element(value):
value = self._try_coerce_args(value)
else:
# current dtype cannot store value, coerce to common dtype
find_dtype = False
if hasattr(value, "dtype"):
dtype = value.dtype
find_dtype = True
elif lib.is_scalar(value) and not isna(value):
dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
find_dtype = True
if find_dtype:
dtype = find_common_type([values.dtype, dtype])
if not is_dtype_equal(self.dtype, dtype):
b = self.astype(dtype)
return b.setitem(indexer, value)
# value must be storeable at this moment
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = maybe_promote(arr_value.dtype)
values = values.astype(dtype)
if transpose:
values = values.T
# length checking
check_setitem_lengths(indexer, value, values)
if is_empty_indexer(indexer, arr_value):
# GH#8669 empty indexers
pass
elif is_scalar_indexer(indexer, arr_value):
# setting a single element for each dim and with a rhs that could
# be e.g. a list; see GH#6043
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif (
len(arr_value.shape)
and arr_value.shape[0] == values.shape[0]
and arr_value.size == values.size
):
values[indexer] = value
try:
values = values.astype(arr_value.dtype)
except ValueError:
pass
# set
else:
values[indexer] = value
if transpose:
values = values.T
block = self.make_block(values)
return block
def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a list of new blocks, the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new = getattr(new, "values", new)
mask = getattr(mask, "values", mask)
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isna(new) and not self.is_object:
# FIXME: make sure we have compatible NA
new = self.fill_value
if self._can_hold_element(new):
new = self._try_coerce_args(new)
if transpose:
new_values = new_values.T
# If the default repeat behavior in np.putmask would go in the
# wrong direction, then explicitly repeat and reshape new instead
if getattr(new, "ndim", 0) >= 1:
if self.ndim - 1 == new.ndim and axis == 1:
new = np.repeat(new, new_values.shape[-1]).reshape(self.shape)
new = new.astype(new_values.dtype)
# we require exact matches between the len of the
# values we are setting (or is compat). np.putmask
# doesn't check this and will simply truncate / pad
# the output, but we want sane error messages
#
# TODO: this prob needs some better checking
# for 2D cases
if (
is_list_like(new)
and np.any(mask[mask])
and getattr(new, "ndim", 1) == 1
):
if not (
mask.shape[-1] == len(new)
or mask[mask].shape[-1] == len(new)
or len(new) == 1
):
raise ValueError("cannot assign mismatch length to masked array")
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
if transpose:
mask = mask.T
if isinstance(new, np.ndarray):
new = new.T
axis = new_values.ndim - axis - 1
# Pseudo-broadcast
if getattr(new, "ndim", 0) >= 1:
if self.ndim - 1 == new.ndim:
new_shape = list(new.shape)
new_shape.insert(axis, 1)
new = new.reshape(tuple(new_shape))
# operate column-by-column
def f(mask, val, idx):
if idx is None:
# ndim==1 case.
n = new
else:
if isinstance(new, np.ndarray):
n = np.squeeze(new[idx % new.shape[0]])
else:
n = np.array(new)
# type of the new block
dtype, _ = maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(val, mask, n)
return nv
new_blocks = self.split_and_operate(mask, f, inplace)
return new_blocks
if inplace:
return [self]
if transpose:
new_values = new_values.T
return [self.make_block(new_values)]
def coerce_to_target_dtype(self, other):
"""
coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise
we can also safely try to coerce to the same dtype
and will receive the same block
"""
# if we cannot then coerce to object
dtype, _ = infer_dtype_from(other, pandas_dtype=True)
if is_dtype_equal(self.dtype, dtype):
return self
if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
# we don't upcast to bool
return self.astype(object)
elif (self.is_float or self.is_complex) and (
is_integer_dtype(dtype) or is_float_dtype(dtype)
):
# don't coerce float/complex to int
return self
elif (
self.is_datetime
or is_datetime64_dtype(dtype)
or is_datetime64tz_dtype(dtype)
):
# not a datetime
if not (
(is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype))
and self.is_datetime
):
return self.astype(object)
# don't upcast timezone with different timezone or no timezone
mytz = getattr(self.dtype, "tz", None)
othertz = getattr(dtype, "tz", None)
if not tz_compare(mytz, othertz):
return self.astype(object)
raise AssertionError(
"possible recursion in "
"coerce_to_target_dtype: {} {}".format(self, other)
)
elif self.is_timedelta or is_timedelta64_dtype(dtype):
# not a timedelta
if not (is_timedelta64_dtype(dtype) and self.is_timedelta):
return self.astype(object)
raise AssertionError(
"possible recursion in "
"coerce_to_target_dtype: {} {}".format(self, other)
)
try:
return self.astype(dtype)
except (ValueError, TypeError, OverflowError):
return self.astype(object)
def interpolate(
self,
method="pad",
axis=0,
index=None,
values=None,
inplace=False,
limit=None,
limit_direction="forward",
limit_area=None,
fill_value=None,
coerce=False,
downcast=None,
**kwargs
):
inplace = validate_bool_kwarg(inplace, "inplace")
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = missing.clean_fill_method(method)
except ValueError:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(
method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast,
)
# validate the interp method
m = missing.clean_interp_method(method, **kwargs)
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(
method=m,
index=index,
values=values,
axis=axis,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs
)
def _interpolate_with_fill(
self,
method="pad",
axis=0,
inplace=False,
limit=None,
fill_value=None,
coerce=False,
downcast=None,
):
""" fillna but using the interpolate machinery """
inplace = validate_bool_kwarg(inplace, "inplace")
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
values = self.values if inplace else self.values.copy()
fill_value = self._try_coerce_args(fill_value)
values = missing.interpolate_2d(
values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype,
)
blocks = [self.make_block_same_class(values, ndim=self.ndim)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(
self,
method=None,
index=None,
values=None,
fill_value=None,
axis=0,
limit=None,
limit_direction="forward",
limit_area=None,
inplace=False,
downcast=None,
**kwargs
):
""" interpolate using scipy wrappers """
inplace = validate_bool_kwarg(inplace, "inplace")
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ("krogh", "piecewise_polynomial", "pchip"):
if not index.is_monotonic:
raise ValueError(
"{0} interpolation requires that the "
"index be monotonic.".format(method)
)
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to missing.interpolate_1d
return missing.interpolate_1d(
index,
x,
method=method,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
bounds_error=False,
**kwargs
)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [self.make_block_same_class(interp_values)]
return self._maybe_downcast(blocks, downcast)
    def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
        """
        Take values according to indexer and return them as a block.

        If ``fill_tuple`` is given, its first element is used as the fill
        value for out-of-bounds positions (allow_fill=True); otherwise the
        block's own fill_value is used without filling.
        """
        # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
        # so need to preserve types
        # sparse is treated like an ndarray, but needs .get_values() shaping

        values = self.values

        if fill_tuple is None:
            fill_value = self.fill_value
            allow_fill = False
        else:
            fill_value = fill_tuple[0]
            allow_fill = True

        new_values = algos.take_nd(
            values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value
        )

        # Called from three places in managers, all of which satisfy
        # this assertion
        assert not (axis == 0 and new_mgr_locs is None)
        if new_mgr_locs is None:
            new_mgr_locs = self.mgr_locs

        # keep same-class only when the take did not change the dtype
        if not is_dtype_equal(new_values.dtype, self.dtype):
            return self.make_block(new_values, new_mgr_locs)
        else:
            return self.make_block_same_class(new_values, new_mgr_locs)
def diff(self, n: int, axis: int = 1) -> List["Block"]:
""" return block for the diff of the values """
new_values = algos.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values)]
    def shift(self, periods, axis=0, fill_value=None):
        """
        shift the block by periods, possibly upcast.

        Vacated positions are filled with ``fill_value`` (upcasting the
        dtype first if needed, e.g. int -> float to hold NaN).

        Returns
        -------
        list of Block
        """
        # convert integer to float if necessary. need to do a lot more than
        # that, handle boolean etc also
        new_values, fill_value = maybe_upcast(self.values, fill_value)

        # make sure array sent to np.roll is c_contiguous
        f_ordered = new_values.flags.f_contiguous
        if f_ordered:
            # transpose to get C order; remember to transpose back at the end
            new_values = new_values.T
            axis = new_values.ndim - axis - 1

        # np.prod is 0 for any empty shape; skip the roll entirely then
        if np.prod(new_values.shape):
            new_values = np.roll(new_values, ensure_platform_int(periods), axis=axis)

        # overwrite the wrapped-around slice with the fill value
        axis_indexer = [slice(None)] * self.ndim
        if periods > 0:
            axis_indexer[axis] = slice(None, periods)
        else:
            axis_indexer[axis] = slice(periods, None)
        new_values[tuple(axis_indexer)] = fill_value

        # restore original order
        if f_ordered:
            new_values = new_values.T

        return [self.make_block(new_values)]
    def where(
        self,
        other,
        cond,
        align=True,
        errors="raise",
        try_cast: bool = False,
        axis: int = 0,
    ) -> List["Block"]:
        """
        evaluate the block; return result block(s) from the result

        Parameters
        ----------
        other : a ndarray/object
            Replacement values used where ``cond`` is False.
        cond : the condition to respect
        align : boolean, perform alignment on other/cond
        errors : str, {'raise', 'ignore'}, default 'raise'
            - ``raise`` : allow exceptions to be raised
            - ``ignore`` : suppress exceptions. On error return original object
        axis : int

        Returns
        -------
        a new block(s), the result of the func
        """
        import pandas.core.computation.expressions as expressions

        assert errors in ["raise", "ignore"]
        transpose = self.ndim == 2

        values = self.values
        # keep the un-unwrapped `other` in case we must retry after coercion
        orig_other = other
        if transpose:
            values = values.T

        other = getattr(other, "_values", getattr(other, "values", other))
        cond = getattr(cond, "values", cond)

        # If the default broadcasting would go in the wrong direction, then
        # explicitly reshape other instead
        if getattr(other, "ndim", 0) >= 1:
            if values.ndim - 1 == other.ndim and axis == 1:
                other = other.reshape(tuple(other.shape + (1,)))
            elif transpose and values.ndim == self.ndim - 1:
                cond = cond.T

        if not hasattr(cond, "shape"):
            raise ValueError("where must have a condition that is ndarray like")

        # our where function
        def func(cond, values, other):
            if not (
                (self.is_integer or self.is_bool)
                and lib.is_float(other)
                and np.isnan(other)
            ):
                # np.where will cast integer array to floats in this case
                other = self._try_coerce_args(other)

            fastres = expressions.where(cond, values, other)
            return fastres

        if cond.ravel().all():
            # nothing masked out: the values pass through unchanged
            result = values
        else:
            # see if we can operate on the entire block, or need item-by-item
            # or if we are a single block (ndim == 1)
            try:
                result = func(cond, values, other)
            except TypeError:
                # we cannot coerce, return a compat dtype
                # we are explicitly ignoring errors
                block = self.coerce_to_target_dtype(other)
                # retry on the coerced block with the original (unwrapped) other
                blocks = block.where(
                    orig_other,
                    cond,
                    align=align,
                    errors=errors,
                    try_cast=try_cast,
                    axis=axis,
                )
                return self._maybe_downcast(blocks, "infer")

        if self._can_hold_na or self.ndim == 1:
            if transpose:
                result = result.T

            return [self.make_block(result)]

        # might need to separate out blocks
        # rows fully True in cond keep their dtype; others may need downcasting
        axis = cond.ndim - 1
        cond = cond.swapaxes(axis, 0)
        mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool)

        result_blocks = []
        for m in [mask, ~mask]:
            if m.any():
                taken = result.take(m.nonzero()[0], axis=axis)
                r = maybe_downcast_numeric(taken, self.dtype)
                nb = self.make_block(r.T, placement=self.mgr_locs[m])
                result_blocks.append(nb)

        return result_blocks
def equals(self, other) -> bool:
if self.dtype != other.dtype or self.shape != other.shape:
return False
return array_equivalent(self.values, other.values)
    def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
        """Return a list of unstacked blocks of self

        Parameters
        ----------
        unstacker_func : callable
            Partially applied unstacker.
        new_columns : Index
            All columns of the unstacked BlockManager.
        n_rows : int
            Only used in ExtensionBlock._unstack
        fill_value : int
            Only used in ExtensionBlock._unstack

        Returns
        -------
        blocks : list of Block
            New blocks of unstacked values.
        mask : array_like of bool
            The mask of columns of `blocks` we should keep.
        """
        # unstack operates on the transposed (column-major) values
        unstacker = unstacker_func(self.values.T)
        new_items = unstacker.get_new_columns()
        # map the unstacker's columns into positions within new_columns
        new_placement = new_columns.get_indexer(new_items)
        new_values, mask = unstacker.get_new_values()

        # keep only columns that have at least one valid value
        mask = mask.any(0)
        new_values = new_values.T[mask]
        new_placement = new_placement[mask]

        blocks = [make_block(new_values, placement=new_placement)]
        return blocks, mask
    def quantile(self, qs, interpolation="linear", axis=0):
        """
        compute the quantiles of the block's values

        Parameters
        ----------
        qs: a scalar or list of the quantiles to be computed
        interpolation: type of interpolation, default 'linear'
        axis: axis to compute, default 0

        Returns
        -------
        Block
        """
        # We should always have ndim == 2 because Series dispatches to DataFrame
        assert self.ndim == 2

        values = self.get_values()

        is_empty = values.shape[axis] == 0
        orig_scalar = not is_list_like(qs)
        if orig_scalar:
            # make list-like, unpack later
            qs = [qs]

        if is_empty:
            # create the array of na_values
            # 2d len(values) * len(qs)
            result = np.repeat(
                np.array([self.fill_value] * len(qs)), len(values)
            ).reshape(len(values), len(qs))
        else:
            # asarray needed for Sparse, see GH#24600
            mask = np.asarray(isna(values))
            result = nanpercentile(
                values,
                np.array(qs) * 100,
                axis=axis,
                na_value=self.fill_value,
                mask=mask,
                ndim=values.ndim,
                interpolation=interpolation,
            )

        result = np.array(result, copy=False)
        result = result.T

        if orig_scalar and not lib.is_scalar(result):
            # result could be scalar in case with is_empty and self.ndim == 1
            assert result.shape[-1] == 1, result.shape
            result = result[..., 0]
            result = lib.item_from_zerodim(result)

        ndim = np.ndim(result)
        return make_block(result, placement=np.arange(len(result)), ndim=ndim)
    def _replace_coerce(
        self, to_replace, value, inplace=True, regex=False, convert=False, mask=None
    ):
        """
        Replace value corresponding to the given boolean array with another
        value.

        Parameters
        ----------
        to_replace : object or pattern
            Scalar to replace or regular expression to match.
        value : object
            Replacement object.
        inplace : bool, default False
            Perform inplace modification.
        regex : bool, default False
            If true, perform regular expression substitution.
        convert : bool, default True
            If true, try to coerce any object types to better types.
        mask : array-like of bool, optional
            True indicate corresponding element is ignored.

        Returns
        -------
        A new block if there is anything to replace or the original block.
        """
        if mask.any():
            if not regex:
                # coerce first so the replacement value fits the dtype,
                # then do a plain masked assignment
                self = self.coerce_to_target_dtype(value)
                return self.putmask(mask, value, inplace=inplace)
            else:
                # regex path delegates to the single-value replace machinery
                return self._replace_single(
                    to_replace,
                    value,
                    inplace=inplace,
                    regex=regex,
                    convert=convert,
                    mask=mask,
                )
        # nothing matched: return the block unchanged
        return self
class NonConsolidatableMixIn:
    """ hold methods for the nonconsolidatable blocks """

    # these blocks are never merged with neighbors by the BlockManager
    _can_consolidate = False
    _verify_integrity = False
    _validate_ndim = False

    def __init__(self, values, placement, ndim=None):
        """Initialize a non-consolidatable block.

        'ndim' may be inferred from 'placement'.

        This will continue to call __init__ for the other base
        classes mixed in with this Mixin.
        """
        # Placement must be converted to BlockPlacement so that we can check
        # its length
        if not isinstance(placement, libinternals.BlockPlacement):
            placement = libinternals.BlockPlacement(placement)

        # Maybe infer ndim from placement
        if ndim is None:
            if len(placement) != 1:
                ndim = 1
            else:
                ndim = 2
        super().__init__(values, placement, ndim=ndim)

    @property
    def shape(self):
        # values are always 1-D; a 2-D block reports (n_columns, n_rows)
        if self.ndim == 1:
            return ((len(self.values)),)
        return (len(self.mgr_locs), len(self.values))

    def iget(self, col):
        # positional getter; this block only ever holds a single column,
        # so any explicit column index other than 0 is an error
        if self.ndim == 2 and isinstance(col, tuple):
            col, loc = col
            if not com.is_null_slice(col) and col != 0:
                raise IndexError("{0} only contains one item".format(self))
            elif isinstance(col, slice):
                if col != slice(None):
                    raise NotImplementedError(col)
                return self.values[[loc]]
            return self.values[loc]
        else:
            if col != 0:
                raise IndexError("{0} only contains one item".format(self))
            return self.values

    def should_store(self, value):
        # only store values of the block's own holder type
        return isinstance(value, self._holder)

    def set(self, locs, values, check=False):
        # single-column block: locs must be exactly [0]
        assert locs.tolist() == [0]
        self.values = values

    def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):
        """
        putmask the data to the block; we must be a single block and not
        generate other blocks

        return the resulting block

        Parameters
        ----------
        mask : the condition to respect
        new : a ndarray/object
        align : boolean, perform alignment on other/cond, default is True
        inplace : perform inplace modification, default is False

        Returns
        -------
        a new block, the result of the putmask
        """
        inplace = validate_bool_kwarg(inplace, "inplace")

        # use block's copy logic.
        # .values may be an Index which does shallow copy by default
        new_values = self.values if inplace else self.copy().values
        new = self._try_coerce_args(new)

        if isinstance(new, np.ndarray) and len(new) == len(mask):
            new = new[mask]

        mask = _safe_reshape(mask, new_values.shape)

        new_values[mask] = new
        return [self.make_block(values=new_values)]

    def _get_unstack_items(self, unstacker, new_columns):
        """
        Get the placement, values, and mask for a Block unstack.

        This is shared between ObjectBlock and ExtensionBlock. They
        differ in that ObjectBlock passes the values, while ExtensionBlock
        passes the dummy ndarray of positions to be used by a take
        later.

        Parameters
        ----------
        unstacker : pandas.core.reshape.reshape._Unstacker
        new_columns : Index
            All columns of the unstacked BlockManager.

        Returns
        -------
        new_placement : ndarray[int]
            The placement of the new columns in `new_columns`.
        new_values : Union[ndarray, ExtensionArray]
            The first return value from _Unstacker.get_new_values.
        mask : ndarray[bool]
            The second return value from _Unstacker.get_new_values.
        """
        # shared with ExtensionBlock
        new_items = unstacker.get_new_columns()
        new_placement = new_columns.get_indexer(new_items)
        new_values, mask = unstacker.get_new_values()

        # keep only columns that have at least one valid value
        mask = mask.any(0)
        return new_placement, new_values, mask
class ExtensionBlock(NonConsolidatableMixIn, Block):
    """Block for holding extension types.

    Notes
    -----
    This holds all 3rd-party extension array types. It's also the immediate
    parent class for our internal extension types' blocks, CategoricalBlock.

    ExtensionArrays are limited to 1-D.
    """

    is_extension = True

    def __init__(self, values, placement, ndim=None):
        # unbox Index/Series wrappers to the underlying ExtensionArray
        values = self._maybe_coerce_values(values)
        super().__init__(values, placement, ndim)

    def _maybe_coerce_values(self, values):
        """
        Unbox to an extension array.

        This will unbox an ExtensionArray stored in an Index or Series.
        ExtensionArrays pass through. No dtype coercion is done.

        Parameters
        ----------
        values : Index, Series, ExtensionArray

        Returns
        -------
        ExtensionArray
        """
        return extract_array(values)

    @property
    def _holder(self):
        # For extension blocks, the holder is values-dependent.
        return type(self.values)

    @property
    def fill_value(self):
        # Used in reindex_indexer
        return self.values.dtype.na_value

    @property
    def _can_hold_na(self):
        # The default ExtensionArray._can_hold_na is True
        return self._holder._can_hold_na

    @property
    def is_view(self):
        """Extension arrays are never treated as views."""
        return False

    @property
    def is_numeric(self):
        return self.values.dtype._is_numeric

    def setitem(self, indexer, value):
        """Set the value inplace, returning a same-typed block.

        This differs from Block.setitem by not allowing setitem to change
        the dtype of the Block.

        Parameters
        ----------
        indexer : tuple, list-like, array-like, slice
            The subset of self.values to set
        value : object
            The value being set

        Returns
        -------
        Block

        Notes
        -----
        `indexer` is a direct slice/positional indexer. `value` must
        be a compatible shape.
        """
        if isinstance(indexer, tuple):
            # we are always 1-D
            indexer = indexer[0]

        check_setitem_lengths(indexer, value, self.values)
        self.values[indexer] = value
        return self

    def get_values(self, dtype=None):
        # ExtensionArrays must be iterable, so this works.
        values = np.asarray(self.values)
        if values.ndim == self.ndim - 1:
            # add a leading length-1 axis so the shape matches the block's ndim
            values = values.reshape((1,) + values.shape)
        return values

    def to_dense(self):
        return np.asarray(self.values)

    def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
        """
        Take values according to indexer and return them as a block.
        """
        if fill_tuple is None:
            fill_value = None
        else:
            fill_value = fill_tuple[0]

        # axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routing
        # if its REALLY axis 0, then this will be a reindex and not a take
        new_values = self.values.take(indexer, fill_value=fill_value, allow_fill=True)

        # Called from three places in managers, all of which satisfy
        # this assertion
        assert not (self.ndim == 1 and new_mgr_locs is None)
        if new_mgr_locs is None:
            new_mgr_locs = self.mgr_locs

        return self.make_block_same_class(new_values, new_mgr_locs)

    def _can_hold_element(self, element: Any) -> bool:
        # XXX: We may need to think about pushing this onto the array.
        # We're doing the same as CategoricalBlock here.
        return True

    def _slice(self, slicer):
        """ return a slice of my values """

        # slice the category
        # return same dims as we currently have
        if isinstance(slicer, tuple) and len(slicer) == 2:
            # 2-D slicer over a 1-D array: the column part must be trivial
            if not com.is_null_slice(slicer[0]):
                raise AssertionError("invalid slicing for a 1-ndim categorical")
            slicer = slicer[1]

        return self.values[slicer]

    def concat_same_type(self, to_concat, placement=None):
        """
        Concatenate list of single blocks of the same type.
        """
        values = self._holder._concat_same_type([blk.values for blk in to_concat])
        placement = placement or slice(0, len(values), 1)
        return self.make_block_same_class(values, ndim=self.ndim, placement=placement)

    def fillna(self, value, limit=None, inplace=False, downcast=None):
        # delegate the fill to the ExtensionArray itself
        values = self.values if inplace else self.values.copy()
        values = values.fillna(value=value, limit=limit)
        return [
            self.make_block_same_class(
                values=values, placement=self.mgr_locs, ndim=self.ndim
            )
        ]

    def interpolate(
        self, method="pad", axis=0, inplace=False, limit=None, fill_value=None, **kwargs
    ):
        # only pad/backfill-style interpolation is supported for EAs;
        # implemented via the array's own fillna
        values = self.values if inplace else self.values.copy()
        return self.make_block_same_class(
            values=values.fillna(value=fill_value, method=method, limit=limit),
            placement=self.mgr_locs,
        )

    def shift(
        self,
        periods: int,
        axis: libinternals.BlockPlacement = 0,
        fill_value: Any = None,
    ) -> List["ExtensionBlock"]:
        """
        Shift the block by `periods`.

        Dispatches to underlying ExtensionArray and re-boxes in an
        ExtensionBlock.
        """
        return [
            self.make_block_same_class(
                self.values.shift(periods=periods, fill_value=fill_value),
                placement=self.mgr_locs,
                ndim=self.ndim,
            )
        ]

    def where(
        self,
        other,
        cond,
        align=True,
        errors="raise",
        try_cast: bool = False,
        axis: int = 0,
    ) -> List["Block"]:
        if isinstance(other, ABCDataFrame):
            # ExtensionArrays are 1-D, so if we get here then
            # `other` should be a DataFrame with a single column.
            assert other.shape[1] == 1
            other = other.iloc[:, 0]

        other = extract_array(other, extract_numpy=True)

        if isinstance(cond, ABCDataFrame):
            assert cond.shape[1] == 1
            cond = cond.iloc[:, 0]

        cond = extract_array(cond, extract_numpy=True)

        if lib.is_scalar(other) and isna(other):
            # The default `other` for Series / Frame is np.nan
            # we want to replace that with the correct NA value
            # for the type
            other = self.dtype.na_value

        if is_sparse(self.values):
            # TODO(SparseArray.__setitem__): remove this if condition
            # We need to re-infer the type of the data after doing the
            # where, for cases where the subtypes don't match
            dtype = None
        else:
            dtype = self.dtype

        result = self.values.copy()
        icond = ~cond
        if lib.is_scalar(other):
            set_other = other
        else:
            set_other = other[icond]
        try:
            result[icond] = set_other
        except (NotImplementedError, TypeError):
            # NotImplementedError for class not implementing `__setitem__`
            # TypeError for SparseArray, which implements just to raise
            # a TypeError
            result = self._holder._from_sequence(
                np.where(cond, self.values, other), dtype=dtype
            )

        return [self.make_block_same_class(result, placement=self.mgr_locs)]

    @property
    def _ftype(self):
        return getattr(self.values, "_pandas_ftype", Block._ftype)

    def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
        # ExtensionArray-safe unstack.
        # We override ObjectBlock._unstack, which unstacks directly on the
        # values of the array. For EA-backed blocks, this would require
        # converting to a 2-D ndarray of objects.
        # Instead, we unstack an ndarray of integer positions, followed by
        # a `take` on the actual values.
        dummy_arr = np.arange(n_rows)
        dummy_unstacker = functools.partial(unstacker_func, fill_value=-1)
        unstacker = dummy_unstacker(dummy_arr)

        new_placement, new_values, mask = self._get_unstack_items(
            unstacker, new_columns
        )

        blocks = [
            self.make_block_same_class(
                self.values.take(indices, allow_fill=True, fill_value=fill_value),
                [place],
            )
            for indices, place in zip(new_values.T, new_placement)
        ]
        return blocks, mask
class ObjectValuesExtensionBlock(ExtensionBlock):
    """
    Block providing backwards-compatibility for `.values`.

    Used by PeriodArray and IntervalArray to ensure that
    Series[T].values is an ndarray of objects.
    """

    def external_values(self, dtype=None):
        # box the extension values as an object-dtype ndarray; `dtype` is
        # accepted for interface compatibility and not used here
        return self.values.astype(object)
class NumericBlock(Block):
    """Base class for blocks holding numeric ndarrays."""

    __slots__ = ()
    is_numeric = True
    _can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
    """Shared base for float and complex blocks; adds NaN-aware equality."""

    __slots__ = ()

    def equals(self, other) -> bool:
        """Exact element-wise equality, treating aligned NaNs as equal."""
        if self.dtype != other.dtype:
            return False
        if self.shape != other.shape:
            return False
        left = self.values
        right = other.values
        nan_in_both = np.isnan(left) & np.isnan(right)
        return ((left == right) | nan_in_both).all()
class FloatBlock(FloatOrComplexBlock):
    """Block for float-dtype ndarrays."""

    __slots__ = ()
    is_float = True

    def _can_hold_element(self, element: Any) -> bool:
        # floats and ints fit; datetimelike and bool values do not
        tipo = maybe_infer_dtype_type(element)
        if tipo is not None:
            return issubclass(tipo.type, (np.floating, np.integer)) and not issubclass(
                tipo.type, (np.datetime64, np.timedelta64)
            )
        return isinstance(
            element, (float, int, np.floating, np.int_)
        ) and not isinstance(
            element,
            (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64),
        )

    def to_native_types(
        self,
        slicer=None,
        na_rep="",
        float_format=None,
        decimal=".",
        quoting=None,
        **kwargs
    ):
        """ convert to our native types format, slicing if desired """
        values = self.values
        if slicer is not None:
            values = values[:, slicer]

        # see gh-13418: no special formatting is desired at the
        # output (important for appropriate 'quoting' behaviour),
        # so do not pass it through the FloatArrayFormatter
        if float_format is None and decimal == ".":
            mask = isna(values)

            if not quoting:
                values = values.astype(str)
            else:
                values = np.array(values, dtype="object")

            values[mask] = na_rep
            return values

        # custom formatting requested: delegate to the FloatArrayFormatter
        from pandas.io.formats.format import FloatArrayFormatter

        formatter = FloatArrayFormatter(
            values,
            na_rep=na_rep,
            float_format=float_format,
            decimal=decimal,
            quoting=quoting,
            fixed_width=False,
        )
        return formatter.get_result_as_array()

    def should_store(self, value):
        # when inserting a column should not coerce integers to floats
        # unnecessarily
        return issubclass(value.dtype.type, np.floating) and value.dtype == self.dtype
class ComplexBlock(FloatOrComplexBlock):
    """Block for complex-dtype ndarrays."""

    __slots__ = ()
    is_complex = True

    def _can_hold_element(self, element: Any) -> bool:
        """True if ``element`` can be stored without changing the dtype."""
        tipo = maybe_infer_dtype_type(element)
        if tipo is not None:
            # any real or complex numpy numeric dtype fits
            return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating))
        if isinstance(element, (bool, np.bool_)):
            # bool is a subclass of int but must not be stored as complex
            return False
        return isinstance(element, (float, int, complex, np.float_, np.int_))

    def should_store(self, value):
        """Only complex-typed arrays are stored back into this block."""
        return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
    """Block for integer-dtype ndarrays (cannot hold NA)."""

    __slots__ = ()
    is_integer = True
    _can_hold_na = False

    def _can_hold_element(self, element: Any) -> bool:
        """True if ``element`` fits this block's integer dtype losslessly."""
        tipo = maybe_infer_dtype_type(element)
        if tipo is None:
            return is_integer(element)
        # must be a true integer dtype, not datetimelike, and no wider
        # than our own itemsize
        if not issubclass(tipo.type, np.integer):
            return False
        if issubclass(tipo.type, (np.datetime64, np.timedelta64)):
            return False
        return self.dtype.itemsize >= tipo.itemsize

    def should_store(self, value):
        """Store only integer-typed arrays whose dtype matches exactly."""
        return is_integer_dtype(value) and value.dtype == self.dtype
class DatetimeLikeBlockMixin:
    """Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock."""

    @property
    def _holder(self):
        # NOTE(review): this default is DatetimeArray; TimeDeltaBlock
        # overrides it with TimedeltaArray below
        return DatetimeArray

    @property
    def fill_value(self):
        return np.datetime64("NaT", "ns")

    def get_values(self, dtype=None):
        """
        return object dtype as boxed values, such as Timestamps/Timedelta
        """
        if is_object_dtype(dtype):
            # box each element via the holder array, preserving shape
            values = self.values.ravel()
            result = self._holder(values).astype(object)
            return result.reshape(self.values.shape)
        return self.values
class DatetimeBlock(DatetimeLikeBlockMixin, Block):
    """Block for timezone-naive datetime64[ns] ndarrays."""

    __slots__ = ()
    is_datetime = True

    def __init__(self, values, placement, ndim=None):
        values = self._maybe_coerce_values(values)
        super().__init__(values, placement=placement, ndim=ndim)

    @property
    def _can_hold_na(self):
        return True

    def _maybe_coerce_values(self, values):
        """
        Input validation for values passed to __init__. Ensure that
        we have datetime64ns, coercing if necessary.

        Parameters
        ----------
        values : array-like
            Must be convertible to datetime64

        Returns
        -------
        values : ndarray[datetime64ns]

        Overridden by DatetimeTZBlock.
        """
        if values.dtype != _NS_DTYPE:
            values = conversion.ensure_datetime64ns(values)

        if isinstance(values, DatetimeArray):
            # store the raw ndarray, not the array wrapper
            values = values._data

        assert isinstance(values, np.ndarray), type(values)
        return values

    def _astype(self, dtype, **kwargs):
        """
        these automatically copy, so copy=True has no effect
        raise on an except if raise == True
        """
        dtype = pandas_dtype(dtype)

        # if we are passed a datetime64[ns, tz]
        if is_datetime64tz_dtype(dtype):
            values = self.values
            if getattr(values, "tz", None) is None:
                # naive values are interpreted as UTC before converting
                values = DatetimeArray(values).tz_localize("UTC")
            values = values.tz_convert(dtype.tz)
            return self.make_block(values)

        # delegate
        return super()._astype(dtype=dtype, **kwargs)

    def _can_hold_element(self, element: Any) -> bool:
        tipo = maybe_infer_dtype_type(element)
        if tipo is not None:
            if self.is_datetimetz:
                # require exact match, since non-nano does not exist
                return is_dtype_equal(tipo, self.dtype) or is_valid_nat_for_dtype(
                    element, self.dtype
                )

            # GH#27419 if we get a non-nano datetime64 object
            return is_datetime64_dtype(tipo)
        elif element is NaT:
            return True
        elif isinstance(element, datetime):
            if self.is_datetimetz:
                return tz_compare(element.tzinfo, self.dtype.tz)
            return element.tzinfo is None

        return is_valid_nat_for_dtype(element, self.dtype)

    def _try_coerce_args(self, other):
        """
        Coerce other to dtype 'i8'. NaN and NaT convert to
        the smallest i8, and will correctly round-trip to NaT if converted
        back in _try_coerce_result. values is always ndarray-like, other
        may not be

        Parameters
        ----------
        other : ndarray-like or scalar

        Returns
        -------
        base-type other
        """
        if is_valid_nat_for_dtype(other, self.dtype):
            other = np.datetime64("NaT", "ns")
        elif isinstance(other, (datetime, np.datetime64, date)):
            other = Timestamp(other)
            if other.tz is not None:
                # a tz-aware value cannot be stored in a naive block
                raise TypeError("cannot coerce a Timestamp with a tz on a naive Block")

            other = other.asm8
        elif hasattr(other, "dtype") and is_datetime64_dtype(other):
            # TODO: can we get here with non-nano?
            pass
        else:
            # coercion issues
            # let higher levels handle
            raise TypeError(other)

        return other

    def to_native_types(
        self, slicer=None, na_rep=None, date_format=None, quoting=None, **kwargs
    ):
        """ convert to our native types format, slicing if desired """
        values = self.values
        i8values = self.values.view("i8")

        if slicer is not None:
            values = values[..., slicer]
            i8values = i8values[..., slicer]

        from pandas.io.formats.format import _get_format_datetime64_from_values

        fmt = _get_format_datetime64_from_values(values, date_format)

        result = tslib.format_array_from_datetime(
            i8values.ravel(),
            tz=getattr(self.values, "tz", None),
            format=fmt,
            na_rep=na_rep,
        ).reshape(i8values.shape)
        return np.atleast_2d(result)

    def should_store(self, value):
        # only store plain (naive, non-extension) datetime64 arrays here
        return (
            issubclass(value.dtype.type, np.datetime64)
            and not is_datetime64tz_dtype(value)
            and not is_extension_array_dtype(value)
        )

    def set(self, locs, values):
        """
        Modify Block in-place with new item value

        Returns
        -------
        None
        """
        values = conversion.ensure_datetime64ns(values, copy=False)

        self.values[locs] = values

    def external_values(self):
        return np.asarray(self.values.astype("datetime64[ns]", copy=False))
class DatetimeTZBlock(ExtensionBlock, DatetimeBlock):
    """ implement a datetime64 block with a tz attribute """

    __slots__ = ()
    is_datetimetz = True
    is_extension = True

    _can_hold_element = DatetimeBlock._can_hold_element

    fill_value = np.datetime64("NaT", "ns")

    @property
    def _holder(self):
        return DatetimeArray

    def _maybe_coerce_values(self, values):
        """Input validation for values passed to __init__. Ensure that
        we have datetime64TZ, coercing if necessary.

        Parameters
        ----------
        values : array-like
            Must be convertible to datetime64

        Returns
        -------
        values : DatetimeArray
        """
        if not isinstance(values, self._holder):
            values = self._holder(values)

        if values.tz is None:
            raise ValueError("cannot create a DatetimeTZBlock without a tz")

        return values

    @property
    def is_view(self):
        """ return a boolean if I am possibly a view """
        # check the ndarray values of the DatetimeIndex values
        return self.values._data.base is not None

    def get_values(self, dtype=None):
        """
        Returns an ndarray of values.

        Parameters
        ----------
        dtype : np.dtype
            Only `object`-like dtypes are respected here (not sure
            why).

        Returns
        -------
        values : ndarray
            When ``dtype=object``, then and object-dtype ndarray of
            boxed values is returned. Otherwise, an M8[ns] ndarray
            is returned.

            DatetimeArray is always 1-d. ``get_values`` will reshape
            the return value to be the same dimensionality as the
            block.
        """
        values = self.values
        if is_object_dtype(dtype):
            values = values.astype(object)

        values = np.asarray(values)

        if self.ndim == 2:
            # Ensure that our shape is correct for DataFrame.
            # ExtensionArrays are always 1-D, even in a DataFrame when
            # the analogous NumPy-backed column would be a 2-D ndarray.
            values = values.reshape(1, -1)
        return values

    def to_dense(self):
        # we request M8[ns] dtype here, even though it discards tzinfo,
        # as lots of code (e.g. anything using values_from_object)
        # expects that behavior.
        return np.asarray(self.values, dtype=_NS_DTYPE)

    def _slice(self, slicer):
        """ return a slice of my values """
        if isinstance(slicer, tuple):
            col, loc = slicer
            if not com.is_null_slice(col) and col != 0:
                raise IndexError("{0} only contains one item".format(self))
            return self.values[loc]
        return self.values[slicer]

    def _try_coerce_args(self, other):
        # DatetimeArray handles this for us
        return other

    def diff(self, n: int, axis: int = 0) -> List["Block"]:
        """
        1st discrete difference.

        Parameters
        ----------
        n : int
            Number of periods to diff.
        axis : int, default 0
            Axis to diff upon.

        Returns
        -------
        A list with a new TimeDeltaBlock.

        Notes
        -----
        The arguments here are mimicking shift so they are called correctly
        by apply.
        """
        if axis == 0:
            # Cannot currently calculate diff across multiple blocks since this
            # function is invoked via apply
            raise NotImplementedError
        new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8

        # Reshape the new_values like how algos.diff does for timedelta data
        new_values = new_values.reshape(1, len(new_values))
        new_values = new_values.astype("timedelta64[ns]")
        return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]

    def concat_same_type(self, to_concat, placement=None):
        # need to handle concat([tz1, tz2]) here, since DatetimeArray
        # only handles cases where all the tzs are the same.
        # Instead of placing the condition here, it could also go into the
        # is_uniform_join_units check, but I'm not sure what is better.
        if len({x.dtype for x in to_concat}) > 1:
            # mixed timezones/dtypes: fall back to an object block
            values = concat_datetime([x.values for x in to_concat])
            placement = placement or slice(0, len(values), 1)

            if self.ndim > 1:
                values = np.atleast_2d(values)
            return ObjectBlock(values, ndim=self.ndim, placement=placement)
        return super().concat_same_type(to_concat, placement)

    def fillna(self, value, limit=None, inplace=False, downcast=None):
        # We support filling a DatetimeTZ with a `value` whose timezone
        # is different by coercing to object.
        if self._can_hold_element(value):
            return super().fillna(value, limit, inplace, downcast)

        # different timezones, or a non-tz
        return self.astype(object).fillna(
            value, limit=limit, inplace=inplace, downcast=downcast
        )

    def setitem(self, indexer, value):
        # https://github.com/pandas-dev/pandas/issues/24020
        # Need a dedicated setitem until #24020 (type promotion in setitem
        # for extension arrays) is designed and implemented.
        if self._can_hold_element(value) or (
            isinstance(indexer, np.ndarray) and indexer.size == 0
        ):
            return super().setitem(indexer, value)

        # value does not fit this dtype: promote to an object block first
        obj_vals = self.values.astype(object)
        newb = make_block(
            obj_vals, placement=self.mgr_locs, klass=ObjectBlock, ndim=self.ndim
        )
        return newb.setitem(indexer, value)

    def equals(self, other) -> bool:
        # override for significant performance improvement
        if self.dtype != other.dtype or self.shape != other.shape:
            return False
        # compare the underlying i8 epochs directly, avoiding boxing
        return (self.values.view("i8") == other.values.view("i8")).all()

    def quantile(self, qs, interpolation="linear", axis=0):
        # compute on the tz-naive view, then re-attach the tz at the end
        naive = self.values.view("M8[ns]")

        # kludge for 2D block with 1D values
        naive = naive.reshape(self.shape)

        blk = self.make_block(naive)
        res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis)

        # ravel is kludge for 2D block with 1D values, assumes column-like
        aware = self._holder(res_blk.values.ravel(), dtype=self.dtype)
        return self.make_block_same_class(aware, ndim=res_blk.ndim)
class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
    """Block for timedelta64[ns] ndarrays."""

    __slots__ = ()
    is_timedelta = True
    _can_hold_na = True
    is_numeric = False
    fill_value = np.timedelta64("NaT", "ns")

    def __init__(self, values, placement, ndim=None):
        # coerce to timedelta64[ns] and unwrap TimedeltaArray to ndarray
        if values.dtype != _TD_DTYPE:
            values = conversion.ensure_timedelta64ns(values)
        if isinstance(values, TimedeltaArray):
            values = values._data
        assert isinstance(values, np.ndarray), type(values)
        super().__init__(values, placement=placement, ndim=ndim)

    @property
    def _holder(self):
        return TimedeltaArray

    def _can_hold_element(self, element: Any) -> bool:
        tipo = maybe_infer_dtype_type(element)
        if tipo is not None:
            return issubclass(tipo.type, np.timedelta64)
        elif element is NaT:
            return True
        elif isinstance(element, (timedelta, np.timedelta64)):
            return True
        return is_valid_nat_for_dtype(element, self.dtype)

    def fillna(self, value, **kwargs):
        # allow filling with integers to be
        # interpreted as nanoseconds
        if is_integer(value):
            # Deprecation GH#24694, GH#19233
            warnings.warn(
                "Passing integers to fillna is deprecated, will "
                "raise a TypeError in a future version.  To retain "
                "the old behavior, pass pd.Timedelta(seconds=n) "
                "instead.",
                FutureWarning,
                stacklevel=6,
            )
            value = Timedelta(value, unit="s")
        return super().fillna(value, **kwargs)

    def _try_coerce_args(self, other):
        """
        Coerce `other` to timedelta64[ns], with null values
        converted to timedelta64("NaT", "ns").

        Parameters
        ----------
        other : ndarray-like or scalar

        Returns
        -------
        base-type other
        """
        if is_valid_nat_for_dtype(other, self.dtype):
            other = np.timedelta64("NaT", "ns")
        elif isinstance(other, (timedelta, np.timedelta64)):
            other = Timedelta(other).to_timedelta64()
        elif hasattr(other, "dtype") and is_timedelta64_dtype(other):
            # TODO: can we get here with non-nano dtype?
            pass
        else:
            # coercion issues
            # let higher levels handle
            raise TypeError(other)

        return other

    def should_store(self, value):
        return issubclass(
            value.dtype.type, np.timedelta64
        ) and not is_extension_array_dtype(value)

    def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
        """ convert to our native types format, slicing if desired """
        values = self.values
        if slicer is not None:
            values = values[:, slicer]
        mask = isna(values)

        rvalues = np.empty(values.shape, dtype=object)
        if na_rep is None:
            na_rep = "NaT"
        rvalues[mask] = na_rep
        imask = (~mask).ravel()

        # FIXME:
        # should use the formats.format.Timedelta64Formatter here
        # to figure what format to pass to the Timedelta
        # e.g. to not show the decimals say
        rvalues.flat[imask] = np.array(
            [Timedelta(val)._repr_base(format="all") for val in values.ravel()[imask]],
            dtype=object,
        )
        return rvalues

    def external_values(self, dtype=None):
        return np.asarray(self.values.astype("timedelta64[ns]", copy=False))
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, np.bool_)
return isinstance(element, (bool, np.bool_))
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_) and not is_extension_array_dtype(
value
)
def replace(
self, to_replace, value, inplace=False, filter=None, regex=False, convert=True
):
inplace = validate_bool_kwarg(inplace, "inplace")
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super().replace(
to_replace,
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, placement=None, ndim=2):
if issubclass(values.dtype.type, str):
values = np.array(values, dtype=object)
super().__init__(values, ndim=ndim, placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
def convert(
self,
copy: bool = True,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
coerce: bool = False,
):
""" attempt to coerce any object types to better types return a copy of
the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# operate column-by-column
def f(mask, val, idx):
shape = val.shape
values = soft_convert_objects(
val.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
if isinstance(values, np.ndarray):
# TODO: allow EA once reshape is supported
values = values.reshape(shape)
values = _block_shape(values, ndim=self.ndim)
return values
if self.ndim == 2:
blocks = self.split_and_operate(None, f, False)
else:
values = f(None, self.values.ravel(), None)
blocks = [make_block(values, ndim=self.ndim, placement=self.mgr_locs)]
return blocks
def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]:
if downcast is not None:
return blocks
# split and convert the blocks
return _extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks])
def _can_hold_element(self, element: Any) -> bool:
return True
def _try_coerce_args(self, other):
""" provide coercion to our input arguments """
if isinstance(other, ABCDatetimeIndex):
# May get a DatetimeIndex here. Unbox it.
other = other.array
if isinstance(other, DatetimeArray):
# hit in pandas/tests/indexing/test_coercion.py
# ::TestWhereCoercion::test_where_series_datetime64[datetime64tz]
# when falling back to ObjectBlock.where
other = other.astype(object)
return other
def should_store(self, value):
return not (
issubclass(
value.dtype.type,
(np.integer, np.floating, np.complexfloating, np.datetime64, np.bool_),
)
or
# TODO(ExtensionArray): remove is_extension_type
# when all extension arrays have been ported.
is_extension_type(value)
or is_extension_array_dtype(value)
)
def replace(
self, to_replace, value, inplace=False, filter=None, regex=False, convert=True
):
to_rep_is_list = is_list_like(to_replace)
value_is_list = is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
result_blocks = []
blocks = [self]
if not either_list and is_re(to_replace):
return self._replace_single(
to_replace,
value,
inplace=inplace,
filter=filter,
regex=True,
convert=convert,
)
elif not (either_list or regex):
return super().replace(
to_replace,
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
elif both_lists:
for to_rep, v in zip(to_replace, value):
result_blocks = []
for b in blocks:
result = b._replace_single(
to_rep,
v,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
elif to_rep_is_list and regex:
for to_rep in to_replace:
result_blocks = []
for b in blocks:
result = b._replace_single(
to_rep,
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
return self._replace_single(
to_replace,
value,
inplace=inplace,
filter=filter,
convert=convert,
regex=regex,
)
def _replace_single(
self,
to_replace,
value,
inplace=False,
filter=None,
regex=False,
convert=True,
mask=None,
):
"""
Replace elements by the given value.
Parameters
----------
to_replace : object or pattern
Scalar to replace or regular expression to match.
value : object
Replacement object.
inplace : bool, default False
Perform inplace modification.
filter : list, optional
regex : bool, default False
If true, perform regular expression substitution.
convert : bool, default True
If true, try to coerce any object types to better types.
mask : array-like of bool, optional
True indicate corresponding element is ignored.
Returns
-------
a new block, the result after replacing
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# to_replace is regex compilable
to_rep_re = regex and is_re_compilable(to_replace)
# regex is regex compilable
regex_re = is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError(
"only one of to_replace and regex can be regex compilable"
)
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
if is_re(to_replace):
pattern = to_replace.pattern
else:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
return super().replace(
to_replace, value, inplace=inplace, filter=filter, regex=regex
)
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isna(value) or not isinstance(value, str):
def re_replacer(s):
if is_re(rx) and isinstance(s, str):
return value if rx.search(s) is not None else s
else:
return s
else:
# value is guaranteed to be a string here, s can be either a string
# or null if it's null it gets returned
def re_replacer(s):
if is_re(rx) and isinstance(s, str):
return rx.sub(value, s)
else:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
if mask is None:
new_values[filt] = f(new_values[filt])
else:
new_values[filt][mask] = f(new_values[filt][mask])
# convert
block = self.make_block(new_values)
if convert:
block = block.convert(numeric=False)
return block
def _replace_coerce(
self, to_replace, value, inplace=True, regex=False, convert=False, mask=None
):
"""
Replace value corresponding to the given boolean array with another
value.
Parameters
----------
to_replace : object or pattern
Scalar to replace or regular expression to match.
value : object
Replacement object.
inplace : bool, default False
Perform inplace modification.
regex : bool, default False
If true, perform regular expression substitution.
convert : bool, default True
If true, try to coerce any object types to better types.
mask : array-like of bool, optional
True indicate corresponding element is ignored.
Returns
-------
A new block if there is anything to replace or the original block.
"""
if mask.any():
block = super()._replace_coerce(
to_replace=to_replace,
value=value,
inplace=inplace,
regex=regex,
convert=convert,
mask=mask,
)
if convert:
block = [b.convert(numeric=False, copy=True) for b in block]
return block
return self
class CategoricalBlock(ExtensionBlock):
__slots__ = ()
is_categorical = True
_verify_integrity = True
_can_hold_na = True
_concatenator = staticmethod(concat_categorical)
def __init__(self, values, placement, ndim=None):
# coerce to categorical if we can
values = extract_array(values)
assert isinstance(values, Categorical), type(values)
super().__init__(values, placement=placement, ndim=ndim)
@property
def _holder(self):
return Categorical
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return np.object_
def to_dense(self):
# Categorical.get_values returns a DatetimeIndex for datetime
# categories, so we can't simply use `np.asarray(self.values)` like
# other types.
return self.values._internal_get_values()
def to_native_types(self, slicer=None, na_rep="", quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isna(values)
values = np.array(values, dtype="object")
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
Note that this CategoricalBlock._concat_same_type *may* not
return a CategoricalBlock. When the categories in `to_concat`
differ, this will return an object ndarray.
If / when we decide we don't like that behavior:
1. Change Categorical._concat_same_type to use union_categoricals
2. Delete this method.
"""
values = self._concatenator(
[blk.values for blk in to_concat], axis=self.ndim - 1
)
# not using self.make_block_same_class as values can be object dtype
return make_block(
values, placement=placement or slice(0, len(values), 1), ndim=self.ndim
)
def where(
self,
other,
cond,
align=True,
errors="raise",
try_cast: bool = False,
axis: int = 0,
) -> List["Block"]:
# TODO(CategoricalBlock.where):
# This can all be deleted in favor of ExtensionBlock.where once
# we enforce the deprecation.
object_msg = (
"Implicitly converting categorical to object-dtype ndarray. "
"One or more of the values in 'other' are not present in this "
"categorical's categories. A future version of pandas will raise "
"a ValueError when 'other' contains different categories.\n\n"
"To preserve the current behavior, add the new categories to "
"the categorical before calling 'where', or convert the "
"categorical to a different dtype."
)
try:
# Attempt to do preserve categorical dtype.
result = super().where(other, cond, align, errors, try_cast, axis)
except (TypeError, ValueError):
warnings.warn(object_msg, FutureWarning, stacklevel=6)
result = self.astype(object).where(
other, cond, align=align, errors=errors, try_cast=try_cast, axis=axis
)
return result
# -----------------------------------------------------------------
# Constructor Helpers
def get_block_type(values, dtype=None):
"""
Find the appropriate Block subclass to use for the given values and dtype.
Parameters
----------
values : ndarray-like
dtype : numpy or pandas dtype
Returns
-------
cls : class, subclass of Block
"""
dtype = dtype or values.dtype
vtype = dtype.type
if is_sparse(dtype):
# Need this first(ish) so that Sparse[datetime] is sparse
cls = ExtensionBlock
elif is_categorical(values):
cls = CategoricalBlock
elif issubclass(vtype, np.datetime64):
assert not is_datetime64tz_dtype(values)
cls = DatetimeBlock
elif is_datetime64tz_dtype(values):
cls = DatetimeTZBlock
elif is_interval_dtype(dtype) or is_period_dtype(dtype):
cls = ObjectValuesExtensionBlock
elif is_extension_array_dtype(values):
cls = ExtensionBlock
elif issubclass(vtype, np.floating):
cls = FloatBlock
elif issubclass(vtype, np.timedelta64):
assert issubclass(vtype, np.integer)
cls = TimeDeltaBlock
elif issubclass(vtype, np.complexfloating):
cls = ComplexBlock
elif issubclass(vtype, np.integer):
cls = IntBlock
elif dtype == np.bool_:
cls = BoolBlock
else:
cls = ObjectBlock
return cls
def make_block(values, placement, klass=None, ndim=None, dtype=None, fastpath=None):
# Ensure that we don't allow PandasArray / PandasDtype in internals.
# For now, blocks should be backed by ndarrays when possible.
if isinstance(values, ABCPandasArray):
values = values.to_numpy()
if ndim and ndim > 1:
values = np.atleast_2d(values)
if isinstance(dtype, PandasDtype):
dtype = dtype.numpy_dtype
if fastpath is not None:
# GH#19265 pyarrow is passing this
warnings.warn(
"fastpath argument is deprecated, will be removed in a future release.",
FutureWarning,
)
if klass is None:
dtype = dtype or values.dtype
klass = get_block_type(values, dtype)
elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values):
# TODO: This is no longer hit internally; does it need to be retained
# for e.g. pyarrow?
values = DatetimeArray._simple_new(values, dtype=dtype)
return klass(values, ndim=ndim, placement=placement)
# -----------------------------------------------------------------
def _extend_blocks(result, blocks=None):
""" return a new extended blocks, givin the result """
from pandas.core.internals import BlockManager
if blocks is None:
blocks = []
if isinstance(result, list):
for r in result:
if isinstance(r, list):
blocks.extend(r)
else:
blocks.append(r)
elif isinstance(result, BlockManager):
blocks.extend(result.blocks)
else:
blocks.append(result)
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim < ndim:
if shape is None:
shape = values.shape
if not is_extension_array_dtype(values):
# TODO: https://github.com/pandas-dev/pandas/issues/23023
# block.shape is incorrect for "2D" ExtensionArrays
# We can't, and don't need to, reshape.
values = values.reshape(tuple((1,) + shape))
return values
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len({b.dtype for b in blocks}) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = np.vstack([b.values for b in blocks])
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values, placement=new_mgr_locs)
# no merge
return blocks
def _safe_reshape(arr, new_shape):
"""
If possible, reshape `arr` to have shape `new_shape`,
with a couple of exceptions (see gh-13012):
1) If `arr` is a ExtensionArray or Index, `arr` will be
returned as is.
2) If `arr` is a Series, the `_values` attribute will
be reshaped and returned.
Parameters
----------
arr : array-like, object to be reshaped
new_shape : int or tuple of ints, the new shape
"""
if isinstance(arr, ABCSeries):
arr = arr._values
if not isinstance(arr, ABCExtensionArray):
arr = arr.reshape(new_shape)
return arr
def _putmask_smart(v, mask, n):
"""
Return a new ndarray, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
mask : np.ndarray
Applies to both sides (array like).
n : `new values` either scalar or an array like aligned with `values`
Returns
-------
values : ndarray with updated values
this *may* be a copy of the original
See Also
--------
ndarray.putmask
"""
# we cannot use np.asarray() here as we cannot have conversions
# that numpy does when numeric are mixed with strings
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.repeat(n, len(mask))
# see if we are only masking values that if putted
# will work in the current dtype
try:
nn = n[mask]
except TypeError:
# TypeError: only integer scalar arrays can be converted to a scalar index
pass
else:
# make sure that we have a nullable type
# if we have nulls
if not _isna_compat(v, nn[0]):
pass
elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):
# only compare integers/floats
pass
elif not (is_float_dtype(v.dtype) or is_integer_dtype(v.dtype)):
# only compare integers/floats
pass
else:
# we ignore ComplexWarning here
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", np.ComplexWarning)
nn_at = nn.astype(v.dtype)
comp = nn == nn_at
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[mask] = nn_at
return nv
n = np.asarray(n)
def _putmask_preserve(nv, n):
try:
nv[mask] = n[mask]
except (IndexError, ValueError):
nv[mask] = n
return nv
# preserves dtype if possible
if v.dtype.kind == n.dtype.kind:
return _putmask_preserve(v, n)
# change the dtype if needed
dtype, _ = maybe_promote(n.dtype)
if is_extension_type(v.dtype) and is_object_dtype(dtype):
v = v._internal_get_values(dtype)
else:
v = v.astype(dtype)
return _putmask_preserve(v, n)
| 32.034398 | 88 | 0.576363 |
9066a001534f44d9707fdd69e098fcd4ab19ce0d | 1,505 | py | Python | old_models/proto__4f_all_folds.py | artyompal/imet | 75d708392237a1392ac5fa2f598a747408a88f19 | [
"Apache-2.0"
] | 5 | 2019-06-11T09:11:56.000Z | 2020-05-06T16:05:26.000Z | old_models/proto__4f_all_folds.py | artyompal/imet | 75d708392237a1392ac5fa2f598a747408a88f19 | [
"Apache-2.0"
] | null | null | null | old_models/proto__4f_all_folds.py | artyompal/imet | 75d708392237a1392ac5fa2f598a747408a88f19 | [
"Apache-2.0"
] | 2 | 2019-06-12T14:14:35.000Z | 2019-07-18T15:06:14.000Z | #!/usr/bin/python3.6
import itertools, os, re, sys
from glob import glob
from typing import List
from debug import dprint
IN_KERNEL = os.environ.get('KAGGLE_WORKING_DIR') is not None
MODEL_PATH = '../input/' if IN_KERNEL else '../best_models/'
def run(command: List[str]) -> None:
res = os.system('export PYTHONPATH=${PYTHONPATH}:/kaggle/working && ' + ' '.join(command))
if res != 0:
sys.exit()
num_tta = 4
models = {
'4f_se_resnext101_352x352_aug2_f0_e24_0.6058.pth': 1,
'4f_se_resnext101_352x352_aug2_f1_e16_0.6081.pth': 1,
'4f_se_resnext101_352x352_aug2_f2_e10_0.6059.pth': 1,
'4f_se_resnext101_352x352_aug2_f3_e16_0.6053.pth': 1,
'4f_se_resnext101_352x352_aug2_f4_e15_0.6098.pth': 1,
}
model2path = {os.path.basename(path): path for path in glob(MODEL_PATH + '**/*.pth')}
for model in models.keys():
assert os.path.exists(model2path[model])
for model in models.keys():
m = re.match(r'(.*)_f(\d)_e\d+.*\.pth', os.path.basename(model))
assert m
script_name = f'train_{m.group(1)}.py'
fold = m.group(2)
cmd = ['python3.6', script_name, '--predict', '--weights', model2path[model],
'--fold', fold, '--num_tta', str(num_tta)]
print('running', cmd)
run(cmd)
cmd = ['python3.6', 'blend.py', 'submission.csv']
for model, weight in models.items():
name = os.path.splitext(os.path.basename(model))[0]
predict = f'pred_level1_{name}.npz'
cmd.extend([predict, str(weight)])
print('running', cmd)
run(cmd)
| 29.509804 | 94 | 0.667774 |
8054e9cd627c251b8c101db962b827ecedad7158 | 3,724 | py | Python | assignment1/run_experiment.py | yuhu0016/machine-learning-cs7641 | b746acb1573317ff1ba2fbf72a6f9bac667f340d | [
"MIT"
] | null | null | null | assignment1/run_experiment.py | yuhu0016/machine-learning-cs7641 | b746acb1573317ff1ba2fbf72a6f9bac667f340d | [
"MIT"
] | null | null | null | assignment1/run_experiment.py | yuhu0016/machine-learning-cs7641 | b746acb1573317ff1ba2fbf72a6f9bac667f340d | [
"MIT"
] | null | null | null | import argparse
from datetime import datetime
import logging
import numpy as np
import experiments
from data import loader
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def run_experiment(experiment_details, experiment, timing_key, verbose, timings):
t = datetime.now()
for details in experiment_details:
exp = experiment(details, verbose=verbose)
logger.info("Running {} experiment: {}".format(timing_key, details.ds_readable_name))
exp.perform()
t_d = datetime.now() - t
timings[timing_key] = t_d.seconds
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Perform some SL experiments')
parser.add_argument('--threads', type=int, default=1, help='Number of threads (defaults to 1, -1 for auto)')
parser.add_argument('--seed', type=int, help='A random seed to set, if desired')
parser.add_argument('--ann', action='store_true', help='Run the ANN experiment')
parser.add_argument('--boosting', action='store_true', help='Run the Boosting experiment')
parser.add_argument('--dt', action='store_true', help='Run the Decision Tree experiment')
parser.add_argument('--knn', action='store_true', help='Run the KNN experiment')
parser.add_argument('--svm', action='store_true', help='Run the SVM experiment')
parser.add_argument('--all', action='store_true', help='Run all experiments')
parser.add_argument('--verbose', action='store_true', help='If true, provide verbose output')
args = parser.parse_args()
verbose = args.verbose
threads = args.threads
seed = args.seed
if seed is None:
seed = np.random.randint(0, (2 ** 32) - 1)
print("Using seed {}".format(seed))
print("Loading data")
print("----------")
# ds1_details = {
# 'data': loader.HTRU2Data(verbose=verbose, seed=seed),
# 'name': 'HTRU2',
# 'readable_name': 'HTRU2',
# }
ds1_details = {
'data': loader.PenDigitData(verbose=verbose, seed=seed),
'name': 'pen_digits',
'readable_name': 'Handwritten Digits',
}
ds2_details = {
'data': loader.CreditDefaultData(verbose=verbose, seed=seed),
'name': 'credit_default',
'readable_name': 'Credit Default',
}
# ds2_details = {
# 'data': None,
# 'name': None,
# 'readable_name': None,
# }
if verbose:
print("----------")
print("Running experiments")
timings = {}
datasets = [
ds1_details,
ds2_details
]
experiment_details = []
for ds in datasets:
data = ds['data']
data.load_and_process()
data.build_train_test_split()
data.scale_standard()
experiment_details.append(experiments.ExperimentDetails(
data, ds['name'], ds['readable_name'],
threads=threads,
seed=seed
))
if args.ann or args.all:
run_experiment(experiment_details, experiments.ANNExperiment, 'ANN', verbose, timings)
if args.boosting or args.all:
run_experiment(experiment_details, experiments.BoostingExperiment, 'Boosting', verbose, timings)
if args.dt or args.all:
run_experiment(experiment_details, experiments.DTExperiment, 'DT', verbose, timings)
if args.knn or args.all:
run_experiment(experiment_details, experiments.KNNExperiment, 'KNN', verbose, timings)
if args.svm or args.all:
run_experiment(experiment_details, experiments.SVMExperiment, 'SVM', verbose, timings)
print(timings)
| 33.25 | 112 | 0.639903 |
b67bf5e62b966b79b86bcefb10feb36dc4f07868 | 1,372 | py | Python | mindspore/ops/_op_impl/tbe/npu_clear_float_status.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | mindspore/python/mindspore/ops/_op_impl/tbe/npu_clear_float_status.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | mindspore/python/mindspore/ops/_op_impl/tbe/npu_clear_float_status.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""NPUClearFloatStatus op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
npu_clear_float_status_op_info = TBERegOp("NPUClearFloatStatus") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("n_p_u_clear_float_status.so") \
.compute_cost(10) \
.kernel_name("n_p_u_clear_float_status") \
.partial_flag(True) \
.input(0, "addr", False, "required", "all") \
.output(0, "data", False, "required", "all") \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.get_op_info()
@op_info_register(npu_clear_float_status_op_info)
def _npu_clear_float_status_tbe():
"""NPUClearFloatStatus TBE register"""
return
| 38.111111 | 79 | 0.700437 |
cc2e27ffb49035025d16d9d3948895cc0f4fd181 | 415 | py | Python | pyproxy/settings.py | tukeJonny/pyproxy | 06d44c30d16145f3f568b6a5b593c272eb46fa39 | [
"MIT"
] | null | null | null | pyproxy/settings.py | tukeJonny/pyproxy | 06d44c30d16145f3f568b6a5b593c272eb46fa39 | [
"MIT"
] | null | null | null | pyproxy/settings.py | tukeJonny/pyproxy | 06d44c30d16145f3f568b6a5b593c272eb46fa39 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
import os
import shutil
from pathlib import Path
# CA Settings
BASE_PATH = Path('/', 'tmp', 'proxy_ca')
try:
shutil.rmtree(str(BASE_PATH))
except:
pass
os.mkdir(str(BASE_PATH))
SSL_BITS = '2048'
SSL_DAYS = '365'
# Proxy Handler Settings
CACERT_DISTRIBUTOR='http://pyproxy.cacert/'
REQUEST_TIMEOUT = 1
PROTOCOL_VERSION = 'HTTP/1.1'
# Server Settings
HOST = 'localhost'
PORT = 24365
| 17.291667 | 43 | 0.706024 |
bd7604c3bca3f4b8e6240e0947f502187404b8a2 | 528 | py | Python | utils/notify_admin.py | DurbeKK/room-finder-bot | 2d379cd8c70660dd01b4b18253a063acc6081786 | [
"MIT"
] | 2 | 2022-02-02T12:48:15.000Z | 2022-03-03T18:55:15.000Z | utils/notify_admin.py | DurbeKK/countdown-tg-bot | ed7152d642ed746ddfbde36fd126a8d958fd8127 | [
"MIT"
] | null | null | null | utils/notify_admin.py | DurbeKK/countdown-tg-bot | ed7152d642ed746ddfbde36fd126a8d958fd8127 | [
"MIT"
] | null | null | null | import logging
from aiogram import Dispatcher
from data.config import ADMIN
async def notify_on_startup(dp: Dispatcher):
"""Notify admin that the bot has launched."""
try:
await dp.bot.send_message(ADMIN, "Bot launched!")
except Exception as err:
logging.exception(err)
async def notify_on_shutdown(dp: Dispatcher):
"""Notify admin that the bot has shut down."""
try:
await dp.bot.send_message(ADMIN, "Bot shut down!")
except Exception as err:
logging.exception(err)
| 25.142857 | 58 | 0.689394 |
9167e77052fc4014f2584b3db9804048484431ac | 4,931 | py | Python | neuralmonkey/decoders/sequence_classifier.py | hoangcuong2011/LDNMT | b0154d4ee7aa776adf02ef6bba03c9312345038a | [
"BSD-3-Clause"
] | null | null | null | neuralmonkey/decoders/sequence_classifier.py | hoangcuong2011/LDNMT | b0154d4ee7aa776adf02ef6bba03c9312345038a | [
"BSD-3-Clause"
] | null | null | null | neuralmonkey/decoders/sequence_classifier.py | hoangcuong2011/LDNMT | b0154d4ee7aa776adf02ef6bba03c9312345038a | [
"BSD-3-Clause"
] | null | null | null | from typing import cast, Any, Callable, Iterable, Optional, List
import tensorflow as tf
from neuralmonkey.dataset import Dataset
from neuralmonkey.vocabulary import Vocabulary
from neuralmonkey.model.model_part import ModelPart, FeedDict
from neuralmonkey.nn.mlp import MultilayerPerceptron
from neuralmonkey.decorators import tensor
class SequenceClassifier(ModelPart):
"""A simple MLP classifier over encoders.
The API pretends it is an RNN decoder which always generates a sequence of
length exactly one.
"""
# pylint: disable=too-many-arguments
def __init__(self,
name: str,
encoders: List[Any],
vocabulary: Vocabulary,
data_id: str,
layers: List[int],
activation_fn: Callable[[tf.Tensor], tf.Tensor]=tf.nn.relu,
dropout_keep_prob: float = 0.5,
save_checkpoint: Optional[str] = None,
load_checkpoint: Optional[str] = None) -> None:
"""Construct a new instance of the sequence classifier.
Args:
name: Name of the decoder. Should be unique accross all Neural
Monkey objects
encoders: Input encoders of the decoder
vocabulary: Target vocabulary
data_id: Target data series
layers: List defining structure of the NN. Ini example:
layers=[100,20,5] ;creates classifier with hidden layers of
size 100, 20, 5 and one output layer
depending on the size of vocabulary
activation_fn: activation function used on the output of each
hidden layer.
dropout_keep_prob: Probability of keeping a value during dropout
"""
ModelPart.__init__(self, name, save_checkpoint, load_checkpoint)
self.encoders = encoders
self.vocabulary = vocabulary
self.data_id = data_id
self.layers = layers
self.activation_fn = activation_fn
self.dropout_keep_prob = dropout_keep_prob
self.max_output_len = 1
tf.summary.scalar(
'train_optimization_cost',
self.cost, collections=["summary_train"])
# pylint: enable=too-many-arguments
# pylint: disable=no-self-use
@tensor
def train_mode(self) -> tf.Tensor:
return tf.placeholder(tf.bool, name="train_mode")
@tensor
def gt_inputs(self) -> List[tf.Tensor]:
return [tf.placeholder(tf.int32, shape=[None], name="targets")]
# pylint: enable=no-self-use
@tensor
def _mlp(self) -> MultilayerPerceptron:
mlp_input = tf.concat([enc.encoded for enc in self.encoders], 1)
return MultilayerPerceptron(
mlp_input, self.layers,
self.dropout_keep_prob, len(self.vocabulary),
activation_fn=self.activation_fn, train_mode=self.train_mode)
@tensor
def loss_with_gt_ins(self) -> tf.Tensor:
# pylint: disable=no-member,unsubscriptable-object
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self._mlp.logits, labels=self.gt_inputs[0]))
# pylint: enable=no-member,unsubscriptable-object
@property
def loss_with_decoded_ins(self) -> tf.Tensor:
return self.loss_with_gt_ins
@property
def cost(self) -> tf.Tensor:
return self.loss_with_gt_ins
@tensor
def decoded_seq(self) -> List[tf.Tensor]:
# pylint: disable=no-member
return [self._mlp.classification]
# pylint: enable=no-member
@tensor
def decoded_logits(self) -> List[tf.Tensor]:
# pylint: disable=no-member
return [self._mlp.logits]
# pylint: enable=no-member
@tensor
def runtime_logprobs(self) -> List[tf.Tensor]:
# pylint: disable=no-member
return [tf.nn.log_softmax(self._mlp.logits)]
# pylint: enable=no-member
@property
def train_loss(self):
return self.loss_with_gt_ins
@property
def runtime_loss(self):
return self.loss_with_decoded_ins
@property
def decoded(self):
return self.decoded_seq
def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
sentences = cast(Iterable[List[str]],
dataset.get_series(self.data_id, allow_none=True))
sentences_list = list(sentences) if sentences is not None else None
fd = {} # type: FeedDict
if sentences is not None:
label_tensors, _ = self.vocabulary.sentences_to_tensor(
sentences_list, self.max_output_len)
# pylint: disable=unsubscriptable-object
fd[self.gt_inputs[0]] = label_tensors[0]
# pylint: enable=unsubscriptable-object
fd[self.train_mode] = train
return fd
| 34.725352 | 79 | 0.628676 |
718163d9eba9f24abe4269312f60bbea190ec52b | 6,475 | py | Python | hmtl/models/layerCorefSrlBert.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | hmtl/models/layerCorefSrlBert.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | hmtl/models/layerCorefSrlBert.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | # coding: utf-8
import logging
from typing import Dict
import torch
from transformers import AutoModel
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward
from allennlp.modules import Seq2SeqEncoder
from allennlp.modules.span_extractors import (
SelfAttentiveSpanExtractor,
EndpointSpanExtractor,
)
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.nn import RegularizerApplicator, InitializerApplicator
from overrides import overrides
from hmtl.models import CoreferenceCustom, SrlCustomBert
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("coref_srl_bert")
class LayerCorefSrlBert(Model):
    """
    A class that implement two tasks of HMTL model: EMD (CRF Tagger) and
    Coref (Lee et al., 2017).

    Parameters
    ----------
    vocab: ``allennlp.data.Vocabulary``, required.
        The vocabulary fitted on the data.
    params: ``allennlp.common.Params``, required
        Configuration parameters for the multi-task model.
    regularizer: ``allennlp.nn.RegularizerApplicator``, optional (default = None)
        A regularizer to apply to the model's layers.
    """

    def __init__(
        self,
        vocab: Vocabulary,
        params: Params,
        regularizer: RegularizerApplicator = None,
    ):
        super(LayerCorefSrlBert, self).__init__(vocab=vocab, regularizer=regularizer)

        # NOTE: params.pop(...) consumes keys, so the order of pops below
        # must match the config layout.
        srl_params = params.pop("srl")
        coref_params = params.pop("coref")

        # Base text field embedder: a shared pretrained BERT model used by
        # both tasks (loaded once and injected into the embedder params).
        _bert_model_name = srl_params.pop("bert_model")
        bert_model = AutoModel.from_pretrained(_bert_model_name)
        bert_model._name = _bert_model_name
        text_field_embedder_params = params.pop("text_field_embedder")
        text_field_embedder_params["token_embedders"]["tokens"][
            "model_name"
        ] = bert_model
        text_field_embedder = BasicTextFieldEmbedder.from_params(
            vocab=vocab, params=text_field_embedder_params
        )
        self._text_field_embedder = text_field_embedder

        ############
        # SRL Stuffs
        ############
        tagger_srl = SrlCustomBert(
            vocab=vocab,
            bert_model=bert_model,
            label_smoothing=params.pop_float("label_smoothing", 0.1),
        )
        self._tagger_srl = tagger_srl

        ##############
        # Coref Stuffs
        ##############
        # Encoder (contextualizes the shared embeddings for coref).
        encoder_coref_params = coref_params.pop("encoder")
        encoder_coref = Seq2SeqEncoder.from_params(encoder_coref_params)
        self._encoder_coref = encoder_coref

        # Tagger: Coreference
        tagger_coref_params = coref_params.pop("tagger")
        eval_on_gold_mentions = tagger_coref_params.pop_bool(
            "eval_on_gold_mentions", False
        )
        init_params = tagger_coref_params.pop("initializer", None)
        initializer = (
            InitializerApplicator.from_params(init_params)
            if init_params is not None
            else InitializerApplicator()
        )

        # Span embedders: endpoint-based (over encoder output) and
        # self-attentive (over raw embeddings).
        self._endpoint_span_extractor = EndpointSpanExtractor(
            self._encoder_coref.get_output_dim(),
            combination="x,y",
            num_width_embeddings=tagger_coref_params.get("max_span_width", 10),
            span_width_embedding_dim=tagger_coref_params.get("feature_size", 20),
            bucket_widths=False,
        )
        input_embedding_size = self._text_field_embedder.get_output_dim()
        self._attentive_span_extractor = SelfAttentiveSpanExtractor(
            input_dim=input_embedding_size
        )
        tagger_coref = CoreferenceCustom(
            vocab=vocab,
            text_field_embedder=self._text_field_embedder,
            context_layer=self._encoder_coref,
            mention_feedforward=FeedForward.from_params(
                tagger_coref_params.pop("mention_feedforward")
            ),
            antecedent_feedforward=FeedForward.from_params(
                tagger_coref_params.pop("antecedent_feedforward")
            ),
            feature_size=tagger_coref_params.pop_int("feature_size"),
            max_span_width=tagger_coref_params.pop_int("max_span_width"),
            spans_per_word=tagger_coref_params.pop_float("spans_per_word"),
            max_antecedents=tagger_coref_params.pop_int("max_antecedents"),
            lexical_dropout=tagger_coref_params.pop_float("lexical_dropout", 0.2),
            initializer=initializer,
            eval_on_gold_mentions=eval_on_gold_mentions,
        )
        self._tagger_coref = tagger_coref
        if eval_on_gold_mentions:
            self._tagger_coref._eval_on_gold_mentions = True

        logger.info("Multi-Task Learning Model has been instantiated.")

    @overrides
    def forward(
        self,
        tensor_batch,
        for_training: bool = False,
        task_name: str = "srl",
        sample: bool = False,
    ) -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        """
        Special case for forward: for coreference, we can use gold mentions to
        predict the clusters during evaluation (not during training).
        """
        tagger = getattr(self, "_tagger_" + task_name)
        if task_name == "coref" and tagger._eval_on_gold_mentions:
            # Gold mentions are only allowed at evaluation time.
            if for_training:
                tagger._use_gold_mentions = False
            else:
                tagger._use_gold_mentions = True
        tensor_batch["sample"] = sample
        return tagger.forward(**tensor_batch)

    @overrides
    def decode(self, output_dict: Dict[str, torch.Tensor], task_name: str = "srl"):
        """Delegate decoding to the tagger of the requested task."""
        tagger = getattr(self, "_tagger_" + task_name)
        return tagger.decode(output_dict)

    @overrides
    def get_metrics(
        self, task_name: str = "srl", reset: bool = False, full: bool = False
    ) -> Dict[str, float]:
        """Return the metrics of the requested task's tagger.

        ``full`` is only honoured by the coref tagger.
        """
        task_tagger = getattr(self, "_tagger_" + task_name)
        if full and task_name == "coref":
            return task_tagger.get_metrics(reset=reset, full=full)
        else:
            return task_tagger.get_metrics(reset=reset)

    @classmethod
    def from_params(
        cls,
        vocab: Vocabulary,
        params: Params,
        regularizer: RegularizerApplicator,
        **kwargs
    ) -> "LayerCorefSrlBert":
        """Alternate constructor used by AllenNLP's config-driven loading."""
        return cls(vocab=vocab, params=params, regularizer=regularizer)
| 35.190217 | 99 | 0.657297 |
465a94cab94d5cdb8de38c4f3d4f83a1d3927393 | 2,982 | py | Python | grove/helper/os_sched.py | Hansen0314/grove.py | 8edbf4678d7634623d9279bce5a8b73f32c56325 | [
"MIT"
] | 122 | 2018-12-04T16:42:32.000Z | 2022-03-16T09:15:07.000Z | grove/helper/os_sched.py | Hansen0314/grove.py | 8edbf4678d7634623d9279bce5a8b73f32c56325 | [
"MIT"
] | 28 | 2019-03-27T19:26:25.000Z | 2022-03-30T04:49:54.000Z | grove/helper/os_sched.py | Hansen0314/grove.py | 8edbf4678d7634623d9279bce5a8b73f32c56325 | [
"MIT"
] | 91 | 2018-06-30T06:35:23.000Z | 2022-03-20T14:56:15.000Z | #!/usr/bin/env python
#
# This is the library for Grove Base Hat.
#
# OS Scheduler Classes
#
'''
provide functions to promote process real-time priority or change back to default
## License
The MIT License (MIT)
Grove Base Hat for the Raspberry Pi, used to connect grove sensors.
Copyright (C) 2018 Seeed Technology Co.,Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from __future__ import print_function
import sys
# Linux scheduling-policy constants (see sched(7)).
SCHED_OTHER = 0
SCHED_FIFO = 1
SCHED_RR = 2
SCHED_BATCH = 3
SCHED_IDLE = 5

_os_import = True  # set False if no usable scheduler interface is found
max_param = None
if sys.version_info >= (3, 3):
    # Python >= 3.3 exposes the scheduler API directly on the os module.
    import os as osm
    max_priority = osm.sched_get_priority_max(SCHED_FIFO)
    max_param = osm.sched_param(max_priority)
    norm_param = osm.sched_param(0)
else:
    try:
        # Older Pythons: call the C library's scheduler via ctypes.
        import ctypes
        import ctypes.util
        osm = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
    except Exception:
        print("error: module %s unusable" % __name__);
        _os_import = False

if not max_param and _os_import:
    # ctypes fallback: build the equivalents of os.sched_param by hand.
    class _sched_param(ctypes.Structure):
        _fields_ = [ ('sched_priority', ctypes.c_int) ]
    max_param_c = _sched_param()
    max_priority = osm.sched_get_priority_max(SCHED_FIFO)
    # print("max priority = %d" % max_priority)
    max_param_c.sched_priority = max_priority
    max_param = ctypes.byref(max_param_c)
    norm_param_c = _sched_param()
    norm_param_c.sched_priority = 0
    norm_param = ctypes.byref(norm_param_c)
def set_max_priority():
    """Set current process to highest (real-time FIFO) priority.

    Returns:
        False if the scheduler interface is unavailable; None otherwise.
    """
    if not _os_import:
        return False
    osm.sched_setscheduler(0, SCHED_FIFO, max_param)
def set_default_priority():
    """Set current process back to the default (SCHED_OTHER) priority.

    Returns:
        False if the scheduler interface is unavailable; None otherwise.
    """
    if not _os_import:
        return False
    osm.sched_setscheduler(0, SCHED_OTHER, norm_param)
'''
class Sched(object):
    def __init__(self):
        "Initialize Sched object"
'''
# NOTE: the triple-quoted block above is a bare string literal (dead example
# code left by the original author); it has no runtime effect.
if __name__ == '__main__':
    # Smoke test: raise priority, wait a second, then restore the default.
    import time
    set_max_priority()
    time.sleep(1)
    set_default_priority()
| 28.951456 | 81 | 0.735748 |
f31324736fba9cfb76a0dcc9c72154ea67de6f9d | 1,020 | py | Python | kubernetes/test/test_v1alpha1_role_binding_list.py | TomasTomecek/kubernetes-python | c37c074303a13c72662b9201ccc023fb0ca45755 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1alpha1_role_binding_list.py | TomasTomecek/kubernetes-python | c37c074303a13c72662b9201ccc023fb0ca45755 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1alpha1_role_binding_list.py | TomasTomecek/kubernetes-python | c37c074303a13c72662b9201ccc023fb0ca45755 | [
"Apache-2.0"
] | 1 | 2020-05-09T07:16:55.000Z | 2020-05-09T07:16:55.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1alpha1_role_binding_list import V1alpha1RoleBindingList
class TestV1alpha1RoleBindingList(unittest.TestCase):
    """ V1alpha1RoleBindingList unit test stubs (auto-generated by Swagger Codegen). """

    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testV1alpha1RoleBindingList(self):
        """
        Test V1alpha1RoleBindingList
        """
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1alpha1_role_binding_list.V1alpha1RoleBindingList()
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 22.666667 | 105 | 0.72549 |
e47e87176e623ffa0ad8d40450904494c570bcdb | 2,334 | py | Python | translate.py | siboehm/hgraph2graph | 91d0dc763be1f9bc97e0c2473b0a00a058b4a9f9 | [
"MIT"
] | null | null | null | translate.py | siboehm/hgraph2graph | 91d0dc763be1f9bc97e0c2473b0a00a058b4a9f9 | [
"MIT"
] | null | null | null | translate.py | siboehm/hgraph2graph | 91d0dc763be1f9bc97e0c2473b0a00a058b4a9f9 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
import math, random, sys
import numpy as np
import argparse
from hgraph import *
import rdkit
# Silence RDKit logging below CRITICAL.
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)

parser = argparse.ArgumentParser()
parser.add_argument("--test", required=True)
parser.add_argument("--vocab", required=True)
parser.add_argument("--atom_vocab", default=common_atom_vocab)
parser.add_argument("--model", required=True)
parser.add_argument("--num_decode", type=int, default=20)
parser.add_argument("--sample", action="store_true")
parser.add_argument("--novi", action="store_true")
parser.add_argument("--seed", type=int, default=1)
# Model hyperparameters (must match the checkpoint being loaded).
parser.add_argument("--rnn_type", type=str, default="LSTM")
parser.add_argument("--hidden_size", type=int, default=270)
parser.add_argument("--embed_size", type=int, default=270)
parser.add_argument("--batch_size", type=int, default=1)
parser.add_argument("--latent_size", type=int, default=4)
parser.add_argument("--depthT", type=int, default=20)
parser.add_argument("--depthG", type=int, default=20)
parser.add_argument("--diterT", type=int, default=1)
parser.add_argument("--diterG", type=int, default=3)
parser.add_argument("--dropout", type=float, default=0.0)
args = parser.parse_args()

args.enum_root = True
args.greedy = not args.sample  # --sample turns greedy decoding off

# Load the test molecules (one entry per line) and the fragment vocabulary.
args.test = [line.strip("\r\n ") for line in open(args.test)]
vocab = [x.strip("\r\n ").split() for x in open(args.vocab)]
args.vocab = PairVocab(vocab)

# --novi selects the non-variational model variant.
if args.novi:
    model = HierGNN(args).cuda()
else:
    model = HierVGNN(args).cuda()

model.load_state_dict(torch.load(args.model))
model.eval()

dataset = MolEnumRootDataset(args.test, args.vocab, args.atom_vocab)
loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0, collate_fn=lambda x: x[0])

torch.manual_seed(args.seed)
random.seed(args.seed)

with torch.no_grad():
    for i, batch in enumerate(loader):
        smiles = args.test[i]
        if batch is None:
            # No batch could be built for this molecule: echo the input
            # as its own "translation" num_decode times.
            for k in range(args.num_decode):
                print(smiles, smiles)
        else:
            new_mols = model.translate(batch[1], args.num_decode, args.enum_root, args.greedy)
            for k in range(args.num_decode):
                print(smiles, new_mols[k])
71fe54be2fa2ad198c877096af802d4a6b25a90b | 2,569 | py | Python | tests/test_console.py | AndrasSzabo/andras-hypermodern-python | ec3193c5238ae5727f61da866e2b640a5451b90a | [
"MIT"
] | null | null | null | tests/test_console.py | AndrasSzabo/andras-hypermodern-python | ec3193c5238ae5727f61da866e2b640a5451b90a | [
"MIT"
] | null | null | null | tests/test_console.py | AndrasSzabo/andras-hypermodern-python | ec3193c5238ae5727f61da866e2b640a5451b90a | [
"MIT"
] | null | null | null | """Test cases for the console module."""
from unittest.mock import Mock
import click
import click.testing
from click.testing import CliRunner
import pytest
from pytest_mock import MockFixture
import requests
from andras_hypermodern_python import console
@pytest.fixture
def mock_wikipedia_random_page(mocker: MockFixture) -> Mock:
    """Fixture for mocking wikipedia.random_page."""
    return mocker.patch("andras_hypermodern_python.wikipedia.random_page")
@pytest.fixture
def runner() -> CliRunner:
    """Fixture for invoking command-line interfaces in-process."""
    return click.testing.CliRunner()
def test_main_uses_specified_language(
    runner: CliRunner, mock_wikipedia_random_page: Mock
) -> None:
    """It uses the specified language edition of Wikipedia."""
    runner.invoke(console.main, ["--language=pl"])
    # The --language option must be forwarded to wikipedia.random_page.
    mock_wikipedia_random_page.assert_called_with(language="pl")
def test_main_succeeds(runner: CliRunner, mock_requests_get: Mock) -> None:
    """It exits with a status code of zero (end-to-end)."""
    # mock_requests_get is provided by conftest — TODO confirm its fixture home.
    result = runner.invoke(console.main)
    assert result.exit_code == 0
def test_main_prints_title(runner: CliRunner, mock_requests_get: Mock) -> None:
    """It prints the title of the Wikipedia page."""
    result = runner.invoke(console.main)
    # The mocked response's title — "Lorem Ipsum, title!" — must reach stdout.
    assert "Lorem Ipsum, title!" in result.output
def test_main_invokes_requests_get(runner: CliRunner, mock_requests_get: Mock) -> None:
    """It invokes requests.get."""
    runner.invoke(console.main)
    assert mock_requests_get.called
def test_main_uses_en_wikipedia_org(runner: CliRunner, mock_requests_get: Mock) -> None:
    """It uses the English Wikipedia by default."""
    runner.invoke(console.main)
    # Inspect the positional arguments passed to the mocked requests.get:
    # the first one is the requested URL.
    args, _ = mock_requests_get.call_args
    assert "en.wikipedia.org" in args[0]
def test_main_fails_on_request_error(
    runner: CliRunner, mock_requests_get: Mock
) -> None:
    """It exits with a non-zero exit status code if the request fails."""
    # Configure the mock to raise an exception instead of returning a value.
    mock_requests_get.side_effect = Exception("Boom")
    result = runner.invoke(console.main)
    assert result.exit_code == 1
def test_main_prints_message_on_request_error(
    runner: CliRunner, mock_requests_get: Mock
) -> None:
    """It prints an error message if the request fails."""
    # requests.RequestException is what the console module is expected
    # to catch and report.
    mock_requests_get.side_effect = requests.RequestException
    result = runner.invoke(console.main)
    assert "Error" in result.output
| 32.518987 | 88 | 0.746594 |
f138940a65027c5a17836bdcacbfc95fc5c8cb80 | 13,221 | py | Python | projectq/ops/_command.py | daisyrainsmith/ProjectQ | b8b6057a991a40ec9ad0a5d8c78f0ac90d13bcf9 | [
"Apache-2.0"
] | null | null | null | projectq/ops/_command.py | daisyrainsmith/ProjectQ | b8b6057a991a40ec9ad0a5d8c78f0ac90d13bcf9 | [
"Apache-2.0"
] | null | null | null | projectq/ops/_command.py | daisyrainsmith/ProjectQ | b8b6057a991a40ec9ad0a5d8c78f0ac90d13bcf9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file defines the apply_command function and the Command class.
When a gate is applied to qubits, e.g.,
.. code-block:: python
CNOT | (qubit1, qubit2)
a Command object is generated which represents both the gate, qubits and
control qubits. This Command object then gets sent down the compilation
pipeline.
In detail, the Gate object overloads the operator| (magic method __or__)
to generate a Command object which stores the qubits in a canonical order
using interchangeable qubit indices defined by the gate to allow the
optimizer to cancel the following two gates
.. code-block:: python
Swap | (qubit1, qubit2)
Swap | (qubit2, qubit1)
The command then gets sent to the MainEngine via the
apply wrapper (apply_command).
"""
from enum import IntEnum
from copy import deepcopy
import projectq
from projectq.types import WeakQubitRef, Qureg
from projectq.ops import _basics
def apply_command(cmd):
    """
    Apply a command.

    Forwards the command to the engine that owns its qubits (the target
    engine stored on the command object itself).

    Args:
        cmd (Command): Command to apply
    """
    cmd.engine.receive([cmd])
class Command(object):
    """
    Class:
        used as a container to store commands. If a gate is applied to
        qubits, then the gate and qubits are saved in a command object. Qubits
        are copied into WeakQubitRefs in order to allow early deallocation
        (would be kept alive otherwise). WeakQubitRef qubits don't send
        deallocate gate when destructed.

    Attributes:
        gate: The gate to execute
        qubits: Tuple of qubit lists (e.g. Quregs). Interchangeable qubits
            are stored in a unique order
        control_qubits: The Qureg of control qubits in a unique order
        engine: The engine (usually: MainEngine)
        tags: The list of tag objects associated with this command
            (e.g., ComputeTag, UncomputeTag, LoopTag, ...). tag objects need to
            support ==, != (__eq__ and __ne__) for comparison as used in e.g.
            TagRemover. New tags should always be added to the end of the list.
            This means that if there are e.g. two LoopTags in a command, tag[0]
            is from the inner scope while tag[1] is from the other scope as the
            other scope receives the command after the inner scope LoopEngine
            and hence adds its LoopTag to the end.
        all_qubits: A tuple of control_qubits + qubits
    """

    def __init__(self, engine, gate, qubits, controls=(), tags=()):
        """
        Initialize a Command object.

        Note:
            control qubits (Command.control_qubits) are stored as a
            list of qubits, and command tags (Command.tags) as a list of tag-
            objects. All functions within this class also work if
            WeakQubitRefs are supplied instead of normal Qubit objects
            (see WeakQubitRef).

        Args:
            engine (projectq.cengines.BasicEngine):
                engine which created the qubit (mostly the MainEngine)
            gate (projectq.ops.Gate):
                Gate to be executed
            qubits (tuple[Qureg]):
                Tuple of quantum registers (to which the gate is applied)
            controls (Qureg|list[Qubit]):
                Qubits that condition the command.
            tags (list[object]):
                Tags associated with the command.
        """
        # Re-wrap every qubit as a WeakQubitRef so this command does not
        # keep the original qubits alive.
        qubits = tuple(
            [WeakQubitRef(qubit.engine, qubit.id) for qubit in qreg]
            for qreg in qubits)
        self.gate = gate
        self.tags = list(tags)
        self.qubits = qubits  # property
        self.control_qubits = controls  # property
        self.engine = engine  # property
        self._commutable_circuit_list = self.gate.get_commutable_circuit_list(
            n=len(self.control_qubits))

    @property
    def qubits(self):
        return self._qubits

    @qubits.setter
    def qubits(self, qubits):
        # Always store qubits in their canonical (unique) order.
        self._qubits = self._order_qubits(qubits)

    def __deepcopy__(self, memo):
        """ Deepcopy implementation. Engine should stay a reference."""
        return Command(self.engine, deepcopy(self.gate), self.qubits,
                       list(self.control_qubits), deepcopy(self.tags))

    def get_inverse(self):
        """
        Get the command object corresponding to the inverse of this command.

        Inverts the gate (if possible) and creates a new command object from
        the result.

        Raises:
            NotInvertible: If the gate does not provide an inverse (see
                BasicGate.get_inverse)
        """
        return Command(self._engine, projectq.ops.get_inverse(self.gate),
                       self.qubits, list(self.control_qubits),
                       deepcopy(self.tags))

    def is_identity(self):
        """
        Evaluate if the gate called in the command object is an identity gate.

        Returns:
            True if the gate is equivalent to an Identity gate, False
            otherwise
        """
        return projectq.ops.is_identity(self.gate)

    def is_commutable(self, other):
        """
        Evaluate if this command is commutable with another command.

        Args:
            other (Command): The other command.

        Returns:
            Commutability value (int) : value of the commutability enum
        """
        # NOTE(review): commands sharing no qubits are reported as
        # NOT_COMMUTABLE here, although disjoint commands commute
        # physically — confirm this convention is intended by callers.
        if not overlap(self.all_qubits, other.all_qubits):
            return Commutability.NOT_COMMUTABLE
        self._commutable_circuit_list = self.gate.get_commutable_circuit_list(
            len(self.control_qubits))
        # If other gate may be part of a list which is
        # commutable with gate, return enum MAYBE_COMMUTABLE
        for circuit in self._commutable_circuit_list:
            if type(other.gate) is type(circuit[0]._gate):
                return Commutability.MAYBE_COMMUTABLE
        else:
            # for/else: runs when the loop finishes without a match, and
            # also when the circuit list is empty.
            return self.gate.is_commutable(other.gate)

    def get_merged(self, other):
        """
        Merge this command with another one and return the merged command
        object.

        Args:
            other: Other command to merge with this one (self)

        Raises:
            NotMergeable: if the gates don't supply a get_merged()-function
                or can't be merged for other reasons.
        """
        if (self.tags == other.tags and self.all_qubits == other.all_qubits
                and self.engine == other.engine):
            return Command(self.engine, self.gate.get_merged(other.gate),
                           self.qubits, self.control_qubits,
                           deepcopy(self.tags))
        raise projectq.ops.NotMergeable("Commands not mergeable.")

    def _order_qubits(self, qubits):
        """
        Order the given qubits according to their IDs (for unique comparison
        of commands).

        Args:
            qubits: Tuple of quantum registers (i.e., tuple of lists of
                qubits)

        Returns: Ordered tuple of quantum registers
        """
        ordered_qubits = list(qubits)
        # e.g. [[0,4],[1,2,3]]
        interchangeable_qubit_indices = self.interchangeable_qubit_indices
        for old_positions in interchangeable_qubit_indices:
            # Sort each interchangeable group by the id of its first qubit.
            new_positions = sorted(old_positions,
                                   key=lambda x: ordered_qubits[x][0].id)
            qubits_new_order = [ordered_qubits[i] for i in new_positions]
            for i in range(len(old_positions)):
                ordered_qubits[old_positions[i]] = qubits_new_order[i]
        return tuple(ordered_qubits)

    @property
    def interchangeable_qubit_indices(self):
        """
        Return nested list of qubit indices which are interchangeable.

        Certain qubits can be interchanged (e.g., the qubit order for a Swap
        gate). To ensure that only those are sorted when determining the
        ordering (see _order_qubits), self.interchangeable_qubit_indices is
        used.

        Example:
            If we can interchange qubits 0,1 and qubits 3,4,5,
            then this function returns [[0,1],[3,4,5]]
        """
        return self.gate.interchangeable_qubit_indices

    @property
    def control_qubits(self):
        """ Returns Qureg of control qubits."""
        return self._control_qubits

    @control_qubits.setter
    def control_qubits(self, qubits):
        """
        Set control_qubits to qubits

        Args:
            control_qubits (Qureg): quantum register
        """
        self._control_qubits = ([
            WeakQubitRef(qubit.engine, qubit.id) for qubit in qubits
        ])
        # Canonical order: sorted by qubit id.
        self._control_qubits = sorted(self._control_qubits, key=lambda x: x.id)

    def add_control_qubits(self, qubits):
        """
        Add (additional) control qubits to this command object.

        They are sorted to ensure a canonical order. Also Qubit objects
        are converted to WeakQubitRef objects to allow garbage collection and
        thus early deallocation of qubits.

        Args:
            qubits (list of Qubit objects): List of qubits which control this
                gate, i.e., the gate is only executed if all qubits are
                in state 1.
        """
        assert (isinstance(qubits, list))
        self._control_qubits.extend(
            [WeakQubitRef(qubit.engine, qubit.id) for qubit in qubits])
        self._control_qubits = sorted(self._control_qubits, key=lambda x: x.id)

    @property
    def all_qubits(self):
        """
        Get all qubits (gate and control qubits).

        Returns a tuple T where T[0] is a quantum register (a list of
        WeakQubitRef objects) containing the control qubits and T[1:] contains
        the quantum registers to which the gate is applied.
        """
        return (self.control_qubits, ) + self.qubits

    @property
    def engine(self):
        """
        Return engine to which the qubits belong / on which the gates are
        executed.
        """
        return self._engine

    @engine.setter
    def engine(self, engine):
        """
        Set / Change engine of all qubits to engine.

        Args:
            engine: New owner of qubits and owner of this Command object
        """
        self._engine = engine
        for qureg in self.qubits:
            for qubit in qureg:
                qubit.engine = engine
        for qubit in self.control_qubits:
            qubit.engine = engine

    def __eq__(self, other):
        """
        Compare this command to another command.

        Args:
            other (Command): Command object to compare this to

        Returns: True if Command objects are equal (same gate, applied to same
            qubits; ordered modulo interchangeability; and same tags)
        """
        if (isinstance(other, self.__class__) and self.gate == other.gate
                and self.tags == other.tags and self.engine == other.engine
                and self.all_qubits == other.all_qubits):
            return True
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return self.to_string()

    def to_string(self, symbols=False):
        """
        Get string representation of this Command object.
        """
        qubits = self.qubits
        ctrlqubits = self.control_qubits
        if len(ctrlqubits) > 0:
            # Prepend the control register so it is printed first.
            qubits = (self.control_qubits, ) + qubits
        qstring = ""
        if len(qubits) == 1:
            qstring = str(Qureg(qubits[0]))
        else:
            qstring = "( "
            for qreg in qubits:
                qstring += str(Qureg(qreg))
                qstring += ", "
            qstring = qstring[:-2] + " )"
        # One "C" per control qubit, e.g. "CCX | ...".
        cstring = "C" * len(ctrlqubits)
        return cstring + self.gate.to_string(symbols) + " | " + qstring
def overlap(tuple1, tuple2):
    """
    Count the elements the two tuples of lists have in common.

    Both arguments are flattened; every element of the flattened *tuple1*
    that also occurs in the flattened *tuple2* is counted (duplicates in
    *tuple1* count once each). Used to check if two commands have qubits
    or control qubits in common, e.g.::

        command1.all_qubits = [[control_qubits], [qubits]]
        command2.all_qubits = [[control_qubits], [qubits]]
        overlap(command1.all_qubits, command2.all_qubits) == 4

    means command1 and command2 have 4 qubits or control qubits in common.

    Args:
        tuple1 (tuple[list]): First tuple of lists (e.g. Command.all_qubits).
        tuple2 (tuple[list]): Second tuple of lists.

    Returns:
        int: Number of elements of flattened tuple1 found in flattened tuple2.
    """
    # Flatten tuple2 once; membership uses list "in" (i.e. ==), so the
    # elements only need equality, not hashability.
    flat_tuple2 = [item for sublist in tuple2 for item in sublist]
    return sum(1 for sublist in tuple1 for item in sublist
               if item in flat_tuple2)
class Commutability(IntEnum):
    """Result of a commutability check between two commands."""
    NOT_COMMUTABLE = 0
    COMMUTABLE = 1
    MAYBE_COMMUTABLE = 2
| 35.829268 | 105 | 0.62673 |
77834a426e1d00a96753c44d2649b1a157552d4a | 2,146 | py | Python | app/main/auth/forms.py | printdaigang/groupby | e222cc19751abcd05ebb8e18a086fd448148e8bf | [
"MIT"
] | null | null | null | app/main/auth/forms.py | printdaigang/groupby | e222cc19751abcd05ebb8e18a086fd448148e8bf | [
"MIT"
] | null | null | null | app/main/auth/forms.py | printdaigang/groupby | e222cc19751abcd05ebb8e18a086fd448148e8bf | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from app import db
from app.models import User
from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms import ValidationError
from wtforms.validators import Email, Length, DataRequired, EqualTo
class LoginForm(Form):
    """Login form: email, password and a remember-me flag.

    All user-facing validation messages are intentionally in Chinese.
    """
    email = StringField('Email',
                        validators=[DataRequired(message=u"该项忘了填写了!"), Length(1, 64), Email(message=u"你确定这是 Email ?")])
    password = PasswordField(u'密码', validators=[DataRequired(message=u"该项忘了填写了!"), Length(6, 32)])
    remember_me = BooleanField(u"保持我的登入状态", default=True)
    submit = SubmitField(u'登入')
class RegistrationForm(Form):
    """Registration form: email, username and password with confirmation."""
    email = StringField('Email',
                        validators=[DataRequired(message=u"该项忘了填写了!"), Length(1, 64), Email(message=u"你确定这是 Email ?")])
    name = StringField(u'用户名', validators=[DataRequired(message=u"该项忘了填写了!"), Length(1, 64)])
    password = PasswordField(u'密码',
                             validators=[DataRequired(message=u"该项忘了填写了!"), EqualTo('password2', message=u'密码必须匹配'),
                                         Length(6, 32)])
    password2 = PasswordField(u'再次确认密码', validators=[DataRequired(message=u"该项忘了填写了!")])
    submit = SubmitField(u'注册')

    def validate_email(self, filed):
        # WTForms inline validator (validate_<fieldname>): rejects emails
        # already registered; comparison is case-insensitive.
        if User.query.filter(db.func.lower(User.email) == db.func.lower(filed.data)).first():
            raise ValidationError(u'该 Email 已经被注册了')
class ChangePasswordForm(Form):
    """Change-password form: old password plus new password with confirmation."""
    old_password = PasswordField(u'旧密码', validators=[DataRequired(message=u"该项忘了填写了!")])
    new_password = PasswordField(u'新密码', validators=[DataRequired(message=u"该项忘了填写了!"),
                                                    EqualTo('confirm_password', message=u'密码必须匹配'),
                                                    Length(6, 32)])
    confirm_password = PasswordField(u'确认新密码', validators=[DataRequired(message=u"该项忘了填写了!")])
    submit = SubmitField(u"保存密码")

    def validate_old_password(self, filed):
        # Imported inside the method, presumably to avoid a circular import
        # at module load time — TODO confirm.
        from flask.ext.login import current_user
        if not current_user.verify_password(filed.data):
            raise ValidationError(u'原密码错误')
| 47.688889 | 119 | 0.646785 |
a279e33bf1c2840ec8d857bd275621e4f84027fe | 400 | py | Python | beegarden/themes/dark/__init__.py | rosin55/beegarden | 6d5173ead78d94ee39fec7182665ef950bf49fcc | [
"BSD-2-Clause-FreeBSD"
] | 4 | 2017-08-08T08:17:55.000Z | 2021-08-31T19:51:01.000Z | beegarden/themes/dark/__init__.py | rosin55/beegarden | 6d5173ead78d94ee39fec7182665ef950bf49fcc | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | beegarden/themes/dark/__init__.py | rosin55/beegarden | 6d5173ead78d94ee39fec7182665ef950bf49fcc | [
"BSD-2-Clause-FreeBSD"
] | 4 | 2016-02-22T18:47:00.000Z | 2019-03-25T21:30:32.000Z | # -*- coding: utf-8 -*-
import os
# Theme assets are looked up relative to this package directory.
PICTURES_PATH = os.path.dirname(__file__)

# BACKGROUND_COLOR = (13, 13, 13)
BACKGROUND_IMAGE = 'background.jpg'

# Meter colours — 3-int tuples, presumably RGB (confirm against renderer).
METER_1_COLOR = (80, 80, 80)
METER_2_COLOR = (70, 70, 90)

# Playing-field dimensions.
FIELD_WIDTH = 1200
FIELD_HEIGHT = 600
TEAMS_COUNT = 4
DEBUG = False

# Bee health / combat tuning values.
MAX_HEALTH = 100
STING_POWER = 50
HEALTH_TOP_UP_SPEED = 0.5
BEEHIVE_SAFE_DISTANCE = 200
# See robogame_engine.constants
6829d96f67dd4943f71e2b9c0d1aa2f7d575581e | 23,624 | py | Python | peri2organise/tutor/views.py | jakemalley/peri2organise | fa3416103d08d7a86004d51d39dac2513ad5053b | [
"MIT"
] | null | null | null | peri2organise/tutor/views.py | jakemalley/peri2organise | fa3416103d08d7a86004d51d39dac2513ad5053b | [
"MIT"
] | null | null | null | peri2organise/tutor/views.py | jakemalley/peri2organise | fa3416103d08d7a86004d51d39dac2513ad5053b | [
"MIT"
] | null | null | null | # views.py
# Jake Malley
# Views used in the tutor blueprint.
# Flask imports
from flask import Blueprint
from flask import abort
from flask import flash
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from flask.ext.login import current_user
from flask.ext.mail import Message
# Application Imports
from peri2organise import app
from peri2organise import db
from peri2organise import mail
from peri2organise.models import Lesson
from peri2organise.models import Parent
from peri2organise.models import Room
from peri2organise.models import User
from peri2organise.models import UserLessonAssociation
from peri2organise.auth.utils import login_required
from peri2organise.student.utils import select_future_lessons
from peri2organise.student.utils import select_past_lessons
from peri2organise.student.utils import select_lessons_assoc
from peri2organise.student.utils import select_user
from peri2organise.student.utils import select_users_by_roles
from peri2organise.student.forms import ContactForm
from peri2organise.tutor.forms import AddLessonForm
from peri2organise.tutor.forms import EditLessonForm
from peri2organise.tutor.forms import SelectMinMaxDateForm
from peri2organise.tutor.forms import RecordSingleAttendanceForm
from peri2organise.tutor.forms import UpdatePersonalDetailsForm
from peri2organise.tutor.utils import generate_timesheet
from peri2organise.tutor.utils import total_time
from peri2organise.tutor.utils import select_students
from peri2organise.tutor.utils import select_parents
from peri2organise.tutor.utils import select_lessons
from peri2organise.tutor.utils import check_attendance_complete
from peri2organise.tutor.utils import send_lesson_update
# Imports
from datetime import datetime
from datetime import time
from datetime import timedelta
# Flask blueprint grouping every tutor-facing route in this module; all views
# below are registered on it under the 'tutor.' endpoint prefix.
tutor_blueprint = Blueprint('tutor', __name__)
@tutor_blueprint.route('/')
@login_required(roles=['TUT', 'STA'])
def index():
    """Blueprint root: send tutors straight to their dashboard."""
    dashboard_url = url_for('tutor.dashboard')
    return redirect(dashboard_url)
@tutor_blueprint.route('/dashboard')
@login_required(roles=['TUT', 'STA'])
def dashboard():
    """Render the tutor dashboard listing the lessons scheduled for today."""
    # Restrict the query to the 24-hour window starting at midnight today.
    today = datetime.now().date()
    lessons_today = select_lessons(
        current_user,
        min_date=today,
        max_date=today + timedelta(days=1),
        order_by=Lesson.lesson_datetime.asc(),
    )
    return render_template('tutor/dashboard.html', todays_lessons=lessons_today)
@tutor_blueprint.route('/lessons')
@login_required(roles=['TUT', 'STA'])
def lessons():
    """List the current tutor's lessons, split around the present moment."""
    # Partition into upcoming and already-taught lessons for the template.
    future = select_future_lessons(current_user)
    past = select_past_lessons(current_user)
    return render_template(
        'tutor/lessons.html',
        upcoming_lessons=future,
        previous_lessons=past,
    )
@tutor_blueprint.route('/lessons/<int:lesson_id>')
@login_required(roles=['TUT', 'STA'])
def view_lesson(lesson_id):
    """Show one lesson together with this tutor's attendance association."""
    # The association row links the current user to the lesson and carries
    # the attendance data the template displays alongside the lesson itself.
    assoc = select_lessons_assoc(current_user, lesson_id=lesson_id, single=True)
    if not assoc:
        # Unknown lesson id, or a lesson this tutor is not part of.
        abort(404)
    return render_template(
        'tutor/view_lesson.html', lesson=assoc.lesson, assoc=assoc
    )
@tutor_blueprint.route('/lessons/add', methods=['GET', 'POST'])
@login_required(roles=['TUT', 'STA'])
def add_lesson():
    """
    Add a new lesson.

    GET renders the creation form; a valid POST builds the Lesson, attaches
    the selected users plus the current tutor, optionally emails the selected
    users, commits, and redirects back to this page (post/redirect/get).
    """
    # Create empty error object.
    error = None
    # Create form.
    add_lesson_form = AddLessonForm()
    # Add the rooms.
    add_lesson_form.lesson_room_id.choices = [
        (room.room_id, room.get_location()) for room in Room.query.all()
    ]
    # Select all users.
    all_users = select_users_by_roles(('STU', 'TUT', 'STA'))
    # Update the form choices.
    add_lesson_form.users.choices = [
        (user.user_id, user.get_full_name() + " (" + user.get_role(pretty=True) + ")") for user in all_users
    ]
    # Remove the current user (they are added to the lesson automatically below).
    add_lesson_form.users.choices.remove(
        (current_user.user_id, current_user.get_full_name() + " (" + current_user.get_role(pretty=True) + ")")
    )
    if request.method == 'POST' and add_lesson_form.validate_on_submit():
        # Create a new lesson object.
        new_lesson = Lesson()
        # Combine the separate date / hour / minute fields into one datetime.
        lesson_datetime = datetime.combine(
            add_lesson_form.lesson_date.data,
            time(
                add_lesson_form.lesson_hour.data,
                add_lesson_form.lesson_minute.data
            )
        )
        # Update the lesson details.
        # NOTE: duration is multiplied by 60 — form value is presumably
        # minutes stored as seconds; confirm against the Lesson model.
        new_lesson.update_lesson_details(
            lesson_datetime=lesson_datetime,
            lesson_duration=add_lesson_form.lesson_duration.data*60,
            lesson_notes=add_lesson_form.lesson_notes.data,
            room_id=add_lesson_form.lesson_room_id.data
        )
        # Iterate through the users selected on the form.
        for user_id in add_lesson_form.users.data:
            # Select the user object.
            user_object = select_user(user_id)
            # Append it to the lessons users.
            new_lesson.users.append(user_object)
            # Send an update (only when the app is configured to do so).
            if app.config['UPDATE_ON_NEW_LESSON']:
                # Send an email update.
                html = 'A new lesson has been created on: ' + new_lesson.get_lesson_date()
                # Send a lesson update.
                send_lesson_update(
                    user_object, html,
                    url_for('student.lessons', _external=True)
                )
        # Add the current user to the lesson.
        new_lesson.users.append(current_user)
        # Flash a success message.
        flash("Successfully added new lesson.")
        # Add the lesson to the db.
        db.session.add(new_lesson)
        # Commit changes.
        db.session.commit()
        return redirect(url_for('tutor.add_lesson'))
    return render_template(
        'tutor/add_lesson.html', add_lesson_form=add_lesson_form, error=error
    )
@tutor_blueprint.route('/lessons/edit/<int:lesson_id>', methods=['GET', 'POST'])
@login_required(roles=['TUT', 'STA'])
def edit_lesson(lesson_id):
    """
    Edit a lesson.

    GET renders the edit form pre-filled from the lesson. A valid POST
    updates lesson details, emails students about the change, adds/removes
    users (emailing each), and commits.

    NOTE(review): the POST path falls through to the render below instead of
    redirecting, and the subsequent ``process()`` call resets the form to the
    defaults — a refresh will resubmit the form. Consider post/redirect/get.
    """
    # Find the lesson with the given ID.
    lesson = select_lessons(current_user, lesson_id=lesson_id, single=True)
    # If the lesson is not found abort.
    if not lesson:
        # HTTP not found.
        abort(404)
    # Create a new edit lesson form.
    edit_lesson_form = EditLessonForm()
    # Add the rooms.
    edit_lesson_form.lesson_room_id.choices = [
        (room.room_id, room.get_location()) for room in Room.query.all()
    ]
    # Select all users.
    all_users = select_users_by_roles(('STU', 'TUT', 'STA'))
    # Set the choices for the users that can be selected for the new users.
    edit_lesson_form.add_users.choices = [
        (user.user_id, user.get_full_name() + " (" + user.get_role(pretty=True) + ")") for user in all_users
    ]
    # Remove the current user.
    edit_lesson_form.add_users.choices.remove(
        (current_user.user_id, current_user.get_full_name() + " (" + current_user.get_role(pretty=True) + ")")
    )
    # All the users that can be removed are the users of the lesson.
    edit_lesson_form.remove_users.choices = [
        (user.user_id, user.get_full_name() + " (" + user.get_role(pretty=True) + ")") for user in lesson.users
    ]
    # Remove the current user from these choices (as they must still be in the lesson).
    edit_lesson_form.remove_users.choices.remove(
        (current_user.user_id, current_user.get_full_name() + " (" + current_user.get_role(pretty=True) + ")")
    )
    if request.method == 'POST' and edit_lesson_form.validate_on_submit():
        # Combine the separate date / hour / minute fields into one datetime.
        lesson_datetime = datetime.combine(
            edit_lesson_form.lesson_date.data,
            time(
                edit_lesson_form.lesson_hour.data,
                edit_lesson_form.lesson_minute.data
            )
        )
        # Update the lesson (duration stored as seconds — form value * 60).
        lesson.update_lesson_details(
            lesson_datetime=lesson_datetime,
            lesson_duration=edit_lesson_form.lesson_duration.data*60,
            lesson_notes=edit_lesson_form.lesson_notes.data,
            room_id=edit_lesson_form.lesson_room_id.data
        )
        if app.config['UPDATE_ON_EDIT_LESSON']:
            # Iterate through the users and send updates (students only).
            for user in lesson.users:
                if user.get_role() == 'STU':
                    # Send an email update.
                    html = 'Your lesson on: ' + lesson.get_lesson_date() + \
                        ' has been updated.'
                    # Send a lesson update.
                    send_lesson_update(
                        user, html,
                        url_for(
                            'student.view_lesson',
                            lesson_id=lesson.lesson_id,
                            _external=True
                        )
                    )
        # Iterate through the users to add.
        for user_id in edit_lesson_form.add_users.data:
            # Select the user object.
            user_object = select_user(user_id)
            # If the user is not already going to the lesson.
            if user_object not in lesson.users:
                # Append it to the lessons users.
                lesson.users.append(user_object)
                # Send an email update.
                html = 'You have been added to a lesson on: ' + lesson.get_lesson_date()
                # Send a lesson update.
                send_lesson_update(
                    user_object, html,
                    url_for(
                        'student.view_lesson',
                        lesson_id=lesson.lesson_id,
                        _external=True
                    )
                )
        # Iterate through the users to remove.
        for user_id in edit_lesson_form.remove_users.data:
            # Delete the user lesson association for this user/lesson.
            db.session.delete(
                UserLessonAssociation.query.filter(
                    UserLessonAssociation.lesson_id == lesson_id
                ).filter(
                    UserLessonAssociation.user_id == user_id
                ).first()
            )
            # Send an email update.
            html = 'You have been removed from the lesson on: ' + lesson.get_lesson_date() \
                + ' this means your attendance is no longer required.'
            # Send a lesson update.
            send_lesson_update(
                User.query.filter(User.user_id == user_id).first(), html,
                url_for('student.lessons', _external=True)
            )
        # Commit Changes.
        db.session.commit()
        # Flash a success message.
        flash("Successfully updated lesson.")
    # Set the defaults (pre-fill the form from the, possibly just updated, lesson).
    edit_lesson_form.lesson_date.default = lesson.lesson_datetime.date()
    edit_lesson_form.lesson_notes.default = lesson.get_lesson_notes()
    edit_lesson_form.lesson_room_id.default = lesson.room.room_id
    # Process the form.
    edit_lesson_form.process()
    return render_template(
        'tutor/edit_lesson.html', edit_lesson_form=edit_lesson_form, lesson=lesson
    )
@tutor_blueprint.route('/students')
@login_required(roles=['TUT', 'STA'])
def students():
    """List every student taught by the current tutor."""
    my_students = select_students(current_user, my_students=True)
    return render_template('tutor/students.html', students=my_students)
@tutor_blueprint.route('/students/<int:student_id>')
@login_required(roles=['TUT', 'STA'])
def view_student(student_id):
    """Show the profile page for a single student."""
    student = select_user(student_id, role='STU')
    if student is None:
        # No student with that id: not found.
        abort(404)
    return render_template('tutor/view_student.html', student=student)
@tutor_blueprint.route('/parents/<int:parent_id>')
@login_required(roles=['TUT', 'STA'])
def view_parent(parent_id):
    """Show the profile page for a single parent."""
    parent = select_parents(parent_id=parent_id, single=True)
    if parent is None:
        # No parent with that id: not found.
        abort(404)
    return render_template('tutor/view_parent.html', parent=parent)
@tutor_blueprint.route('/attendance', methods=['GET', 'POST'])
@login_required(roles=['TUT', 'STA'])
def attendance():
    """List lessons still awaiting attendance, optionally date-filtered."""
    select_date_form = SelectMinMaxDateForm()
    # Base query: this tutor's lessons with no attendance recorded yet.
    query_kwargs = dict(
        attendance_recorded=False,
        order_by=Lesson.lesson_datetime.asc(),
    )
    if request.method == 'POST' and select_date_form.validate_on_submit():
        # A valid date range was submitted; narrow the query to it.
        query_kwargs['min_date'] = select_date_form.min_date.data
        query_kwargs['max_date'] = select_date_form.max_date.data
    no_attendance_recorded = select_lessons(current_user, **query_kwargs)
    return render_template(
        'tutor/attendance.html',
        no_attendance_recorded=no_attendance_recorded,
        select_date_form=select_date_form,
    )
@tutor_blueprint.route('/attendance/record/<int:lesson_id>', methods=['GET', 'POST'])
@login_required(roles=['TUT', 'STA'])
def record_attendance(lesson_id):
    """
    Record attendance for a lesson.

    GET renders the recording form. A valid POST updates one user's
    attendance code, emails the user/parent when they were late or absent,
    recomputes whether the whole lesson's attendance is complete, commits,
    and redirects back here.
    """
    # Get the UserLessonAssociation for the current and
    # the given lesson id. (So we can also display attendance etc.)
    lesson = select_lessons(current_user, lesson_id=lesson_id, single=True)
    # Ensure the lesson id/association object is found.
    if not lesson:
        abort(404)
    record_single_attendance_form = RecordSingleAttendanceForm()
    if request.method == 'POST' and record_single_attendance_form.validate_on_submit():
        # Look up the association row for the submitted user on this lesson.
        assoc = UserLessonAssociation.query.filter(
            UserLessonAssociation.lesson_id == lesson_id
        ).filter(
            UserLessonAssociation.user_id == int(record_single_attendance_form.user_id.data)
        ).first()
        if assoc:
            assoc.attendance_code = record_single_attendance_form.attendance_code.data
            flash("Successfully updated lesson attendance.")
        else:
            # abort() raises, so the code below only runs when assoc exists.
            abort(500)
        # We only want to send updates if they we're late ('L') or not there ('N').
        if assoc.attendance_code == 'L' or assoc.attendance_code == 'N':
            # Send an email update.
            html = 'Attendance for your lesson on: ' + assoc.lesson.get_lesson_date() \
                + ' has been updated. Your attendance is now recorded as: ' + \
                assoc.get_lesson_attendance_str()
            # Send a lesson update (parent=True also notifies the parent).
            send_lesson_update(
                assoc.user, html,
                url_for(
                    'student.view_lesson',
                    lesson_id=lesson_id,
                    _external=True
                ),
                parent=True
            )
        if check_attendance_complete(lesson):
            # The attendance is complete.
            lesson.update_lesson_details(attendance_recorded=True)
        else:
            lesson.update_lesson_details(attendance_recorded=False)
        # Save Changes
        db.session.commit()
        # Refresh (post/redirect/get).
        return redirect(url_for('tutor.record_attendance', lesson_id=lesson_id))
    # Render the view lesson template and pass in the association and the lesson object.
    return render_template(
        'tutor/record_attendance.html', lesson=lesson,
        record_single_attendance_form=record_single_attendance_form
    )
@tutor_blueprint.route('/attendance/view/<int:lesson_id>')
@login_required(roles=['TUT', 'STA'])
def view_attendance(lesson_id):
    """Read-only attendance summary for one of this tutor's lessons."""
    # Only lessons belonging to the current tutor resolve; others 404.
    lesson = select_lessons(current_user, lesson_id=lesson_id, single=True)
    if not lesson:
        abort(404)
    return render_template('tutor/view_attendance.html', lesson=lesson)
@tutor_blueprint.route('/contact', methods=['GET', 'POST'])
@login_required(roles=['TUT', 'STA'])
def contact():
    """
    Contact student or staff member.

    Renders a contact form listing all tutors, staff and students; a valid
    POST emails the chosen user and redirects to the dashboard.
    """
    # Create an empty error.
    error = None
    # Create a form object.
    contact_form = ContactForm()
    # Select all the staff and tutors (and students) as recipient choices.
    contact_form.user.choices = [
        (user.user_id, user.get_full_name() + " (" + user.get_role(pretty=True) + ")") for user in select_users_by_roles(
            ('TUT', 'STA', 'STU')
        )
    ]
    if request.method == 'POST' and contact_form.validate_on_submit():
        # Form is valid.
        # Check the staff members is not the default
        # NOTE(review): this compares against the *string* '0' while the
        # choices above use integer user_ids; if the SelectField coerces to
        # int this branch can never fire — confirm against ContactForm.
        if contact_form.user.data == '0':
            error = 'A user must be chosen.'
        else:
            # Find the user.
            user = User.query.filter(User.user_id == contact_form.user.data).first()
            # Create a new email message.
            message = Message(contact_form.subject.data, recipients=[user.get_email_address()])
            message.html = render_template(
                'email/message.html',
                user=user,
                subject=contact_form.subject.data,
                message=contact_form.message.data
            )
            # Send the message.
            mail.send(message)
            # Flash a success message.
            flash('Successfully sent message.')
            # Redirect to the dashboard.
            return redirect(url_for('tutor.dashboard'))
    return render_template(
        'tutor/contact.html', contact_form=contact_form, error=error
    )
@tutor_blueprint.route('/contactparent', methods=['GET', 'POST'])
@login_required(roles=['TUT', 'STA'])
def contact_parent():
    """
    Contact parent.

    Same flow as contact(), but the recipient choices are Parent records
    and the email template is rendered with parent=True.
    """
    # Create an empty error.
    error = None
    # Create a form object.
    contact_form = ContactForm()
    # Select all the parents as recipient choices.
    contact_form.user.choices = [
        (parent.parent_id, parent.get_full_name()) for parent in select_parents()
    ]
    if request.method == 'POST' and contact_form.validate_on_submit():
        # Form is valid.
        # Check the staff members is not the default
        # NOTE(review): string '0' compared against integer parent_id
        # choices — same suspect dead branch as in contact(); confirm.
        if contact_form.user.data == '0':
            error = 'A parent must be chosen.'
        else:
            # Find the user.
            parent = Parent.query.filter(Parent.parent_id == contact_form.user.data).first()
            # Create a new email message.
            message = Message(contact_form.subject.data, recipients=[parent.get_email_address()])
            message.html = render_template(
                'email/message.html',
                user=parent,
                subject=contact_form.subject.data,
                message=contact_form.message.data,
                parent=True
            )
            # Send the message.
            mail.send(message)
            # Flash a success message.
            flash('Successfully sent message.')
            # Redirect to the dashboard.
            return redirect(url_for('tutor.dashboard'))
    return render_template(
        'tutor/contactparent.html', contact_form=contact_form, error=error
    )
@tutor_blueprint.route('/personaldetails', methods=['GET', 'POST'])
@login_required(roles=['TUT', 'STA'])
def personal_details():
    """Show and, on confirmed POST, update the tutor's personal details."""
    update_personal_details_form = UpdatePersonalDetailsForm()
    form = update_personal_details_form
    if request.method == 'POST' and form.validate_on_submit():
        # Only persist when the confirmation checkbox was ticked.
        if form.update_details.data:
            current_user.update_user_details(
                first_name=form.tutor_first_name.data,
                last_name=form.tutor_last_name.data,
                email_address=form.tutor_email_address.data,
                telephone_number=form.telephone_number.data,
                speciality=form.speciality.data,
            )
            db.session.commit()
            flash('Successfully updated personal details.')
            # Post/redirect/get so a refresh does not resubmit the form.
            return redirect(url_for('tutor.personal_details'))
    return render_template(
        'tutor/personaldetails.html',
        update_personal_details_form=update_personal_details_form,
        personal_details=current_user.get_personal_details(),
    )
@tutor_blueprint.route('/timesheet', methods=['GET', 'POST'])
@login_required(roles=['TUT', 'STA'])
def timesheet():
    """Summarise the tutor's lessons and total worked time over a date range."""
    select_date_form = SelectMinMaxDateForm()
    if request.method == 'POST' and select_date_form.validate_on_submit():
        # Use the range the tutor asked for.
        min_date = select_date_form.min_date.data
        max_date = select_date_form.max_date.data
    else:
        # Default to roughly the last month and reflect that in the form.
        max_date = datetime.now()
        min_date = max_date - timedelta(days=30)
        select_date_form.min_date.default = min_date
        select_date_form.max_date.default = max_date
        select_date_form.process()
    time_sheet_lessons = generate_timesheet(current_user, min_date, max_date)
    time_sheet_time = total_time(time_sheet_lessons)
    return render_template(
        'tutor/timesheet.html',
        time_sheet_lessons=time_sheet_lessons,
        time_sheet_time=time_sheet_time,
        select_date_form=select_date_form,
    )
| 36.288786 | 121 | 0.646758 |
e8f73a6ab103203f908fa86b1a8be93b9692a71f | 5,832 | py | Python | third_party/google-endpoints/Crypto/SelfTest/Cipher/test_Blowfish.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/google-endpoints/Crypto/SelfTest/Cipher/test_Blowfish.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | third_party/google-endpoints/Crypto/SelfTest/Cipher/test_Blowfish.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/test_Blowfish.py: Self-test for the Blowfish cipher
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.Blowfish"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (plaintext, ciphertext, key) tuples.
# All three entries are big-endian hex strings; plaintext/ciphertext are
# single 8-byte Blowfish blocks, while the keys vary in length.
test_data = [
    # Test vectors from http://www.schneier.com/code/vectors.txt
    ('0000000000000000', '4ef997456198dd78', '0000000000000000'),
    ('ffffffffffffffff', '51866fd5b85ecb8a', 'ffffffffffffffff'),
    ('1000000000000001', '7d856f9a613063f2', '3000000000000000'),
    ('1111111111111111', '2466dd878b963c9d', '1111111111111111'),
    ('1111111111111111', '61f9c3802281b096', '0123456789abcdef'),
    ('0123456789abcdef', '7d0cc630afda1ec7', '1111111111111111'),
    ('0000000000000000', '4ef997456198dd78', '0000000000000000'),
    ('0123456789abcdef', '0aceab0fc6a0a28d', 'fedcba9876543210'),
    ('01a1d6d039776742', '59c68245eb05282b', '7ca110454a1a6e57'),
    ('5cd54ca83def57da', 'b1b8cc0b250f09a0', '0131d9619dc1376e'),
    ('0248d43806f67172', '1730e5778bea1da4', '07a1133e4a0b2686'),
    ('51454b582ddf440a', 'a25e7856cf2651eb', '3849674c2602319e'),
    ('42fd443059577fa2', '353882b109ce8f1a', '04b915ba43feb5b6'),
    ('059b5e0851cf143a', '48f4d0884c379918', '0113b970fd34f2ce'),
    ('0756d8e0774761d2', '432193b78951fc98', '0170f175468fb5e6'),
    ('762514b829bf486a', '13f04154d69d1ae5', '43297fad38e373fe'),
    ('3bdd119049372802', '2eedda93ffd39c79', '07a7137045da2a16'),
    ('26955f6835af609a', 'd887e0393c2da6e3', '04689104c2fd3b2f'),
    ('164d5e404f275232', '5f99d04f5b163969', '37d06bb516cb7546'),
    ('6b056e18759f5cca', '4a057a3b24d3977b', '1f08260d1ac2465e'),
    ('004bd6ef09176062', '452031c1e4fada8e', '584023641aba6176'),
    ('480d39006ee762f2', '7555ae39f59b87bd', '025816164629b007'),
    ('437540c8698f3cfa', '53c55f9cb49fc019', '49793ebc79b3258f'),
    ('072d43a077075292', '7a8e7bfa937e89a3', '4fb05e1515ab73a7'),
    ('02fe55778117f12a', 'cf9c5d7a4986adb5', '49e95d6d4ca229bf'),
    ('1d9d5c5018f728c2', 'd1abb290658bc778', '018310dc409b26d6'),
    ('305532286d6f295a', '55cb3774d13ef201', '1c587f1c13924fef'),
    ('0123456789abcdef', 'fa34ec4847b268b2', '0101010101010101'),
    ('0123456789abcdef', 'a790795108ea3cae', '1f1f1f1f0e0e0e0e'),
    ('0123456789abcdef', 'c39e072d9fac631d', 'e0fee0fef1fef1fe'),
    ('ffffffffffffffff', '014933e0cdaff6e4', '0000000000000000'),
    ('0000000000000000', 'f21e9a77b71c49bc', 'ffffffffffffffff'),
    ('0000000000000000', '245946885754369a', '0123456789abcdef'),
    ('ffffffffffffffff', '6b5c5a9c5d9e0a5a', 'fedcba9876543210'),
    # Variable key-length vectors: the key grows one byte at a time.
    ('fedcba9876543210', 'f9ad597c49db005e', 'f0'),
    ('fedcba9876543210', 'e91d21c1d961a6d6', 'f0e1'),
    ('fedcba9876543210', 'e9c2b70a1bc65cf3', 'f0e1d2'),
    ('fedcba9876543210', 'be1e639408640f05', 'f0e1d2c3'),
    ('fedcba9876543210', 'b39e44481bdb1e6e', 'f0e1d2c3b4'),
    ('fedcba9876543210', '9457aa83b1928c0d', 'f0e1d2c3b4a5'),
    ('fedcba9876543210', '8bb77032f960629d', 'f0e1d2c3b4a596'),
    ('fedcba9876543210', 'e87a244e2cc85e82', 'f0e1d2c3b4a59687'),
    ('fedcba9876543210', '15750e7a4f4ec577', 'f0e1d2c3b4a5968778'),
    ('fedcba9876543210', '122ba70b3ab64ae0', 'f0e1d2c3b4a596877869'),
    ('fedcba9876543210', '3a833c9affc537f6', 'f0e1d2c3b4a5968778695a'),
    ('fedcba9876543210', '9409da87a90f6bf2', 'f0e1d2c3b4a5968778695a4b'),
    ('fedcba9876543210', '884f80625060b8b4', 'f0e1d2c3b4a5968778695a4b3c'),
    ('fedcba9876543210', '1f85031c19e11968', 'f0e1d2c3b4a5968778695a4b3c2d'),
    ('fedcba9876543210', '79d9373a714ca34f', 'f0e1d2c3b4a5968778695a4b3c2d1e'),
    ('fedcba9876543210', '93142887ee3be15c',
        'f0e1d2c3b4a5968778695a4b3c2d1e0f'),
    ('fedcba9876543210', '03429e838ce2d14b',
        'f0e1d2c3b4a5968778695a4b3c2d1e0f00'),
    ('fedcba9876543210', 'a4299e27469ff67b',
        'f0e1d2c3b4a5968778695a4b3c2d1e0f0011'),
    ('fedcba9876543210', 'afd5aed1c1bc96a8',
        'f0e1d2c3b4a5968778695a4b3c2d1e0f001122'),
    ('fedcba9876543210', '10851c0e3858da9f',
        'f0e1d2c3b4a5968778695a4b3c2d1e0f00112233'),
    ('fedcba9876543210', 'e6f51ed79b9db21f',
        'f0e1d2c3b4a5968778695a4b3c2d1e0f0011223344'),
    ('fedcba9876543210', '64a6e14afd36b46f',
        'f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455'),
    ('fedcba9876543210', '80c7d7d45a5479ad',
        'f0e1d2c3b4a5968778695a4b3c2d1e0f00112233445566'),
    ('fedcba9876543210', '05044b62fa52d080',
        'f0e1d2c3b4a5968778695a4b3c2d1e0f0011223344556677'),
]
def get_tests(config=None):
    """Return the Blowfish self-test cases built from ``test_data``.

    Parameters
    ----------
    config : dict, optional
        Test configuration; unused here but accepted for interface parity
        with the other self-test modules.

    Returns
    -------
    list
        unittest-compatible test cases produced by ``make_block_tests``.
    """
    # Fixed: the previous signature used a mutable default (``config={}``),
    # a classic Python pitfall. ``None`` is behaviorally identical since
    # the argument is never used in the body.
    from Crypto.Cipher import Blowfish
    from common import make_block_tests
    return make_block_tests(Blowfish, "Blowfish", test_data)
if __name__ == '__main__':
    import unittest

    def suite():
        # Bundle this module's cases so unittest.main can find them by name.
        return unittest.TestSuite(get_tests())

    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| 51.157895 | 79 | 0.716564 |
5fd947fcb500841ac2e5ed3ce3c9f331fb0f317c | 6,162 | py | Python | pds_pipelines/ingest_process.py | amystamile-usgs/PDS-Pipelines | cdf5c03b08cf6c231117039ac44b8d78d98cd4c2 | [
"Unlicense"
] | 8 | 2019-04-18T01:41:26.000Z | 2021-10-06T00:34:02.000Z | pds_pipelines/ingest_process.py | amystamile-usgs/PDS-Pipelines | cdf5c03b08cf6c231117039ac44b8d78d98cd4c2 | [
"Unlicense"
] | 420 | 2018-03-07T02:02:41.000Z | 2022-01-31T18:23:06.000Z | pds_pipelines/ingest_process.py | amystamile-usgs/PDS-Pipelines | cdf5c03b08cf6c231117039ac44b8d78d98cd4c2 | [
"Unlicense"
] | 19 | 2018-03-13T19:52:04.000Z | 2022-02-26T09:46:27.000Z | #!/usr/bin/env python
import os
import sys
import datetime
import logging
import json
import argparse
import hashlib
import pytz
from ast import literal_eval
from pds_pipelines.redis_queue import RedisQueue
from pds_pipelines.redis_lock import RedisLock
from pds_pipelines.db import db_connect
from pds_pipelines.config import pds_info, pds_log, pds_db, archive_base, web_base, lock_obj, upc_error_queue
from pds_pipelines.models.pds_models import Files
def parse_args(argv=None):
    """Parse command-line arguments for the PDS DI database ingest step.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse. Defaults to ``None``, which makes argparse
        read ``sys.argv[1:]`` — so existing callers are unaffected, while
        tests can pass an explicit list.

    Returns
    -------
    argparse.Namespace
        Parsed options: ``override`` (bool), ``log_level`` (str),
        ``namespace`` (str or None).
    """
    parser = argparse.ArgumentParser(description='PDS DI Database Ingest')
    # Re-ingest files even when already present with a matching checksum.
    parser.add_argument('--override', dest='override', action='store_true')
    parser.set_defaults(override=False)
    parser.add_argument('--log', '-l', dest="log_level",
                        choices=['DEBUG', 'INFO',
                                 'WARNING', 'ERROR', 'CRITICAL'],
                        help="Set the log level.", default='INFO')
    parser.add_argument('--namespace', '-n', dest="namespace",
                        help="The namespace used for this queue.")
    args = parser.parse_args(argv)
    return args
def main(user_args):
    """Drain the ingest Redis queue, checksum each file and upsert it into
    the PDS Files table.

    Parameters
    ----------
    user_args : argparse.Namespace
        Parsed CLI options (``log_level``, ``override``, ``namespace``).

    Returns
    -------
    int or None
        1 when the database connection fails; otherwise falls off the end
        (None), which ``sys.exit`` treats as success.
    """
    log_level = user_args.log_level
    override = user_args.override
    namespace = user_args.namespace
    logger = logging.getLogger('Ingest_Process')
    level = logging.getLevelName(log_level)
    logger.setLevel(level)
    logFileHandle = logging.FileHandler(pds_log + 'Ingest.log')
    print("Log File: {}Ingest.log".format(pds_log))
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s, %(message)s')
    logFileHandle.setFormatter(formatter)
    logger.addHandler(logFileHandle)
    logger.info("Starting Ingest Process")
    # NOTE(review): the file handle from open() here is never closed.
    PDSinfoDICT = json.load(open(pds_info, 'r'))
    RQ_main = RedisQueue('Ingest_ReadyQueue', namespace)
    RQ_work = RedisQueue('Ingest_WorkQueue', namespace)
    RQ_error = RedisQueue(upc_error_queue, namespace)
    RQ_lock = RedisLock(lock_obj)
    RQ_lock.add({RQ_main.id_name: '1'})
    try:
        Session, engine = db_connect(pds_db)
        session = Session()
        logger.info('DataBase Connecton: Success')
    # NOTE(review): bare except hides the real connection error; narrow it.
    except:
        logger.error('DataBase Connection: Error')
        return 1
    # Counts rows since the last commit; flushed to the DB every 250 files.
    index = 1
    while int(RQ_main.QueueSize()) > 0 and RQ_lock.available(RQ_main.id_name):
        # Atomically move one item from the ready queue to the work queue.
        item = RQ_main.Qfile2Qwork(RQ_main.getQueueName(), RQ_work.getQueueName())
        inputfile = literal_eval(item)[0]
        archive = literal_eval(item)[1]
        if not os.path.isfile(inputfile):
            RQ_error.QueueAdd(f'Unable to locate or access {inputfile} during ingest processing')
            logger.warning("%s is not a file\n", inputfile)
            continue
        # Path relative to the archive root is the DB key for this file.
        subfile = inputfile.replace(PDSinfoDICT[archive]['path'], '')
        # Calculate checksum in chunks of 4096
        f_hash = hashlib.md5()
        with open(inputfile, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                f_hash.update(chunk)
        filechecksum = f_hash.hexdigest()
        QOBJ = session.query(Files).filter_by(filename=subfile).first()
        # Ingest when the file is new or its content changed on disk.
        runflag = False
        if QOBJ is None or filechecksum != QOBJ.checksum:
            runflag = True
        if runflag or override:
            date = datetime.datetime.now(
                pytz.utc).strftime("%Y-%m-%d %H:%M:%S")
            fileURL = inputfile.replace(archive_base, web_base)
            # If all upc requirements are in 'inputfile,' flag for upc
            try:
                upcflag = all(req in inputfile for req in PDSinfoDICT[archive]['upc_reqs'])
            except KeyError:
                logger.debug("No upc_reqs found for %s\nSetting upc eligibility False for all related files.", str(archive))
                upcflag = False
            filesize = os.path.getsize(inputfile)
            try:
                ingest_entry = Files()
                # Reuse the existing row id when forcibly re-ingesting.
                if QOBJ is not None and override:
                    ingest_entry.fileid = QOBJ.fileid
                ingest_entry.archiveid = PDSinfoDICT[archive]['archiveid']
                ingest_entry.filename = subfile
                ingest_entry.entry_date = date
                ingest_entry.checksum = filechecksum
                ingest_entry.upc_required = upcflag
                ingest_entry.validation_required = True
                ingest_entry.header_only = False
                ingest_entry.release_date = date
                ingest_entry.file_url = fileURL
                ingest_entry.file_size = filesize
                ingest_entry.di_pass = True
                ingest_entry.di_date = date
                session.merge(ingest_entry)
                session.flush()
                RQ_work.QueueRemove(item)
                index = index + 1
            except Exception as e:
                logger.error("Error During File Insert %s : %s", str(subfile), str(e))
        elif not runflag and not override:
            RQ_work.QueueRemove(item)
            logger.warning("Not running ingest: file %s already present"
                           " in database and no override flag supplied", inputfile)
        if index >= 250:
            try:
                session.commit()
                logger.info("Commit 250 files to Database: Success")
                index = 1
            except Exception as e:
                session.rollback()
                logger.warning("Unable to commit to database: %s", str(e))
    # NOTE(review): this is a while/else — the else runs whenever the loop
    # exits without break, i.e. ALWAYS, so "No Files Found" is logged even
    # after files were processed. An explicit pre-loop emptiness check is
    # probably what was intended.
    else:
        logger.info("No Files Found in Ingest Queue")
    try:
        session.commit()
        logger.info("Commit to Database: Success")
    except Exception as e:
        logger.error("Unable to commit to database: %s", str(e))
        session.rollback()
    # Close connection to database
    session.close()
    engine.dispose()
    if RQ_main.QueueSize() == 0 and RQ_work.QueueSize() == 0:
        logger.info("Process Complete All Queues Empty")
    elif RQ_main.QueueSize() == 0 and RQ_work.QueueSize() != 0:
        logger.warning("Process Done Work Queue NOT Empty Contains %s Files", str(
            RQ_work.QueueSize()))
    logger.info("Ingest Complete")
if __name__ == "__main__":
    # Parse CLI options, run the ingest loop, and hand main()'s status
    # (1 on DB-connection failure, otherwise None => exit code 0) to the shell.
    sys.exit(main(parse_args()))
| 34.617978 | 124 | 0.613924 |
4cc02e4320a2bda9cae1de7204c3995a3774693a | 654 | py | Python | correspondence_retrieval/code/run_single.py | JiwanChung/acav100m | 51cb948d5682da69334a8d05d2df631971b60215 | [
"MIT"
] | 27 | 2021-10-13T07:49:14.000Z | 2022-03-15T06:58:00.000Z | correspondence_retrieval/code/run_single.py | JiwanChung/acav100m | 51cb948d5682da69334a8d05d2df631971b60215 | [
"MIT"
] | 3 | 2021-08-30T21:29:45.000Z | 2021-11-18T08:02:32.000Z | correspondence_retrieval/code/run_single.py | JiwanChung/acav100m | 51cb948d5682da69334a8d05d2df631971b60215 | [
"MIT"
] | 6 | 2021-08-30T18:48:32.000Z | 2021-12-16T22:11:37.000Z | import argparse
import json
from cli import Cli
def main():
    """Load an experiment spec from JSON, size it, and run it once."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-g', '--path', type=str,
                        default='search_targets/default.json')
    parser.add_argument('-l', '--large', action='store_true')
    cli_args = parser.parse_args()
    with open(cli_args.path, 'r') as spec_file:
        options = json.load(spec_file)
    # --large switches to the bigger per-class sample budget.
    options['nsamples_per_class'] = 1000 if cli_args.large else 100
    run(options)
def run(args):
    """Execute one Cli run with *args* (verbose forced on); return (args, result)."""
    args['verbose'] = True
    result = Cli().run(**args)
    return args, result


if __name__ == '__main__':
    main()
| 20.4375 | 88 | 0.629969 |
2483797b3bca883d02e773f5c8414c8e5e4d262d | 38,262 | py | Python | py/desisurvey/ephem.py | desi-bgs/desisurvey | dda5a40d93d1169d2313a0aa6ab44656a6a2fe5b | [
"BSD-3-Clause"
] | null | null | null | py/desisurvey/ephem.py | desi-bgs/desisurvey | dda5a40d93d1169d2313a0aa6ab44656a6a2fe5b | [
"BSD-3-Clause"
] | null | null | null | py/desisurvey/ephem.py | desi-bgs/desisurvey | dda5a40d93d1169d2313a0aa6ab44656a6a2fe5b | [
"BSD-3-Clause"
] | null | null | null | """Tabulate sun and moon ephemerides during the survey.
"""
from __future__ import print_function, division
import warnings
import math
import os.path
import datetime
import numpy as np
import scipy.interpolate
import astropy.time
import astropy.table
import astropy.utils.exceptions
import astropy.units as u
import ephem
import desiutil.log
import desisurvey.config
import desisurvey.utils
import desisurvey.tiles
# Date range 2019-2027 for tabulated ephemerides.
# This range is chosen large enough to cover commissioning,
# survey validation and the 5-year main survey, so should
# not normally need to be changed, except for testing.
START_DATE = datetime.date(2019, 1, 1)
STOP_DATE = datetime.date(2027, 12, 31)
# Module-level cache holding the Ephemerides object most recently built or
# restored by get_ephem(); starts empty and is filled lazily.
_ephem = None
def get_ephem(use_cache=True, write_cache=True):
    """Return tabulated ephemerides for (START_DATE,STOP_DATE).

    The pyephem module must be installed to calculate ephemerides from
    scratch, but is not necessary when a FITS file of precalculated data
    is available.

    Parameters
    ----------
    use_cache : bool
        When True, reuse ephemerides cached in memory or on disk if
        possible. Otherwise, always calculate from scratch.
    write_cache : bool
        When True, write a generated table so it is available for future
        invocations. Writing only takes place when a cached object is not
        available or ``use_cache`` is False.

    Returns
    -------
    Ephemerides
        Object with tabulated ephemerides for (START_DATE,STOP_DATE).
    """
    global _ephem
    # Freeze IERS table for consistent results.
    desisurvey.utils.freeze_iers()
    # Standardized string representations of the date range.
    start_iso = START_DATE.isoformat()
    stop_iso = STOP_DATE.isoformat()
    range_iso = '({},{})'.format(start_iso, stop_iso)
    log = desiutil.log.get_logger()
    # Fast path: an object already cached in this process.
    if use_cache and _ephem is not None:
        if _ephem.start_date != START_DATE or _ephem.stop_date != STOP_DATE:
            raise RuntimeError('START_DATE, STOP_DATE have changed.')
        log.debug('Returning cached ephemerides for {}.'.format(range_iso))
        return _ephem
    # Next best: restore a precalculated FITS file from disk.
    config = desisurvey.config.Configuration()
    filename = config.get_path('ephem_{}_{}.fits'.format(start_iso, stop_iso))
    if use_cache and os.path.exists(filename):
        _ephem = Ephemerides(START_DATE, STOP_DATE, restore=filename)
        log.info('Restored ephemerides for {} from {}.'
                 .format(range_iso, filename))
        return _ephem
    # Fall back to building from scratch; cache the result in memory.
    log.info('Building ephemerides for {}...'.format(range_iso))
    _ephem = Ephemerides(START_DATE, STOP_DATE)
    if write_cache:
        # Persist the tabulated ephemerides for future runs.
        _ephem._table.write(filename, overwrite=True)
        log.info('Saved ephemerides for {} to {}'.format(range_iso, filename))
    return _ephem
class Ephemerides(object):
    """Tabulate per-night sun and moon ephemerides.

    :func:`get_ephem` should normally be used rather than calling this
    constructor directly, since it caches results in memory and on disk.

    Parameters
    ----------
    start_date : datetime.date
        Calculated ephemerides start on the evening of this date.
    stop_date : datetime.date
        Calculated ephemerides stop on the morning of this date.
    num_obj_steps : int
        Number of steps for tabulating object (ra, dec) during each 24-hour
        period from local noon to local noon. Ignored when restore is set.
    restore : str or None
        Name of a file to restore ephemerides from. Construct ephemerides
        from scratch when None. A restored file must have start and stop
        dates that match our args.

    Attributes
    ----------
    start : astropy.time.Time
        Local noon before the first night for which ephemerides are calculated.
    stop : astropy.time.Time
        Local noon after the last night for which ephemerides are calculated.
    num_nights : int
        Number of consecutive nights for which ephemerides are calculated.
    """
def __init__(self, start_date, stop_date, num_obj_steps=25, restore=None):
self.log = desiutil.log.get_logger()
config = desisurvey.config.Configuration()
# Validate date range.
num_nights = (stop_date - start_date).days
if num_nights <= 0:
raise ValueError('Expected start_date < stop_date.')
self.num_nights = num_nights
self.start_date = start_date
self.stop_date = stop_date
# Convert to astropy times at local noon.
self.start = desisurvey.utils.local_noon_on_date(start_date)
self.stop = desisurvey.utils.local_noon_on_date(stop_date)
# Moon illumination fraction interpolator will be initialized the
# first time it is used.
self._moon_illum_frac_interpolator = None
# Restore ephemerides from a FITS table if requested.
if restore is not None:
self._table = astropy.table.Table.read(restore)
assert self._table.meta['START'] == str(start_date)
assert self._table.meta['STOP'] == str(stop_date)
assert len(self._table) == num_nights
return
# Initialize an empty table to fill.
meta = dict(NAME='Survey Ephemerides', EXTNAME='EPHEM',
START=str(start_date), STOP=str(stop_date))
self._table = astropy.table.Table(meta=meta)
mjd_format = '%.5f'
self._table['noon'] = astropy.table.Column(
length=num_nights, format=mjd_format,
description='MJD of local noon before night')
self._table['dusk'] = astropy.table.Column(
length=num_nights, format=mjd_format,
description='MJD of dark/gray sunset')
self._table['dawn'] = astropy.table.Column(
length=num_nights, format=mjd_format,
description='MJD of dark/gray sunrise')
self._table['brightdusk'] = astropy.table.Column(
length=num_nights, format=mjd_format,
description='MJD of bright sunset')
self._table['brightdawn'] = astropy.table.Column(
length=num_nights, format=mjd_format,
description='MJD of bright sunrise')
self._table['brightdusk_LST'] = astropy.table.Column(
length=num_nights, format='%.5f',
description='Apparent LST at brightdawn in degrees')
self._table['brightdawn_LST'] = astropy.table.Column(
length=num_nights, format='%.5f',
description='Apparent LST at brightdusk in degrees')
self._table['moonrise'] = astropy.table.Column(
length=num_nights, format=mjd_format,
description='MJD of moonrise before/during night')
self._table['moonset'] = astropy.table.Column(
length=num_nights, format=mjd_format,
description='MJD of moonset after/during night')
self._table['moon_illum_frac'] = astropy.table.Column(
length=num_nights, format='%.3f',
description='Illuminated fraction of moon surface')
self._table['nearest_full_moon'] = astropy.table.Column(
length=num_nights, format='%.5f',
description='Nearest full moon - local midnight in days')
self._table['programs'] = astropy.table.Column(
length=num_nights, shape=(4,), dtype=np.int16,
description='Program sequence between dusk and dawn')
self._table['changes'] = astropy.table.Column(
length=num_nights, shape=(3,),
description='MJD of program changes between dusk and dawn')
# Add (ra,dec) arrays for each object that we need to avoid and
# check that ephem has a model for it.
models = {}
for name in list(config.avoid_bodies.keys) + ['sun']:
models[name] = getattr(ephem, name.capitalize())()
self._table[name + '_ra'] = astropy.table.Column(
length=num_nights, shape=(num_obj_steps,), format='%.2f',
description='RA of {0} during night in degrees'.format(name))
self._table[name + '_dec'] = astropy.table.Column(
length=num_nights, shape=(num_obj_steps,), format='%.2f',
description='DEC of {0} during night in degrees'.format(name))
# The moon is required.
if 'moon' not in models:
raise ValueError('Missing required avoid_bodies entry for "moon".')
# Initialize the observer.
mayall = ephem.Observer()
mayall.lat = config.location.latitude().to(u.rad).value
mayall.lon = config.location.longitude().to(u.rad).value
mayall.elevation = config.location.elevation().to(u.m).value
# Configure atmospheric refraction model for rise/set calculations.
mayall.pressure = 1e3 * config.location.pressure().to(u.bar).value
mayall.temp = config.location.temperature().to(u.C).value
# Do not use atmospheric refraction corrections for other calculations.
mayall_no_ar = mayall.copy()
mayall_no_ar.pressure = 0.
# Calculate the MJD corresponding to date=0. in ephem.
# This throws a warning because of the early year, but it is harmless.
with warnings.catch_warnings():
warnings.simplefilter(
'ignore', astropy.utils.exceptions.AstropyUserWarning)
mjd0 = astropy.time.Time(
datetime.datetime(1899, 12, 31, 12, 0, 0)).mjd
# Initialize a grid covering each 24-hour period for
# tabulating the (ra,dec) of objects to avoid.
t_obj = np.linspace(0., 1., num_obj_steps)
# Calculate ephmerides for each night.
for day_offset in range(num_nights):
day = self.start + day_offset * u.day
mayall.date = day.datetime
row = self._table[day_offset]
# Store local noon for this day.
row['noon'] = day.mjd
# Calculate bright twilight.
mayall.horizon = (
config.conditions.BRIGHT.max_sun_altitude().to(u.rad).value)
row['brightdusk'] = mayall.next_setting(
ephem.Sun(), use_center=True) + mjd0
row['brightdawn'] = mayall.next_rising(
ephem.Sun(), use_center=True) + mjd0
# Calculate dark / gray twilight.
mayall.horizon = (
config.conditions.DARK.max_sun_altitude().to(u.rad).value)
row['dusk'] = mayall.next_setting(
ephem.Sun(), use_center=True) + mjd0
row['dawn'] = mayall.next_rising(
ephem.Sun(), use_center=True) + mjd0
# Calculate the moonrise/set for any moon visible tonight.
m0 = ephem.Moon()
# Use the USNO standard for defining moonrise/set, which means that
# it will not exactly correspond to DARK <-> ? program transitions
# at an altitude of 0deg.
mayall.horizon = '-0:34'
row['moonrise'] = mayall.next_rising(m0) + mjd0
if row['moonrise'] > row['brightdawn']:
# Any moon visible tonight is from the previous moon rise.
row['moonrise'] = mayall.previous_rising(m0) + mjd0
mayall.date = row['moonrise'] - mjd0
row['moonset'] = mayall.next_setting(ephem.Moon()) + mjd0
# Calculate the fraction of the moon's surface that is illuminated
# at local midnight.
m0.compute(row['noon'] + 0.5 - mjd0)
row['moon_illum_frac'] = m0.moon_phase
# Loop over objects to avoid.
for i, t in enumerate(t_obj):
# Set the date of the no-refraction model.
mayall_no_ar.date = row['noon'] + t - mjd0
for name, model in models.items():
model.compute(mayall_no_ar)
row[name + '_ra'][i] = math.degrees(float(model.ra))
row[name + '_dec'][i] = math.degrees(float(model.dec))
# Build a 1s grid covering the night.
step_size_sec = 1
step_size_day = step_size_sec / 86400.
dmjd_grid = desisurvey.ephem.get_grid(step_size=step_size_sec * u.s)
# Loop over nights to calculate the program sequence.
self._table['programs'][:] = -1
self._table['changes'][:] = 0.
for row in self._table:
mjd_grid = dmjd_grid + row['noon'] + 0.5
pindex = self.tabulate_program(
mjd_grid, include_twilight=False, as_tuple=False)
assert pindex[0] == -1 and pindex[-1] == -1
# Calculate index-1 where new programs starts (-1 because of np.diff)
changes = np.where(np.diff(pindex) != 0)[0]
# Must have at least DAY -> NIGHT -> DAY changes.
assert len(changes) >= 2 and pindex[changes[0]] == -1 and pindex[changes[-1] + 1] == -1
# Max possible changes is 5.
assert len(changes) <= 6
# Check that first change is at dusk.
assert np.abs(mjd_grid[changes[0]] + 0.5 * step_size_day - row['dusk']) <= step_size_day
# Check that the last change is at dusk.
assert np.abs(mjd_grid[changes[-1]] + 0.5 * step_size_day - row['dawn']) <= step_size_day
row['programs'][0] = pindex[changes[0] + 1]
for k, idx in enumerate(changes[1:-1]):
row['programs'][k + 1] = pindex[idx + 1]
row['changes'][k] = mjd_grid[idx] + 0.5 * step_size_day
# Tabulate all full moons covering (start, stop) with a 30-day pad.
full_moons = []
lo, hi = self._table[0]['noon'] - 30 - mjd0, self._table[-1]['noon'] + 30 - mjd0
when = lo
while when < hi:
when = ephem.next_full_moon(when)
full_moons.append(when)
full_moons = np.array(full_moons) + mjd0
# Find the first full moon after each midnight.
midnight = self._table['noon'] + 0.5
idx = np.searchsorted(full_moons, midnight, side='left')
assert np.all(midnight <= full_moons[idx])
assert np.all(midnight > full_moons[idx - 1])
# Calculate time until next full moon and after previous full moon.
next_full_moon = full_moons[idx] - midnight
prev_full_moon = midnight - full_moons[idx - 1]
# Record the nearest full moon to each midnight.
next_is_nearest = next_full_moon <= prev_full_moon
self._table['nearest_full_moon'][next_is_nearest] = next_full_moon[next_is_nearest]
self._table['nearest_full_moon'][~next_is_nearest] = -prev_full_moon[~next_is_nearest]
# Calculate apparent LST at each brightdusk/dawn in degrees.
dusk_t = astropy.time.Time(self._table['brightdusk'].data, format='mjd')
dawn_t = astropy.time.Time(self._table['brightdawn'].data, format='mjd')
dusk_t.location = desisurvey.utils.get_location()
dawn_t.location = desisurvey.utils.get_location()
self._table['brightdusk_LST'] = dusk_t.sidereal_time('apparent').to(u.deg).value
self._table['brightdawn_LST'] = dawn_t.sidereal_time('apparent').to(u.deg).value
# Subtract 360 deg if LST wraps around during this night, so that the
# [dusk, dawn] values can be used for linear interpolation.
wrap = self._table['brightdusk_LST'] > self._table['brightdawn_LST']
self._table['brightdusk_LST'][wrap] -= 360
assert np.all(self._table['brightdawn_LST'] > self._table['brightdusk_LST'])
def get_row(self, row_index):
"""Return the specified row of our table.
Parameters
----------
row_index : int
Index starting from zero of the requested row. Negative values
are allowed and specify offsets from the end of the table in
the usual way.
Returns
astropy.table.Row or int
Row of ephemeris data for the requested night.
"""
if row_index < -self.num_nights or row_index >= self.num_nights:
raise ValueError('Requested row index outside table: {0}'
.format(row_index))
return self._table[row_index]
    @property
    def table(self):
        """Read-only access to our internal table.

        Returns the live astropy Table, not a copy; callers should
        treat it as read-only.
        """
        return self._table
def get_night(self, night, as_index=False):
"""Return the row of ephemerides for a single night.
Parameters
----------
night : date
Converted to a date using :func:`desisurvey.utils.get_date`.
as_index : bool
Return the row index of the specified night in our per-night table
if True. Otherwise return the row itself.
Returns
-------
astropy.table.Row or int
Row of ephemeris data for the requested night or the index
of this row (selected via ``as_index``).
"""
date = desisurvey.utils.get_date(night)
row_index = (date - self.start_date).days
if row_index < 0 or row_index >= self.num_nights:
raise ValueError('Requested night outside ephemerides: {0}'
.format(night))
return row_index if as_index else self._table[row_index]
def get_moon_illuminated_fraction(self, mjd):
"""Return the illuminated fraction of the moon.
Uses linear interpolation on the tabulated fractions at midnight and
should be accurate to about 0.01. For reference, the fraction changes
by up to 0.004 per hour.
Parameters
----------
mjd : float or array
MJD values during a single night where the program should be
tabulated.
Returns
-------
float or array
Illuminated fraction at each input time.
"""
mjd = np.asarray(mjd)
if (np.min(mjd) < self._table['noon'][0] or
np.max(mjd) >= self._table['noon'][-1] + 1):
raise ValueError('Requested MJD is outside ephemerides range.')
if self._moon_illum_frac_interpolator is None:
# Lazy initialization of a cubic interpolator.
midnight = self._table['noon'] + 0.5
self._moon_illum_frac_interpolator = scipy.interpolate.interp1d(
midnight, self._table['moon_illum_frac'], copy=True,
kind='linear', fill_value='extrapolate', assume_sorted=True)
return self._moon_illum_frac_interpolator(mjd)
def get_night_program(self, night, include_twilight=False, program_as_int=False):
"""Return the program sequence for one night.
The program definitions are taken from
:class:`desisurvey.config.Configuration` and depend only on
sun and moon ephemerides for the night.
Parameters
----------
night : date
Converted to a date using :func:`desisurvey.utils.get_date`.
include_twilight : bool
Include twilight time at the start and end of each night in
the BRIGHT program.
program_as_int : bool
Return program encoded as a small integer instead of a string
when True.
Returns
-------
tuple
Tuple (programs, changes) where programs is a list of N program
names and changes is a 1D numpy array of N+1 MJD values that
bracket each program during the night.
"""
night_ephem = self.get_night(night)
programs = night_ephem['programs']
changes = night_ephem['changes']
# Unused slots are -1.
num_programs = np.count_nonzero(programs >= 0)
programs = programs[:num_programs]
changes = changes[:num_programs - 1]
if include_twilight:
start = night_ephem['brightdusk']
stop = night_ephem['brightdawn']
BRIGHT = desisurvey.tiles.Tiles.CONDITION_INDEX['BRIGHT']
if programs[0] != BRIGHT:
# Twilight adds a BRIGHT program at the start of the night.
programs = np.insert(programs, 0, BRIGHT)
changes = np.insert(changes, 0, night_ephem['dusk'])
if programs[-1] != BRIGHT:
# Twilight adds a BRIGHT program at the end of the night.
programs = np.append(programs, BRIGHT)
changes = np.append(changes, night_ephem['dawn'])
else:
start = night_ephem['dusk']
stop = night_ephem['dawn']
# Add start, stop to the change times.
changes = np.concatenate(([start], changes, [stop]))
if not program_as_int:
# Replace program indices with names.
programs = [desisurvey.tiles.Tiles.CONDITIONS[pidx] for pidx in programs]
return programs, changes
def get_program_hours(self, start_date=None, stop_date=None,
include_monsoon=False, include_full_moon=False,
include_twilight=True):
"""Tabulate hours in each program during each night of the survey.
Use :func:`desisurvey.plots.plot_program` to visualize program hours.
This method calculates scheduled hours with no correction for weather.
Use 1 - :func:`desimodel.weather.dome_closed_fractions` to lookup
nightly corrections based on historical weather data.
Parameters
----------
ephem : :class:`desisurvey.ephem.Ephemerides`
Tabulated ephemerides data to use for determining the program.
start_date : date or None
First night to include or use the first date of the survey. Must
be convertible to a date using :func:`desisurvey.utils.get_date`.
stop_date : date or None
First night to include or use the last date of the survey. Must
be convertible to a date using :func:`desisurvey.utils.get_date`.
include_monsoon : bool
Include nights during the annual monsoon shutdowns.
include_fullmoon : bool
Include nights during the monthly full-moon breaks.
include_twilight : bool
Include twilight time at the start and end of each night in
the BRIGHT program.
Returns
-------
array
Numpy array of shape (3, num_nights) containing the number of
hours in each program (0=DARK, 1=GRAY, 2=BRIGHT) during each
night.
"""
# Determine date range to use.
config = desisurvey.config.Configuration()
if start_date is None:
start_date = config.first_day()
else:
start_date = desisurvey.utils.get_date(start_date)
if stop_date is None:
stop_date = config.last_day()
else:
stop_date = desisurvey.utils.get_date(stop_date)
if start_date >= stop_date:
raise ValueError('Expected start_date < stop_date.')
num_nights = (stop_date - start_date).days
hours = np.zeros((3, num_nights))
for i in range(num_nights):
tonight = start_date + datetime.timedelta(days=i)
if not include_monsoon and desisurvey.utils.is_monsoon(tonight):
continue
if not include_full_moon and self.is_full_moon(tonight):
continue
programs, changes = self.get_night_program(
tonight, include_twilight=include_twilight, program_as_int=True)
for p, dt in zip(programs, np.diff(changes)):
hours[p, i] += dt
hours *= 24
return hours
    def get_available_lst(self, start_date=None, stop_date=None, nbins=192, origin=-60,
                          weather=None, include_monsoon=False, include_full_moon=False,
                          include_twilight=False):
        """Calculate histograms of available LST for each program.

        Parameters
        ----------
        start_date : date or None
            First night to include or use the first date of the survey. Must
            be convertible to a date using :func:`desisurvey.utils.get_date`.
        stop_date : date or None
            Last night to include or use the last date of the survey. Must
            be convertible to a date using :func:`desisurvey.utils.get_date`.
        nbins : int
            Number of LST bins to use.
        origin : float
            Rotate DEC values in plots so that the left edge is at this value
            in degrees.
        weather : array or None
            1D array of nightly weather factors (0-1) to use, or None to calculate
            available LST assuming perfect weather. Length must equal the number
            of nights between start and stop. Values are fraction of the night
            with the dome open (0=never, 1=always). Use
            1 - :func:`desimodel.weather.dome_closed_fractions` to lookup
            suitable corrections based on historical weather data.
        include_monsoon : bool
            Include nights during the annual monsoon shutdowns.
        include_full_moon : bool
            Include nights during the monthly full-moon breaks.
        include_twilight : bool
            Include twilight in the BRIGHT program when True.

        Returns
        -------
        tuple
            Tuple (lst_hist, lst_bins) with lst_hist having shape (3,nbins) and
            lst_bins having shape (nbins+1,).
        """
        config = desisurvey.config.Configuration()
        if start_date is None:
            start_date = config.first_day()
        else:
            start_date = desisurvey.utils.get_date(start_date)
        if stop_date is None:
            stop_date = config.last_day()
        else:
            stop_date = desisurvey.utils.get_date(stop_date)
        num_nights = (stop_date - start_date).days
        if num_nights <= 0:
            raise ValueError('Expected start_date < stop_date.')
        if weather is not None:
            weather = np.asarray(weather)
            if len(weather) != num_nights:
                raise ValueError('Expected weather array of length {}.'.format(num_nights))
        # Initialize LST histograms for each program.
        lst_bins = np.linspace(origin, origin + 360, nbins + 1)
        lst_hist = np.zeros((len(desisurvey.tiles.Tiles.CONDITIONS), nbins))
        # NOTE(review): dlst is never used below -- candidate for removal.
        dlst = 360. / nbins
        # Loop over nights.
        for n in range(num_nights):
            night = start_date + datetime.timedelta(n)
            if not include_monsoon and desisurvey.utils.is_monsoon(night):
                continue
            if not include_full_moon and self.is_full_moon(night):
                continue
            # Look up the program changes during this night.
            programs, changes = self.get_night_program(
                night, include_twilight, program_as_int=True)
            # Convert each change MJD to a corresponding LST in degrees by
            # linear interpolation between the bright dusk/dawn anchors.
            night_ephem = self.get_night(night)
            MJD0, MJD1 = night_ephem['brightdusk'], night_ephem['brightdawn']
            LST0, LST1 = [night_ephem['brightdusk_LST'], night_ephem['brightdawn_LST']]
            lst_changes = LST0 + (changes - MJD0) * (LST1 - LST0) / (MJD1 - MJD0)
            assert np.all(np.diff(lst_changes) > 0)
            # Express each change as a (possibly fractional) bin coordinate.
            lst_bin = (lst_changes - origin) / 360 * nbins
            # Loop over programs during the night.
            for i, prog_index in enumerate(programs):
                phist = lst_hist[prog_index]
                lo, hi = lst_bin[i:i + 2]
                # Ensure that 0 <= lo < nbins by shifting both edges by a
                # whole number of revolutions.
                left_edge = np.floor(lo / nbins) * nbins
                lo -= left_edge
                hi -= left_edge
                assert 0 <= lo and lo < nbins
                ilo = int(np.ceil(lo))
                # Assumes lo never lands exactly on a bin edge -- TODO confirm.
                assert ilo > 0
                # Calculate the weight of this night in sidereal hours.
                wgt = 24 / nbins
                if weather is not None:
                    wgt *= weather[n]
                # Divide this program's LST window among the LST bins.
                if hi < nbins:
                    # [lo,hi) falls completely within [0,nbins)
                    ihi = int(np.floor(hi))
                    if ilo == ihi + 1:
                        # LST window is contained within a single LST bin.
                        phist[ihi] += (hi - lo) * wgt
                    else:
                        # Accumulate to bins that fall completely within the window.
                        phist[ilo:ihi] += wgt
                        # Accumulate to partial bins at each end of the program window.
                        phist[ilo - 1] += (ilo - lo) * wgt
                        phist[ihi] += (hi - ihi) * wgt
                else:
                    # [lo,hi) wraps around on the right edge.
                    hi -= nbins
                    assert hi >= 0 and hi < nbins
                    ihi = int(np.floor(hi))
                    # Accumulate to bins that fall completely within the window.
                    phist[ilo:nbins] += wgt
                    phist[0:ihi] += wgt
                    # Accumulate partial bins at each end of the program window.
                    phist[ilo - 1] += (ilo - lo) * wgt
                    phist[ihi] += (hi - ihi) * wgt
        return lst_hist, lst_bins
def tabulate_program(self, mjd, include_twilight=False, as_tuple=True):
"""Tabulate the program during one night.
The program definitions are taken from
:class:`desisurvey.config.Configuration` and depend only on
sun and moon ephemerides for the night.
Parameters
----------
mjd : float or array
MJD values during a single night where the program should be
tabulated.
include_twilight : bool
Include twilight time at the start and end of each night in
the BRIGHT program.
as_tuple : bool
Return a tuple (dark, gray, bright) or else a vector of int16
values.
Returns
-------
tuple or array
Tuple (dark, gray, bright) of boolean arrays that tabulates the
program at each input MJD or an array of small integer indices
into :attr:`desisurvey.tiles.Tiles.CONDITIONS`, with the special
value -1 indicating DAYTIME. All output arrays have the same shape
as the input ``mjd`` array.
"""
# Get the night of the earliest time.
mjd = np.asarray(mjd)
night = self.get_night(astropy.time.Time(np.min(mjd), format='mjd'))
# Check that all input MJDs are valid for this night.
mjd0 = night['noon']
if np.any((mjd < mjd0) | (mjd >= mjd0 + 1)):
raise ValueError('MJD values span more than one night.')
# Calculate the moon (ra, dec) in degrees at each grid time.
interpolator = get_object_interpolator(night, 'moon', altaz=True)
moon_alt, _ = interpolator(mjd)
# Calculate the moon illuminated fraction at each time.
moon_frac = self.get_moon_illuminated_fraction(mjd)
# Select bright and dark night conditions.
dark_night = (mjd >= night['dusk']) & (mjd <= night['dawn'])
if include_twilight:
bright_night = (
mjd >= night['brightdusk']) & (mjd <= night['brightdawn'])
else:
bright_night = dark_night
# Identify program during each MJD.
GRAY = desisurvey.config.Configuration().conditions.GRAY
max_prod = GRAY.max_moon_illumination_altitude_product().to(u.deg).value
max_frac = GRAY.max_moon_illumination()
gray = dark_night & (moon_alt >= 0) & (
(moon_frac <= max_frac) &
(moon_frac * moon_alt <= max_prod))
dark = dark_night & (moon_alt < 0)
bright = bright_night & ~(dark | gray)
assert not np.any(dark & gray | dark & bright | gray & bright)
if as_tuple:
return dark, gray, bright
else:
# Default value -1=DAYTIME.
program = np.full(mjd.shape, -1, np.int16)
program[dark] = desisurvey.tiles.Tiles.CONDITION_INDEX['DARK']
program[gray] = desisurvey.tiles.Tiles.CONDITION_INDEX['GRAY']
program[bright] = desisurvey.tiles.Tiles.CONDITION_INDEX['BRIGHT']
return program
def is_full_moon(self, night, num_nights=None):
"""Test if a night occurs during a full-moon break.
The full moon break is defined as the ``num_nights`` nights where
the moon is most fully illuminated at local midnight. This method
should normally be called with ``num_nights`` equal to None, in which
case the value is taken from our
:class:`desisurvey.config.Configuration``.
Parameters
----------
night : date
Converted to a date using :func:`desisurvey.utils.get_date`.
num_nights : int or None
Number of nights to block out around each full-moon.
Returns
-------
bool
True if the specified night falls during a full-moon break.
"""
# Check the requested length of the full moon break.
if num_nights is None:
num_nights = desisurvey.config.Configuration().full_moon_nights()
# Look up the index of this night in our table.
index = self.get_night(night, as_index=True)
# When is the nearest full moon?
nearest = self._table['nearest_full_moon'][index]
if np.abs(nearest) < 0.5 * num_nights:
return True
elif nearest == 0.5 * num_nights:
# Tie breaker if two nights are equally close.
return True
else:
return False
def get_object_interpolator(row, object_name, altaz=False):
    """Build an interpolator for object location during one night.

    Wrap around in RA is handled correctly and we assume that the object never
    wraps around in DEC. The interpolated unit vectors should be within
    0.3 degrees of the true unit vectors in both (dec,ra) and (alt,az).

    Parameters
    ----------
    row : astropy.table.Row
        A single row from the ephemerides astropy Table corresponding to the
        night in question.
    object_name : string
        Name of the object to build an interpolator for. Must be listed under
        avoid_objects in :class:`our configuration
        <desisurvey.config.Configuration>`.
    altaz : bool
        Interpolate in (alt,az) if True, else interpolate in (dec,ra).

    Returns
    -------
    callable
        A callable object that takes a single MJD value or an array of MJD
        values and returns the corresponding (dec,ra) or (alt,az) values in
        degrees, with -90 <= dec,alt <= +90 and 0 <= ra,az < 360.

    Raises
    ------
    ValueError
        If ``row`` has no tabulated (ra, dec) columns for ``object_name``.
    """
    # Find the tabulated (ra, dec) values for the requested object.
    try:
        ra = row[object_name + '_ra']
        dec = row[object_name + '_dec']
    except (AttributeError, KeyError):
        # BUGFIX: astropy Table rows (and mappings) raise KeyError for a
        # missing column, so catching only AttributeError made the
        # documented ValueError unreachable for bad object names.
        raise ValueError('Invalid object_name {0}.'.format(object_name))
    # Calculate the grid of MJD time steps where (ra,dec) are tabulated.
    t_obj = row['noon'] + np.linspace(0., 1., len(ra))
    # Interpolate in (theta,phi) = (dec,ra) or (alt,az)?
    if altaz:
        # Convert each (ra,dec) to (alt,az) at the appropriate time.
        times = astropy.time.Time(t_obj, format='mjd')
        frame = desisurvey.utils.get_observer(times)
        sky = astropy.coordinates.ICRS(ra=ra * u.deg, dec=dec * u.deg)
        altaz = sky.transform_to(frame)
        theta = altaz.alt.to(u.deg).value
        phi = altaz.az.to(u.deg).value
    else:
        theta = dec
        phi = ra
    # Construct arrays of (theta, cos(phi), sin(phi)) values for this night.
    # Use cos(phi), sin(phi) instead of phi directly to avoid wrap-around
    # discontinuities. Leave theta in degrees.
    data = np.empty((3, len(ra)))
    data[0] = theta
    phi = np.radians(phi)
    data[1] = np.cos(phi)
    data[2] = np.sin(phi)
    # Build a cubic interpolator in (alt, az) during this interval.
    # Return (0, 0, 0) outside the interval.
    interpolator = scipy.interpolate.interp1d(
        t_obj, data, axis=1, kind='cubic', copy=True,
        bounds_error=False, fill_value=0., assume_sorted=True)
    # Wrap the interpolator to convert (cos(phi), sin(phi)) back to an angle
    # in degrees.
    def wrapper(mjd):
        theta, cos_phi, sin_phi = interpolator(mjd)
        # Map arctan2 range [-180, +180] into [0, 360] with fmod().
        phi = np.fmod(360 + np.degrees(np.arctan2(sin_phi, cos_phi)), 360)
        return theta, phi
    return wrapper
def get_grid(step_size=1, night_start=-6, night_stop=7):
    """Calculate a grid of equally spaced times covering one night.

    In case the requested step size does not evenly divide the requested
    range, the last grid point will be rounded up. The default range
    covers all possible observing times at KPNO.

    Parameters
    ----------
    step_size : :class:`astropy.units.Quantity`, optional
        Size of each grid step with time units, default 1 min.
    night_start : :class:`astropy.units.Quantity`, optional
        First grid point relative to local midnight with time units, default -6 h.
    night_stop : :class:`astropy.units.Quantity`, optional
        Last grid point relative to local midnight with time units, default 7 h.

    Returns
    -------
    array
        Numpy array of dimensionless offsets relative to local midnight
        in units of days.
    """
    # Attach the default units to any bare numeric inputs.
    if not isinstance(step_size, u.Quantity):
        step_size = step_size * u.min
    if not isinstance(night_start, u.Quantity):
        night_start = night_start * u.hour
    if not isinstance(night_stop, u.Quantity):
        night_stop = night_stop * u.hour
    # Number of whole steps needed to span the range (rounded).
    num_steps = int(round(((night_stop - night_start) / step_size).to(1).value))
    start_days = night_start.to(u.day).value
    step_days = step_size.to(u.day).value
    return start_days + step_days * np.arange(num_steps + 1)
edaa0aa99d4a4d19e1d50be2c7019bc5ea62a2b8 | 4,989 | py | Python | airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/config.py | darian-heede/airbyte | 504580d833582f8800b334f24e57a414d94389bf | [
"MIT"
] | 1 | 2022-03-31T18:23:00.000Z | 2022-03-31T18:23:00.000Z | airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/config.py | darian-heede/airbyte | 504580d833582f8800b334f24e57a414d94389bf | [
"MIT"
] | 2 | 2021-09-30T16:58:58.000Z | 2021-11-26T17:58:59.000Z | airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/config.py | darian-heede/airbyte | 504580d833582f8800b334f24e57a414d94389bf | [
"MIT"
] | 2 | 2021-04-28T15:15:37.000Z | 2022-03-28T17:32:15.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from enum import Enum
from pathlib import Path
from typing import List, Mapping, Optional, Set
from pydantic import BaseModel, Field, validator
# Shared pydantic Field definitions reused by the test-config models below.
config_path: str = Field(default="secrets/config.json", description="Path to a JSON object representing a valid connector configuration")
invalid_config_path: str = Field(description="Path to a JSON object representing an invalid connector configuration")
spec_path: str = Field(
    default="secrets/spec.json", description="Path to a JSON object representing the spec expected to be output by this connector"
)
configured_catalog_path: str = Field(default="integration_tests/configured_catalog.json", description="Path to configured catalog")
# NOTE(review): annotated as int but defaulted to None; consider Optional[int].
timeout_seconds: int = Field(default=None, description="Test execution timeout_seconds", ge=0)
# Base for all test-config models: reject unknown keys so typos in the
# acceptance-test YAML fail loudly instead of being silently ignored.
class BaseConfig(BaseModel):
    class Config:
        extra = "forbid"
# Config for the "spec" test: checks the connector's declared spec output.
class SpecTestConfig(BaseConfig):
    spec_path: str = spec_path
    config_path: str = config_path
    timeout_seconds: int = timeout_seconds
# Config for the "check connection" test.
class ConnectionTestConfig(BaseConfig):
    # Expected outcome of the connection check for the given config file.
    class Status(Enum):
        Succeed = "succeed"
        Failed = "failed"
        Exception = "exception"
    config_path: str = config_path
    status: Status = Field(Status.Succeed, description="Indicate if connection check should succeed with provided config")
    timeout_seconds: int = timeout_seconds
# Config for the "discovery" test: runs catalog discovery with the given config.
class DiscoveryTestConfig(BaseConfig):
    config_path: str = config_path
    timeout_seconds: int = timeout_seconds
# Describes how actual records are compared against an expected-records file.
class ExpectedRecordsConfig(BaseModel):
    class Config:
        extra = "forbid"
    path: Path = Field(description="File with expected records")
    extra_fields: bool = Field(False, description="Allow records to have other fields")
    exact_order: bool = Field(False, description="Ensure that records produced in exact same order")
    extra_records: bool = Field(
        True, description="Allow connector to produce extra records, but still enforce all records from the expected file to be produced"
    )
    @validator("exact_order", always=True)
    def validate_exact_order(cls, exact_order, values):
        # extra_fields matches records positionally, so order must be enforced.
        if "extra_fields" in values:
            if values["extra_fields"] and not exact_order:
                raise ValueError("exact_order must be on if extra_fields enabled")
        return exact_order
    @validator("extra_records", always=True)
    def validate_extra_records(cls, extra_records, values):
        # Positional matching (extra_fields) is incompatible with extra records.
        if "extra_fields" in values:
            if values["extra_fields"] and extra_records:
                # Fixed typo in the error message ("must by off" -> "must be off").
                raise ValueError("extra_records must be off if extra_fields enabled")
        return extra_records
# Config for the "basic read" test: run a full read and validate the records.
class BasicReadTestConfig(BaseConfig):
    config_path: str = config_path
    configured_catalog_path: Optional[str] = configured_catalog_path
    # Streams exempt from the "every stream must produce records" check.
    empty_streams: Set[str] = Field(default_factory=set, description="We validate that all streams has records. These are exceptions")
    expect_records: Optional[ExpectedRecordsConfig] = Field(description="Expected records from the read")
    validate_schema: bool = Field(True, description="Ensure that records match the schema of the corresponding stream")
    timeout_seconds: int = timeout_seconds
class FullRefreshConfig(BaseConfig):
    """Full refresh test config

    Attributes:
        ignored_fields for each stream, list of fields path. Path should be in format "object_key/object_key2"
    """
    config_path: str = config_path
    configured_catalog_path: str = configured_catalog_path
    timeout_seconds: int = timeout_seconds
    # Fields excluded when comparing two sequential full-refresh reads.
    ignored_fields: Optional[Mapping[str, List[str]]] = Field(
        description="For each stream, list of fields path ignoring in sequential reads test"
    )
# Config for the incremental-sync tests (cursor handling, state files).
class IncrementalConfig(BaseConfig):
    config_path: str = config_path
    configured_catalog_path: str = configured_catalog_path
    # Maps stream name -> path of its cursor inside emitted state messages.
    cursor_paths: Optional[Mapping[str, List[str]]] = Field(
        description="For each stream, the path of its cursor field in the output state messages."
    )
    future_state_path: Optional[str] = Field(description="Path to a state file with values in far future")
    timeout_seconds: int = timeout_seconds
# Groups all test sections; each entry in a list is one parameterized test run.
class TestConfig(BaseConfig):
    spec: Optional[List[SpecTestConfig]] = Field(description="TODO")
    connection: Optional[List[ConnectionTestConfig]] = Field(description="TODO")
    discovery: Optional[List[DiscoveryTestConfig]] = Field(description="TODO")
    basic_read: Optional[List[BasicReadTestConfig]] = Field(description="TODO")
    full_refresh: Optional[List[FullRefreshConfig]] = Field(description="TODO")
    incremental: Optional[List[IncrementalConfig]] = Field(description="TODO")
# Top-level acceptance-test configuration (root of the YAML file).
class Config(BaseConfig):
    connector_image: str = Field(description="Docker image to test, for example 'airbyte/source-hubspot:dev'")
    base_path: Optional[str] = Field(description="Base path for all relative paths")
    tests: TestConfig = Field(description="List of the tests with their configs")
| 41.231405 | 137 | 0.742233 |
f7492e49c9c1c46f2df91362feee8c4fc02288c8 | 27,205 | py | Python | docs/beta/code/Tracking.py | leonbett/debuggingbook | ae1fa940c306160429232fbc93a7a7f14b44efb7 | [
"MIT"
] | null | null | null | docs/beta/code/Tracking.py | leonbett/debuggingbook | ae1fa940c306160429232fbc93a7a7f14b44efb7 | [
"MIT"
] | null | null | null | docs/beta/code/Tracking.py | leonbett/debuggingbook | ae1fa940c306160429232fbc93a7a7f14b44efb7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# "Tracking Bugs" - a chapter of "The Debugging Book"
# Web site: https://www.debuggingbook.org/html/Tracking.html
# Last change: 2021-05-12 17:37:52+02:00
#
# Copyright (c) 2021 CISPA Helmholtz Center for Information Security
# Copyright (c) 2018-2020 Saarland University, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''
The Debugging Book - Tracking Bugs
This file can be _executed_ as a script, running all experiments:
$ python Tracking.py
or _imported_ as a package, providing classes, functions, and constants:
>>> from debuggingbook.Tracking import <identifier>
but before you do so, _read_ it and _interact_ with it at:
https://www.debuggingbook.org/html/Tracking.html
This chapter provides no functionality that could be used by third-party code.
For more details, source, and documentation, see
"The Debugging Book - Tracking Bugs"
at https://www.debuggingbook.org/html/Tracking.html
'''
# Allow to use 'from . import <module>' when run as script (cf. PEP 366)
if __name__ == '__main__' and __package__ is None:
__package__ = 'debuggingbook'
# Tracking Bugs
# =============
if __name__ == '__main__':
print('# Tracking Bugs')
if __name__ == '__main__':
from .bookutils import YouTubeVideo
YouTubeVideo("bJzHYzvxHm8")
if __name__ == '__main__':
# We use the same fixed seed as the notebook to ensure consistency
import random
random.seed(2001)
from . import Intro_Debugging
import os
import sys
if __name__ == '__main__':
if 'CI' in os.environ:
# Can't run this in our continuous environment,
# since it can't run a headless Web browser
sys.exit(0)
if __name__ == '__main__':
assert os.getenv('USER') == 'zeller'
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Reporting Issues
## ----------------
if __name__ == '__main__':
print('\n## Reporting Issues')
### What Goes in a Bug Report?
if __name__ == '__main__':
print('\n### What Goes in a Bug Report?')
#### Steps to Reproduce (83%)
if __name__ == '__main__':
print('\n#### Steps to Reproduce (83%)')
#### Stack Traces (57%)
if __name__ == '__main__':
print('\n#### Stack Traces (57%)')
#### Test Cases (51%)
if __name__ == '__main__':
print('\n#### Test Cases (51%)')
#### Observed Behavior (33%)
if __name__ == '__main__':
print('\n#### Observed Behavior (33%)')
#### Screenshots (26%)
if __name__ == '__main__':
print('\n#### Screenshots (26%)')
#### Expected Behavior (22%)
if __name__ == '__main__':
print('\n#### Expected Behavior (22%)')
#### Configuration Information (< 12%)
if __name__ == '__main__':
print('\n#### Configuration Information (< 12%)')
### Reporting Crashes Automatically
if __name__ == '__main__':
print('\n### Reporting Crashes Automatically')
### Effective Issue Reporting
if __name__ == '__main__':
print('\n### Effective Issue Reporting')
## An Issue Tracker
## ----------------
if __name__ == '__main__':
print('\n## An Issue Tracker')
### Excursion: Setting up Redmine
if __name__ == '__main__':
print('\n### Excursion: Setting up Redmine')
import subprocess
import os
import sys
def with_ruby(cmd: str, inp: str = '', timeout: int = 30, show_stdout: bool = False) -> None:
    """Run `cmd` inside the local Redmine checkout under the project's RVM ruby.

    `inp` is fed to the command's stdin; stderr is always echoed, stdout only
    when `show_stdout` is set.  On timeout the child is killed and
    subprocess.TimeoutExpired is re-raised to the caller.
    """
    print(f"$ {cmd}")
    shell = subprocess.Popen(['/bin/sh', '-c',
                             f'''rvm_redmine=$HOME/.rvm/gems/ruby-2.7.2@redmine; \
                             rvm_global=$HOME/.rvm/gems/ruby-2.7.2@global; \
                             export GEM_PATH=$rvm_redmine:$rvm_global; \
                             export PATH=$rvm_redmine/bin:$rvm_global/bin:$HOME/.rvm/rubies/ruby-2.7.2/bin:$HOME/.rvm/bin:$PATH; \
                             cd $HOME/lib/redmine && {cmd}'''],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True)
    try:
        stdout_data, stderr_data = shell.communicate(inp, timeout=timeout)
    except subprocess.TimeoutExpired:
        # Kill the whole shell; the caller decides how to handle the timeout.
        shell.kill()
        raise
    print(stderr_data, file=sys.stderr, end="")
    if show_stdout:
        print(stdout_data, end="")
def with_mysql(cmd: str, timeout: int = 2, show_stdout: bool = False) -> None:
    """Execute a single SQL statement against the local MySQL server as root.

    A terminating ';' is appended to `cmd`.  stderr is always echoed, stdout
    only when `show_stdout` is set.  On timeout the client is killed and
    subprocess.TimeoutExpired is re-raised to the caller.
    """
    print(f"sql>{cmd}")
    sql = subprocess.Popen(["mysql", "-u", "root",
                            "--default-character-set=utf8mb4"],
                           stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           universal_newlines=True)
    try:
        stdout_data, stderr_data = sql.communicate(cmd + ';',
                                                   timeout=timeout)
    except subprocess.TimeoutExpired:
        # Kill the stuck client; the caller decides how to handle the timeout.
        sql.kill()
        raise
    print(stderr_data, file=sys.stderr, end="")
    if show_stdout:
        print(stdout_data, end="")
if __name__ == '__main__':
with_ruby("bundle config set without development test")
if __name__ == '__main__':
with_ruby("bundle install")
if __name__ == '__main__':
with_ruby("pkill sql; sleep 5")
if __name__ == '__main__':
try:
with_ruby("mysql.server start", show_stdout=True)
except subprocess.TimeoutExpired:
pass # Can actually start without producing output
if __name__ == '__main__':
with_mysql("drop database redmine")
if __name__ == '__main__':
with_mysql("drop user 'redmine'@'localhost'")
if __name__ == '__main__':
with_mysql("create database redmine character set utf8")
if __name__ == '__main__':
with_mysql("create user 'redmine'@'localhost' identified by 'my_password'")
if __name__ == '__main__':
with_mysql("grant all privileges on redmine.* to 'redmine'@'localhost'")
if __name__ == '__main__':
with_ruby("bundle exec rake generate_secret_token")
if __name__ == '__main__':
with_ruby("RAILS_ENV=production bundle exec rake db:migrate")
if __name__ == '__main__':
with_ruby("RAILS_ENV=production bundle exec rake redmine:load_default_data", '\n')
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Excursion: Starting Redmine
if __name__ == '__main__':
print('\n### Excursion: Starting Redmine')
import os
import time
from multiprocessing import Process
from typing import Tuple
def run_redmine(port: int) -> None:
    # Serve Redmine on `port`, blocking for up to an hour; meant to be the
    # target of a worker Process (see start_redmine). Output goes to redmine.log.
    with_ruby(f'exec rails s -e production -p {port} > redmine.log 2>&1',
              timeout=3600)
def start_redmine(port: int = 3000) -> Tuple[Process, str]:
    """Launch Redmine in a background process and return (process, base URL)."""
    server = Process(target=run_redmine, args=(port,))
    server.start()
    time.sleep(5)  # give the Rails server a moment to boot before first use
    return server, "http://localhost:" + str(port)
if __name__ == '__main__':
redmine_process, redmine_url = start_redmine()
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Excursion: Remote Control with Selenium
if __name__ == '__main__':
print('\n### Excursion: Remote Control with Selenium')
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
BROWSER = 'firefox'
if __name__ == '__main__':
with_ruby("pkill Firefox.app firefox-bin")
from .bookutils import rich_output
HEADLESS = True
from selenium.webdriver.remote.webdriver import WebDriver
def start_webdriver(browser: str = BROWSER, headless: bool = HEADLESS, zoom: float = 4.0) -> WebDriver:
    """Start a Selenium-controlled browser ('firefox' or 'chrome') and return its driver.

    `headless` suppresses the visible window; `zoom` scales Firefox rendering
    for higher-resolution screenshots.
    NOTE(review): any `browser` value other than 'firefox'/'chrome' leaves
    `options` (and the return value) unbound and raises NameError -- confirm
    whether other values should be rejected explicitly.
    """
    if browser == 'firefox':
        options = webdriver.FirefoxOptions()
    if browser == 'chrome':
        options = webdriver.ChromeOptions()
    # Chrome takes headless mode as a command-line argument; Firefox via a flag.
    if headless and browser == 'chrome':
        options.add_argument('headless')
    else:
        options.headless = headless
    # Start the browser, and obtain a _web driver_ object such that we can interact with it.
    if browser == 'firefox':
        # For firefox, set a higher resolution for our screenshots
        profile = webdriver.firefox.firefox_profile.FirefoxProfile()
        profile.set_preference("layout.css.devPixelsPerPx", repr(zoom))
        redmine_gui = webdriver.Firefox(firefox_profile=profile, options=options)
        # We set the window size such that it fits
        redmine_gui.set_window_size(500, 600)  # was 1024, 600
    elif browser == 'chrome':
        redmine_gui = webdriver.Chrome(options=options)
        redmine_gui.set_window_size(1024, 510 if headless else 640)
    return redmine_gui
if __name__ == '__main__':
redmine_gui = start_webdriver(browser=BROWSER, headless=HEADLESS)
if __name__ == '__main__':
redmine_gui.get(redmine_url)
if __name__ == '__main__':
from IPython.display import display, Image
if __name__ == '__main__':
Image(redmine_gui.get_screenshot_as_png())
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Excursion: Screenshots with Drop Shadows
if __name__ == '__main__':
print('\n### Excursion: Screenshots with Drop Shadows')
import tempfile
def drop_shadow(contents: bytes) -> bytes:
    """Add a drop shadow to a PNG (raw bytes) using ImageMagick's `convert`.

    Returns the transformed image bytes; any `convert` stderr is echoed.
    """
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(contents)
        tmp.flush()  # bug fix: ensure bytes are on disk before `convert` reads tmp.name
        convert = subprocess.Popen(
            ['convert', tmp.name,
             '(', '+clone', '-background', 'black', '-shadow', '50x10+15+15', ')',
             '+swap', '-background', 'none', '-layers', 'merge', '+repage', '-'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout_data, stderr_data = convert.communicate()
    if stderr_data:
        print(stderr_data.decode("utf-8"), file=sys.stderr, end="")
    return stdout_data
def screenshot(driver: WebDriver, width: int = 500) -> bytes:
    """Return a drop-shadowed screenshot of `driver`, wrapped as an IPython Image.

    Bug fix: the original ignored the `driver` parameter and always used the
    global `redmine_gui`; all existing callers pass `redmine_gui`, so behavior
    is unchanged for them.
    NOTE(review): the annotation says bytes, but an IPython `Image` is
    returned -- confirm the intended return type.
    """
    return Image(drop_shadow(driver.get_screenshot_as_png()), width=width)
if __name__ == '__main__':
screenshot(redmine_gui)
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Excursion: First Registration at Redmine
if __name__ == '__main__':
print('\n### Excursion: First Registration at Redmine')
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/login')
if __name__ == '__main__':
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_id("username").send_keys("admin")
redmine_gui.find_element_by_id("password").send_keys("admin")
redmine_gui.find_element_by_name("login").click()
if __name__ == '__main__':
time.sleep(2)
if __name__ == '__main__':
if redmine_gui.current_url.endswith('my/password'):
redmine_gui.get(redmine_url + '/my/password')
redmine_gui.find_element_by_id("password").send_keys("admin")
redmine_gui.find_element_by_id("new_password").send_keys("admin001")
redmine_gui.find_element_by_id("new_password_confirmation").send_keys("admin001")
display(screenshot(redmine_gui))
redmine_gui.find_element_by_name("commit").click()
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/logout')
redmine_gui.find_element_by_name("commit").click()
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/login')
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_id("username").send_keys("admin")
redmine_gui.find_element_by_id("password").send_keys("admin001")
redmine_gui.find_element_by_name("login").click()
screenshot(redmine_gui)
### Excursion: Creating a Project
if __name__ == '__main__':
print('\n### Excursion: Creating a Project')
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects')
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects/new')
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects/new')
redmine_gui.find_element_by_id('project_name').send_keys("The Debugging Book")
redmine_gui.find_element_by_id('project_description').send_keys("A Book on Automated Debugging")
redmine_gui.find_element_by_id('project_identifier').clear()
redmine_gui.find_element_by_id('project_identifier').send_keys("debuggingbook")
redmine_gui.find_element_by_id('project_homepage').send_keys("https://www.debuggingbook.org/")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_name('commit').click()
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects')
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects/debuggingbook')
screenshot(redmine_gui)
## Reporting an Issue
## ------------------
if __name__ == '__main__':
print('\n## Reporting an Issue')
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/issues/new')
screenshot(redmine_gui)
if __name__ == '__main__':
issue_title = "Does not render correctly on Nokia Communicator"
if __name__ == '__main__':
issue_description = \
"""The Debugging Book does not render correctly on the Nokia Communicator 9000.
Steps to reproduce:
1. On the Nokia, go to "https://debuggingbook.org/"
2. From the menu on top, select the chapter "Tracking Origins".
3. Scroll down to a place where a graph is supposed to be shown.
4. Instead of the graph, only a blank space is displayed.
How to fix:
* The graphs seem to come as SVG elements, but the Nokia Communicator does not support SVG rendering. Render them as JPEGs instead.
"""
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/issues/new')
redmine_gui.find_element_by_id('issue_subject').send_keys(issue_title)
redmine_gui.find_element_by_id('issue_description').send_keys(issue_description)
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_id('issue_assigned_to_id').click()
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_name('commit').click()
screenshot(redmine_gui)
from .bookutils import quiz
if __name__ == '__main__':
quiz("How many issues have been reported over time in Mozilla Bugzilla?",
[
"More than ten thousand",
"More than a hundred thousand",
"More than a million",
"More than ten million"
], '370370367 // 123456789')
if __name__ == '__main__':
redmine_gui.get("https://bugzilla.mozilla.org/buglist.cgi?quicksearch=firefox")
if __name__ == '__main__':
screenshot(redmine_gui)
### Excursion: Adding Some More Issue Reports
if __name__ == '__main__':
print('\n### Excursion: Adding Some More Issue Reports')
def new_issue(issue_title: str, issue_description: str) -> bytes:
    # File a new issue through the Redmine web UI (using the module-level
    # `redmine_gui` driver and `redmine_url`) and return a screenshot of the
    # resulting page.
    # NOTE(review): annotated -> bytes, but `screenshot` returns an IPython
    # Image -- confirm intended return type.
    redmine_gui.get(redmine_url + '/issues/new')
    redmine_gui.find_element_by_id('issue_subject').send_keys(issue_title)
    redmine_gui.find_element_by_id('issue_description').send_keys(issue_description)
    redmine_gui.find_element_by_name('commit').click()
    return screenshot(redmine_gui)
if __name__ == '__main__':
new_issue("Missing a Chapter on Parallel Debugging",
"""I am missing a chapter on (automatic) debugging of parallel and distributed systems,
including how to detect and repair data races, log message passing, and more.
In my experience, almost all programs are parallel today, so you are missing
an important subject.
""")
if __name__ == '__main__':
new_issue("Missing a PDF version",
"""Your 'book' does not provide a printed version. I think that printed books
* offer a more enjoyable experience for the reader
* allow me to annotate pages with my own remarks
* allow me to set dog-ear bookmatks
* allow me to show off what I'm currently reading (do you have a cover, too?)
Please provide a printed version - or, at least, produce a PDF version
of the debugging book, and make it available for download, such that I can print it myself.
""")
if __name__ == '__main__':
new_issue("No PDF version",
"""Can I have a printed version of your book? Please!""")
if __name__ == '__main__':
new_issue("Does not work with Python 2.7 or earlier",
"""I was deeply disappointed that your hew book requires Python 3.6 or later.
There are still several Python 2.x users out here (I, for one, cannot stand having to
type parentheses for every `print` statement), and I would love to run your code on
my Python 2.7 programs.
Would it be possible to backport the book's code such that it would run on Python 3.x
as well as Python 2.x? I would suggest that you add simple checks around your code
such as the following:
```
import sys
if sys.version_info.major >= 3:
print("The result is", x)
else:
print "The result is", x
```
As an alternative, rewrite the book in Python 2 and have it automatically translate to
Python 3. This way, you could address all Python lovers, not just Python 3 ones.
""")
if __name__ == '__main__':
new_issue("Support for C++",
"""I had lots of fun with your 'debugging book'. Yet, I was somewhat disappointed
to see that all code examples are in and for Python programs only. Is there a chance
to get them to work on a real programming language such as C or C++? This would also
open the way to discuss several new debugging techniques for bugs that occur in these
languages only. A chapter on C++ move semantics, and how to fix them, for instance,
would be highly appreciated.
""")
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
## Managing Issues
## ---------------
if __name__ == '__main__':
print('\n## Managing Issues')
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/projects/debuggingbook")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects/debuggingbook/issues')
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/")
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//tr[@id='issue-2']//a[@title='Actions']").click()
time.sleep(0.25)
if __name__ == '__main__':
tracker_item = redmine_gui.find_element_by_xpath(
"//div[@id='context-menu']//a[text()='Tracker']")
actions = webdriver.ActionChains(redmine_gui)
actions.move_to_element(tracker_item)
actions.perform()
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//div[@id='context-menu']//a[text()='Feature']").click()
def mark_tracker(issue: int, tracker: str) -> None:
    """Set the tracker of `issue` via the context menu on the issue list page."""
    redmine_gui.get(redmine_url + "/issues/")
    actions_link = redmine_gui.find_element_by_xpath(
        f"//tr[@id='issue-{str(issue)}']//a[@title='Actions']")
    actions_link.click()
    time.sleep(0.25)
    # Hover over the "Tracker" entry to open its submenu.
    submenu = redmine_gui.find_element_by_xpath(
        "//div[@id='context-menu']//a[text()='Tracker']")
    webdriver.ActionChains(redmine_gui).move_to_element(submenu).perform()
    time.sleep(0.25)
    choice = redmine_gui.find_element_by_xpath(
        f"//div[@id='context-menu']//a[text()='{tracker}']")
    choice.click()
if __name__ == '__main__':
mark_tracker(3, "Feature")
mark_tracker(4, "Feature")
mark_tracker(6, "Feature")
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/")
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
screenshot(redmine_gui)
## Assigning Priorities
## --------------------
if __name__ == '__main__':
print('\n## Assigning Priorities')
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/")
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//tr[@id='issue-1']//a[@title='Actions']").click()
time.sleep(0.25)
if __name__ == '__main__':
priority_item = redmine_gui.find_element_by_xpath("//div[@id='context-menu']//a[text()='Priority']")
actions = webdriver.ActionChains(redmine_gui)
actions.move_to_element(priority_item)
actions.perform()
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//div[@id='context-menu']//a[text()='Urgent']").click()
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/")
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
screenshot(redmine_gui)
## Assigning Issues
## ----------------
if __name__ == '__main__':
print('\n## Assigning Issues')
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/")
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//tr[@id='issue-1']//a[@title='Actions']").click()
time.sleep(0.25)
if __name__ == '__main__':
assignee_item = redmine_gui.find_element_by_xpath(
"//div[@id='context-menu']//a[text()='Assignee']")
actions = webdriver.ActionChains(redmine_gui)
actions.move_to_element(assignee_item)
actions.perform()
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//div[@id='context-menu']//a[text()='<< me >>']").click()
screenshot(redmine_gui)
## Resolving Issues
## ----------------
if __name__ == '__main__':
print('\n## Resolving Issues')
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/projects/debuggingbook/issues?query_id=1")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/1")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/1/edit")
redmine_gui.find_element_by_id("issue_status_id").click()
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//option[text()='Resolved']").click()
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
issue_notes = redmine_gui.find_element_by_id("issue_notes")
issue_notes.send_keys("Will only work for Nokia Communicator Rev B and later; "
"Rev A is still unsupported")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_name("commit").click()
screenshot(redmine_gui)
## The Life Cycle of an Issue
## --------------------------
if __name__ == '__main__':
print('\n## The Life Cycle of an Issue')
### Resolutions
if __name__ == '__main__':
print('\n### Resolutions')
#### FIXED
if __name__ == '__main__':
print('\n#### FIXED')
#### INVALID
if __name__ == '__main__':
print('\n#### INVALID')
#### WONTFIX
if __name__ == '__main__':
print('\n#### WONTFIX')
#### DUPLICATE
if __name__ == '__main__':
print('\n#### DUPLICATE')
#### WORKSFORME
if __name__ == '__main__':
print('\n#### WORKSFORME')
### An Issue Life Cycle
if __name__ == '__main__':
print('\n### An Issue Life Cycle')
from .Intro_Debugging import graph # minor dependency
if __name__ == '__main__':
from IPython.display import display
if __name__ == '__main__':
life_cycle = graph()
life_cycle.attr(rankdir='TB')
life_cycle.node('New', label="<<b>NEW</b>>", penwidth='2.0')
life_cycle.node('Assigned', label="<<b>ASSIGNED</b>>")
with life_cycle.subgraph() as res:
res.attr(rank='same')
res.node('Resolved', label="<<b>RESOLVED</b>>", penwidth='2.0')
res.node('Resolution',
shape='plain',
fillcolor='white',
label="""<<b>Resolution:</b> One of<br align="left"/>
• FIXED<br align="left"/>
• INVALID<br align="left"/>
• DUPLICATE<br align="left"/>
• WONTFIX<br align="left"/>
• WORKSFORME<br align="left"/>
>""")
res.node('Reopened', label="<<b>REOPENED</b>>", style='invis')
life_cycle.edge('New', 'Assigned', label=r"Assigned\lto developer")
life_cycle.edge('Assigned', 'Resolved', label="Developer has fixed bug")
life_cycle.edge('Resolution', 'Resolved', arrowhead='none', style='dashed')
life_cycle
if __name__ == '__main__':
life_cycle.node('Unconfirmed', label="<<b>UNCONFIRMED</b>>", penwidth='2.0')
# life_cycle.node('Verified', label="<<b>VERIFIED</b>>")
life_cycle.node('Closed', label="<<b>CLOSED</b>>", penwidth='2.0')
life_cycle.node('Reopened', label="<<b>REOPENED</b>>", style='filled')
life_cycle.node('New', label="<<b>NEW</b>>", penwidth='1.0')
life_cycle.edge('Unconfirmed', 'New', label="Confirmed as \"new\"")
life_cycle.edge('Unconfirmed', 'Closed', label=r"Resolved\las \"invalid\"\lor \"duplicate\"")
life_cycle.edge('Assigned', 'New', label="Unassigned")
life_cycle.edge('Resolved', 'Closed', label=r"Quality Assurance\lconfirms fix")
life_cycle.edge('Resolved', 'Reopened', label=r"Quality Assurance\lnot satisfied")
life_cycle.edge('Reopened', 'Assigned', label=r"Assigned\lto developer")
# life_cycle.edge('Verified', 'Closed', label="Bug is closed")
life_cycle.edge('Closed', 'Reopened', label=r"Bug is\lreopened")
life_cycle
if __name__ == '__main__':
redmine_process.terminate()
redmine_gui.close()
if __name__ == '__main__':
os.system("pkill ruby");
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Lessons Learned
## ---------------
if __name__ == '__main__':
print('\n## Lessons Learned')
## Next Steps
## ----------
if __name__ == '__main__':
print('\n## Next Steps')
## Background
## ----------
if __name__ == '__main__':
print('\n## Background')
## Exercises
## ---------
if __name__ == '__main__':
print('\n## Exercises')
| 27.675483 | 131 | 0.671972 |
74a986e80b80360fabb0d6a93f47c35e73f23bc0 | 856 | py | Python | example_code/assigned_functions_to_reactions.py | SysSynBio/PyFBA | 433ce72d9d4bc6ad2cd0f30f16e61439a338f2ab | [
"MIT"
] | 1 | 2021-12-10T03:20:35.000Z | 2021-12-10T03:20:35.000Z | example_code/assigned_functions_to_reactions.py | SysSynBio/PyFBA | 433ce72d9d4bc6ad2cd0f30f16e61439a338f2ab | [
"MIT"
] | null | null | null | example_code/assigned_functions_to_reactions.py | SysSynBio/PyFBA | 433ce72d9d4bc6ad2cd0f30f16e61439a338f2ab | [
"MIT"
] | null | null | null | import PyFBA
import argparse
import os
import sys
# Command-line driver: read roles either from an assigned_functions file (-a)
# or a plain roles file (-r), map them to reactions, and print one per line.
parser = argparse.ArgumentParser(description='Convert an assigned_functions file to a list of roles')
parser.add_argument('-a', help='assigned functions file')
parser.add_argument('-r', help='roles file (one per line)')
parser.add_argument('-v', help='verbose', action='store_true')
args = parser.parse_args()
if args.a:
    af = PyFBA.parse.read_assigned_functions(args.a)
    # Each value is itself a collection of roles; union them all.
    # (Replaced a side-effect list comprehension with an explicit loop.)
    roles = set()
    for function_roles in af.values():
        roles.update(function_roles)
elif args.r:
    roles = set()
    with open(args.r, 'r') as f:
        for l in f:
            roles.update(PyFBA.parse.roles_of_function(l.strip()))
else:
    sys.exit('Either -a or -r must be specified')
rc = PyFBA.filters.roles_to_reactions(roles)
reactions = set()
for r in rc:
    reactions.update(rc[r])
print("\n".join(reactions))
29f71e6bc91a7ee7eb17aa9b401dc30f7697dc65 | 3,805 | py | Python | dialogue-engine/src/programy/oob/defaults/alarm.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 104 | 2020-03-30T09:40:00.000Z | 2022-03-06T22:34:25.000Z | dialogue-engine/src/programy/oob/defaults/alarm.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 25 | 2020-06-12T01:36:35.000Z | 2022-02-19T07:30:44.000Z | dialogue-engine/src/programy/oob/defaults/alarm.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 10 | 2020-04-02T23:43:56.000Z | 2021-05-14T13:47:01.000Z | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.oob.defaults.oob import OutOfBandProcessor
class AlarmOutOfBandProcessor(OutOfBandProcessor):
    """Out-of-band processor for ``<alarm>`` commands.

    Two payload shapes are accepted::

        <oob><alarm><message><star/></message><get name="sraix"/></alarm></oob>
        <oob><alarm><hour>11</hour><minute>30</minute></alarm></oob>
    """

    def __init__(self):
        OutOfBandProcessor.__init__(self)
        self._hour = None
        self._min = None
        self._message = None

    def parse_oob_xml(self, oob):
        """Pull hour/minute or message out of the <alarm> element.

        Returns True when either a complete hour+minute pair or a
        message was found, False (after logging) otherwise.
        """
        if oob is not None:
            for element in oob:
                tag = element.tag
                if tag == 'hour':
                    self._hour = element.text
                elif tag == 'minute':
                    self._min = element.text
                elif tag == 'message':
                    self._message = element.text
                else:
                    YLogger.error(self, "Unknown child element [%s] in alarm oob", element.tag)
        # Valid when we have a full time, or a message, or both
        if (self._hour is not None and self._min is not None) or self._message is not None:
            return True
        YLogger.error(self, "Invalid alarm oob command, either hour,min or message ")
        return False

    def execute_oob_command(self, client_context):
        """Log what alarm was requested and return the OOB keyword."""
        if self._message is not None:
            YLogger.debug(client_context, "AlarmOutOfBandProcessor: Showing alarm=%s", self._message)
        elif self._hour is not None and self._min is not None:
            YLogger.debug(client_context, "AlarmOutOfBandProcessor: Setting alarm for %s:%s", self._hour, self._min)
        return "ALARM"
| 47.5625 | 126 | 0.706702 |
7ae0e0dbea30e6540dc65eea8724fec26421af25 | 4,255 | py | Python | bot.py | samkiani/discord_member_count_bot | c67f0675a6788083f2b18663f1bed51dbd65c3bf | [
"BSD-3-Clause"
] | 1 | 2022-01-09T04:05:49.000Z | 2022-01-09T04:05:49.000Z | bot.py | samkiani/discord_member_count_bot | c67f0675a6788083f2b18663f1bed51dbd65c3bf | [
"BSD-3-Clause"
] | 1 | 2022-03-25T07:09:42.000Z | 2022-03-25T07:14:34.000Z | bot.py | samkiani/discord_member_count_bot | c67f0675a6788083f2b18663f1bed51dbd65c3bf | [
"BSD-3-Clause"
] | 7 | 2021-05-01T15:54:00.000Z | 2022-03-29T18:50:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Konstantin (k0nze) Lübeck"
__copyright__ = "Copyright 2021, Discord Member Count Bot"
__credits__ = ["Alex Ronquillo: https://realpython.com/how-to-make-a-discord-bot-python/"]
__license__ = "BSD 3-Clause License"
__version__ = "0.1"
__contact__ = {
"Twitch": "https://twitch.tv/k0nze",
"Youtube": "https://youtube.com/k0nze",
"Patreon": "https://www.patreon.com/k0nze",
"Twitter": "https://twitter.com/k0nze_gg",
"TikTok": "https://www.tiktok.com/@k0nze.gg",
"Discord": "https://discord.k0nze.gg",
}
import os
import json
import discord
import random
from os.path import join, dirname
from dotenv import load_dotenv
from discord.utils import get
from discord.ext import commands
# load .env file sitting next to this script (explicit path so the cwd does not matter)
dir_path = os.path.dirname(os.path.realpath(__file__))
dotenv_path = join(dir_path, '.env')
load_dotenv(dotenv_path)
DISCORD_TOKEN = os.environ.get('DISCORD_TOKEN')
# NOTE(review): second load_dotenv() call and TOKEN look redundant — DISCORD_TOKEN
# above is what bot.run() uses; confirm TOKEN is not needed before removing.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
# get path to data.json file (guild id -> counter channel id/suffix mapping)
JSON_FILE = str(os.path.dirname(os.path.realpath(__file__))) + '/data.json'
# enable discord gateway intents; the privileged "members" intent is required
# to receive member join/leave events and to read guild.members
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix='!', intents=intents)
@bot.event
async def on_ready():
    """Runs once the bot has established a connection with Discord.

    Lists every guild the bot is connected to and refreshes each
    guild's member-count channel name.
    """
    print(f'{bot.user.name} has connected to Discord')
    # check if bot has connected to guilds
    if len(bot.guilds) > 0:
        print('connected to the following guilds:')
        # list guilds
        for guild in bot.guilds:
            # display guild name, id and member count
            print(f'* {guild.name}#{guild.id}, member count: {len(guild.members)}')
            # update the member count
            await update_member_count_channel_name(guild)
@bot.event
async def on_member_join(member):
    """Gets triggered when a new member joins a guild; refreshes the counter channel."""
    print(f"* {member} joined {member.guild}")
    await update_member_count_channel_name(member.guild)
@bot.event
async def on_member_remove(member):
    """Gets triggered when a member leaves or is removed from a guild; refreshes the counter channel."""
    print(f"* {member} left {member.guild}")
    await update_member_count_channel_name(member.guild)
@bot.command(name="update")
async def on_update_cmd(ctx):
    """!update command: triggers a manual refresh of the member-count channel."""
    print(f"* {ctx.author} issued update")
    await update_member_count_channel_name(ctx.guild)
async def update_member_count_channel_name(guild):
    """Rename the guild's counter channel to "<member count> <suffix>".

    The channel id and suffix are looked up in data.json via the two
    get_guild_member_count_* helpers. If the guild is not configured,
    or the configured channel id no longer exists in the guild, the
    update is skipped with a diagnostic message instead of crashing.
    """
    member_count_channel_id = get_guild_member_count_channel_id(guild)
    member_count_suffix = get_guild_member_count_suffix(guild)
    # PEP 8: compare against None with "is (not)", not "!="
    if member_count_channel_id is not None and member_count_suffix is not None:
        member_count_channel = discord.utils.get(guild.channels, id=member_count_channel_id)
        if member_count_channel is None:
            # stale configuration: the id in data.json matches no channel anymore
            print(f"* could not find channel {member_count_channel_id} in {guild}, check {JSON_FILE}")
            return
        new_name = f"{get_guild_member_count(guild)} {member_count_suffix}"
        await member_count_channel.edit(name=new_name)
    else:
        print(f"* could not update member count channel for {guild}, id not found in {JSON_FILE}")
def get_guild_member_count(guild):
    """Return how many members *guild* currently has."""
    members = guild.members
    return len(members)
def get_guild_member_count_channel_id(guild):
    """Return the configured member-count channel id for *guild*.

    data.json is re-read on every call, so edits to the file take
    effect without restarting the bot. Returns None when the guild id
    is not listed under "guilds".
    """
    # keep the with-block minimal: only the read needs the file handle open
    with open(JSON_FILE) as json_file:
        data = json.load(json_file)
    for data_guild in data['guilds']:
        # ids are stored as strings in the JSON file
        if int(data_guild['id']) == guild.id:
            return data_guild['channel_id']
    return None
def get_guild_member_count_suffix(guild):
    """Return the suffix displayed after the member count for *guild*.

    data.json is re-read on every call, so edits to the file take
    effect without restarting the bot. Returns None when the guild id
    is not listed under "guilds".
    """
    # keep the with-block minimal: only the read needs the file handle open
    with open(JSON_FILE) as json_file:
        data = json.load(json_file)
    for data_guild in data['guilds']:
        # ids are stored as strings in the JSON file
        if int(data_guild['id']) == guild.id:
            return data_guild['suffix']
    return None
if __name__ == "__main__":
    # launch bot (blocks until the process is stopped)
    bot.run(DISCORD_TOKEN)
4f5481a03cdde995f3365b9fa6dba88de8a881a9 | 225 | py | Python | sinoera/solarterm/greatcold.py | sinotradition/sinoera | 1e93482c0a56a8917bc7ceebeef5b63b24ca3651 | [
"Apache-2.0"
] | 1 | 2015-12-14T15:14:35.000Z | 2015-12-14T15:14:35.000Z | sinoera/solarterm/greatcold.py | sinotradition/sinoera | 1e93482c0a56a8917bc7ceebeef5b63b24ca3651 | [
"Apache-2.0"
] | null | null | null | sinoera/solarterm/greatcold.py | sinotradition/sinoera | 1e93482c0a56a8917bc7ceebeef5b63b24ca3651 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@contact: sinotradition@gmail.com
@copyright: License according to the project license.
'''
NAME='GreatCold'
SPELL='dàhán'
CN='大寒'
SEQ='24'
if __name__=='__main__':
pass
| 13.235294 | 53 | 0.706667 |
8d6cff8664800416fa88d27c291f1bf4a22d2bb8 | 18,802 | py | Python | elastic/tests/test_elastic.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | elastic/tests/test_elastic.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | elastic/tests/test_elastic.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import logging
from copy import deepcopy
import pytest
import requests
from six import iteritems
from datadog_checks.base import ConfigurationError
from datadog_checks.dev.utils import get_metadata_metrics
from datadog_checks.elastic import ESCheck
from datadog_checks.elastic.config import from_instance
from datadog_checks.elastic.metrics import (
CAT_ALLOCATION_METRICS,
CLUSTER_PENDING_TASKS,
STATS_METRICS,
ADDITIONAL_METRICS_1_x,
health_stats_for_version,
index_stats_for_version,
pshard_stats_for_version,
slm_stats_for_version,
stats_for_version,
)
from .common import CLUSTER_TAG, IS_OPENSEARCH, JVM_RATES, PASSWORD, URL, USER
log = logging.getLogger('test_elastic')
@pytest.mark.unit
def test__join_url():
    """_join_url should prepend the configured base URL, keeping the
    admin-forwarder path segment only when admin_forwarder is True."""
    conf = {
        "url": "https://localhost:9444/elasticsearch-admin",
        "admin_forwarder": True,
    }
    check = ESCheck('elastic', {}, instances=[conf])
    assert check._join_url("/stats", admin_forwarder=True) == "https://localhost:9444/elasticsearch-admin/stats"
    assert check._join_url("/stats", admin_forwarder=False) == "https://localhost:9444/stats"
@pytest.mark.parametrize(
    'instance, url_fix',
    [
        pytest.param({'url': URL}, '_local/'),
        pytest.param({'url': URL, "cluster_stats": True, "slm_stats": True}, ''),
    ],
)
@pytest.mark.unit
def test__get_urls(instance, url_fix):
    """_get_urls must emit the right endpoint set for each ES version.

    url_fix is '_local/' for node-local stats and '' when cluster_stats
    is enabled (stats for all nodes).
    """
    elastic_check = ESCheck('elastic', {}, instances=[instance])
    # unknown version: legacy 0.x-style endpoints, no pending-tasks/SLM URLs
    health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url = elastic_check._get_urls([])
    assert health_url == '/_cluster/health'
    assert stats_url == '/_cluster/nodes/' + url_fix + 'stats?all=true'
    assert pshard_stats_url == '/_stats'
    assert pending_tasks_url is None
    assert slm_url is None
    # ES 1.x: _nodes endpoint with ?all=true, pending tasks available
    health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url = elastic_check._get_urls([1, 0, 0])
    assert health_url == '/_cluster/health'
    assert stats_url == '/_nodes/' + url_fix + 'stats?all=true'
    assert pshard_stats_url == '/_stats'
    assert pending_tasks_url == '/_cluster/pending_tasks'
    assert slm_url is None
    # ES 6.x: ?all=true dropped from the stats endpoint
    health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url = elastic_check._get_urls([6, 0, 0])
    assert health_url == '/_cluster/health'
    assert stats_url == '/_nodes/' + url_fix + 'stats'
    assert pshard_stats_url == '/_stats'
    assert pending_tasks_url == '/_cluster/pending_tasks'
    assert slm_url is None
    # ES 7.4+: SLM endpoint appears, but only when slm_stats is enabled
    health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url = elastic_check._get_urls([7, 4, 0])
    assert health_url == '/_cluster/health'
    assert stats_url == '/_nodes/' + url_fix + 'stats'
    assert pshard_stats_url == '/_stats'
    assert pending_tasks_url == '/_cluster/pending_tasks'
    assert slm_url == ('/_slm/policy' if instance.get('slm_stats') is True else None)
@pytest.mark.integration
def test_custom_queries_valid_metrics(dd_environment, dd_run_check, instance, aggregator):
    """A valid custom query should emit both a gauge and a monotonic count."""
    custom_queries = [
        {
            'endpoint': '/_nodes',
            'data_path': '_nodes',
            'columns': [
                {
                    'value_path': 'total',
                    'name': 'elasticsearch.custom.metric',
                },
                # same value submitted a second time with an explicit metric type
                {'value_path': 'total', 'name': 'elasticsearch.custom.metric2', 'type': 'monotonic_count'},
            ],
        },
    ]
    instance = deepcopy(instance)
    instance['custom_queries'] = custom_queries
    check = ESCheck('elastic', {}, instances=[instance])
    dd_run_check(check)
    aggregator.assert_metric('elasticsearch.custom.metric2', metric_type=aggregator.MONOTONIC_COUNT)
    aggregator.assert_metric('elasticsearch.custom.metric', metric_type=aggregator.GAUGE)
@pytest.mark.integration
def test_custom_queries_one_invalid(dd_environment, dd_run_check, instance, aggregator):
    """A failing custom query must not prevent the other queries from running."""
    custom_queries = [
        {
            # Wrong endpoint
            'endpoint': '/_nodes2',
            'data_path': '_nodes',
            'columns': [
                {
                    'value_path': 'total',
                    'name': 'elasticsearch.custom.metric2',
                },
            ],
        },
        {
            # Good endpoint
            'endpoint': '/_nodes',
            'data_path': '_nodes',
            'columns': [
                {
                    'value_path': 'total',
                    'name': 'elasticsearch.custom.metric',
                },
            ],
        },
    ]
    instance = deepcopy(instance)
    instance['custom_queries'] = custom_queries
    check = ESCheck('elastic', {}, instances=[instance])
    dd_run_check(check)
    # only the metric from the good endpoint is expected
    aggregator.assert_metric('elasticsearch.custom.metric', metric_type=aggregator.GAUGE)
@pytest.mark.integration
def test_custom_queries_with_payload(dd_environment, dd_run_check, instance, aggregator, cluster_tags):
    """Custom queries can POST a search payload and derive tags from the response."""
    custom_queries = [
        {
            'endpoint': '/_search',
            'data_path': 'hits.total',
            'payload': {"query": {"match": {"phrase": {"query": ""}}}},
            'columns': [
                {
                    'value_path': 'value',
                    'name': 'elasticsearch.custom.metric',
                },
                # 'relation' from the response becomes a dynamic tag value
                {'value_path': 'relation', 'name': 'dynamic_tag', 'type': 'tag'},
            ],
        },
    ]
    instance = deepcopy(instance)
    instance['custom_queries'] = custom_queries
    check = ESCheck('elastic', {}, instances=[instance])
    dd_run_check(check)
    tags = cluster_tags + ['dynamic_tag:eq']
    aggregator.assert_metric('elasticsearch.custom.metric', metric_type=aggregator.GAUGE, tags=tags)
@pytest.mark.integration
def test_custom_queries_valid_tags(dd_environment, dd_run_check, instance, aggregator, cluster_tags):
    """Static 'tags' entries and dynamic tag columns should both be applied."""
    custom_queries = [
        {
            'endpoint': '/_nodes',
            'data_path': '_nodes',
            'columns': [
                {
                    'value_path': 'total',
                    'name': 'elasticsearch.custom.metric',
                },
                # the node total doubles as a dynamic tag value here
                {'value_path': 'total', 'name': 'dynamic_tag', 'type': 'tag'},
            ],
            'tags': ['custom_tag:1'],
        },
    ]
    instance = deepcopy(instance)
    instance['custom_queries'] = custom_queries
    check = ESCheck('elastic', {}, instances=[instance])
    dd_run_check(check)
    tags = cluster_tags + ['custom_tag:1'] + ['dynamic_tag:1']
    aggregator.assert_metric('elasticsearch.custom.metric', metric_type=aggregator.GAUGE, tags=tags)
@pytest.mark.integration
def test_custom_queries_non_existent_metrics(caplog, dd_environment, dd_run_check, instance, aggregator):
    """A value_path that matches nothing should emit no metric and log a debug message."""
    custom_queries = [
        {
            'endpoint': '/_nodes',
            'data_path': '_nodes',
            'columns': [
                {
                    'value_path': 'totals',  # nonexistent elasticsearch metric
                    'name': 'elasticsearch.custom.metric',
                },
            ],
            'tags': ['custom_tag:1'],
        },
    ]
    instance = deepcopy(instance)
    instance['custom_queries'] = custom_queries
    check = ESCheck('elastic', {}, instances=[instance])
    caplog.clear()
    with caplog.at_level(logging.DEBUG):
        dd_run_check(check)
    aggregator.assert_metric('elasticsearch.custom.metric', count=0)
    assert 'Metric not found: _nodes.totals -> elasticsearch.custom.metric' in caplog.text
@pytest.mark.integration
def test_custom_queries_non_existent_tags(caplog, dd_environment, dd_run_check, instance, aggregator, cluster_tags):
    """A tag column whose value_path matches nothing is skipped; the metric is still emitted."""
    custom_queries = [
        {
            'endpoint': '/_nodes',
            'data_path': '_nodes',
            'columns': [
                {
                    'value_path': 'total',
                    'name': 'elasticsearch.custom.metric',
                },
                {
                    'value_path': 'totals',  # nonexistent elasticsearch metric as tag
                    'name': 'nonexistent_tag',
                    'type': 'tag',
                },
            ],
        },
    ]
    instance = deepcopy(instance)
    instance['custom_queries'] = custom_queries
    check = ESCheck('elastic', {}, instances=[instance])
    caplog.clear()
    with caplog.at_level(logging.DEBUG):
        dd_run_check(check)
    # metric is tagged only with the cluster tags, no dynamic tag
    aggregator.assert_metric('elasticsearch.custom.metric', count=1, tags=cluster_tags)
    assert 'Dynamic tag is null: _nodes.total -> nonexistent_tag' in caplog.text
@pytest.mark.integration
def test_check(dd_environment, elastic_check, instance, aggregator, cluster_tags, node_tags):
    """Baseline integration run: all standard node/cluster metrics are emitted."""
    elastic_check.check(None)
    _test_check(elastic_check, instance, aggregator, cluster_tags, node_tags)
@pytest.mark.skipif(IS_OPENSEARCH, reason='Test unavailable for OpenSearch')
@pytest.mark.integration
def test_check_slm_stats(dd_environment, instance, aggregator, cluster_tags, node_tags, slm_tags):
    """With slm_stats enabled, SLM policy metrics are emitted on top of the standard set."""
    slm_instance = deepcopy(instance)
    slm_instance['slm_stats'] = True
    elastic_check = ESCheck('elastic', {}, instances=[slm_instance])
    elastic_check.check(None)
    _test_check(elastic_check, slm_instance, aggregator, cluster_tags, node_tags)
    # SLM stats
    slm_metrics = slm_stats_for_version(elastic_check._get_es_version())
    for m_name in slm_metrics:
        aggregator.assert_metric(m_name, at_least=1, tags=slm_tags)
@pytest.mark.integration
def test_disable_cluster_tag(dd_environment, instance, aggregator, new_cluster_tags):
    """disable_legacy_cluster_tag swaps the legacy cluster tag set for the new one."""
    disable_instance = deepcopy(instance)
    disable_instance['disable_legacy_cluster_tag'] = True
    elastic_check = ESCheck('elastic', {}, instances=[disable_instance])
    elastic_check.check(None)
    es_version = elastic_check._get_es_version()
    # cluster stats
    expected_metrics = health_stats_for_version(es_version)
    expected_metrics.update(CLUSTER_PENDING_TASKS)
    for m_name in expected_metrics:
        aggregator.assert_metric(m_name, at_least=1, tags=new_cluster_tags)
@pytest.mark.integration
def test_jvm_gc_rate_metrics(dd_environment, instance, aggregator, cluster_tags, node_tags):
    """gc_collectors_as_rate submits the JVM GC metrics as rates instead of counts."""
    instance['gc_collectors_as_rate'] = True
    check = ESCheck('elastic', {}, instances=[instance])
    check.check(instance)
    for metric in JVM_RATES:
        aggregator.assert_metric(metric, at_least=1, tags=node_tags)
    # the standard metric set must still be present
    _test_check(check, instance, aggregator, cluster_tags, node_tags)
def _test_check(elastic_check, instance, aggregator, cluster_tags, node_tags):
    """Shared assertions: verify the standard node/cluster metric set and service checks.

    Called by several integration tests after a check run; asserts against
    the metric lists expected for the detected ES version.
    """
    config = from_instance(instance)
    es_version = elastic_check._get_es_version()
    # node stats, blacklist metrics that can't be tested in a small, single node instance
    blacklist = ['elasticsearch.indices.segments.index_writer_max_memory_in_bytes']
    blacklist.extend(ADDITIONAL_METRICS_1_x)
    for m_name in stats_for_version(es_version):
        if m_name in blacklist:
            continue
        aggregator.assert_metric(m_name, at_least=1, tags=node_tags)
    # cluster stats
    expected_metrics = health_stats_for_version(es_version)
    expected_metrics.update(CLUSTER_PENDING_TASKS)
    for m_name in expected_metrics:
        aggregator.assert_metric(m_name, at_least=1, tags=cluster_tags)
    aggregator.assert_service_check('elasticsearch.can_connect', status=ESCheck.OK, tags=config.service_check_tags)
    # Assert service metadata
    # self.assertServiceMetadata(['version'], count=3)
    # FIXME: 0.90.13 returns randomly a red status instead of yellow,
    # so we don't do a coverage test for it
    # Remove me when we stop supporting 0.90.x (not supported anymore by ES)
    if es_version != [0, 90, 13]:
        # Warning because elasticsearch status should be yellow, according to
        # http://chrissimpson.co.uk/elasticsearch-yellow-cluster-status-explained.html
        aggregator.assert_service_check('elasticsearch.cluster_health')
@pytest.mark.integration
def test_node_name_as_host(dd_environment, instance_normalize_hostname, aggregator, node_tags):
    """With hostname normalization, node metrics are submitted under the node name."""
    elastic_check = ESCheck('elastic', {}, instances=[instance_normalize_hostname])
    elastic_check.check(None)
    # the last node tag is 'node_name:<name>'; extract the value part
    node_name = node_tags[-1].split(':')[1]
    for m_name, _ in iteritems(STATS_METRICS):
        aggregator.assert_metric(m_name, count=1, tags=node_tags, hostname=node_name)
@pytest.mark.integration
def test_pshard_metrics(dd_environment, aggregator):
    """With pshard_stats enabled, primary-shard gauges are emitted."""
    instance = {'url': URL, 'pshard_stats': True, 'username': USER, 'password': PASSWORD, 'tls_verify': False}
    elastic_check = ESCheck('elastic', {}, instances=[instance])
    es_version = elastic_check._get_es_version()
    elastic_check.check(None)
    pshard_stats_metrics = pshard_stats_for_version(es_version)
    for m_name, desc in iteritems(pshard_stats_metrics):
        if desc[0] == 'gauge':
            aggregator.assert_metric(m_name)
    # Our pshard metrics are getting sent, let's check that they're accurate
    # Note: please make sure you don't install Maven on the CI for future
    # elastic search CI integrations. It would make the line below fail :/
    aggregator.assert_metric('elasticsearch.primaries.docs.count')
@pytest.mark.integration
def test_detailed_index_stats(dd_environment, aggregator):
    """detailed_index_stats adds per-index tagging (index_name) to the pshard gauges."""
    instance = {
        "url": URL,
        "cluster_stats": True,
        "pshard_stats": True,
        "detailed_index_stats": True,
        "tls_verify": False,
    }
    elastic_check = ESCheck('elastic', {}, instances=[instance])
    es_version = elastic_check._get_es_version()
    elastic_check.check(None)
    pshard_stats_metrics = pshard_stats_for_version(es_version)
    for m_name, desc in iteritems(pshard_stats_metrics):
        # only the aggregate ('_all.') gauges are asserted unconditionally
        if desc[0] == 'gauge' and desc[1].startswith('_all.'):
            aggregator.assert_metric(m_name)
    # per-index breakdown: the aggregate plus both test indices
    aggregator.assert_metric_has_tag('elasticsearch.primaries.docs.count', tag='index_name:_all')
    aggregator.assert_metric_has_tag('elasticsearch.primaries.docs.count', tag='index_name:testindex')
    aggregator.assert_metric_has_tag('elasticsearch.primaries.docs.count', tag='index_name:.testindex')
    # system.* metrics come from the environment, not this check; exclude them
    aggregator.assert_metrics_using_metadata(
        get_metadata_metrics(),
        check_metric_type=False,
        exclude=[
            "system.cpu.idle",
            "system.load.1",
            "system.load.15",
            "system.load.5",
            "system.mem.free",
            "system.mem.total",
            "system.mem.usable",
            "system.mem.used",
            "system.net.bytes_rcvd",
            "system.net.bytes_sent",
            "system.swap.free",
            "system.swap.total",
            "system.swap.used",
        ],
    )
@pytest.mark.integration
def test_index_metrics(dd_environment, aggregator, instance, cluster_tags):
    """index_stats emits per-index metrics tagged with index_name (ES 1.0+ only)."""
    instance['index_stats'] = True
    elastic_check = ESCheck('elastic', {}, instances=[instance])
    es_version = elastic_check._get_es_version()
    if es_version < [1, 0, 0]:
        pytest.skip("Index metrics are only tested in version 1.0.0+")
    elastic_check.check(None)
    for m_name in index_stats_for_version(es_version):
        aggregator.assert_metric(m_name, tags=cluster_tags + ['index_name:testindex'])
        aggregator.assert_metric(m_name, tags=cluster_tags + ['index_name:.testindex'])
@pytest.mark.integration
def test_cat_allocation_metrics(dd_environment, aggregator, instance, cluster_tags):
    """cat_allocation_stats emits the disk-allocation metric set from the _cat API."""
    instance['cat_allocation_stats'] = True
    elastic_check = ESCheck('elastic', {}, instances=[instance])
    elastic_check.check(None)
    for m_name in CAT_ALLOCATION_METRICS:
        aggregator.assert_metric(m_name)
@pytest.mark.integration
def test_health_event(dd_environment, aggregator):
    """A degraded cluster yields an event on ES < 2.0 and a service check on 2.0+."""
    dummy_tags = ['elastique:recherche']
    instance = {'url': URL, 'username': USER, 'password': PASSWORD, 'tags': dummy_tags, 'tls_verify': False}
    elastic_check = ESCheck('elastic', {}, instances=[instance])
    es_version = elastic_check._get_es_version()
    # Should be yellow at first
    # force yellow status by requesting more replicas than there are nodes
    requests.put(URL + '/_settings', data='{"index": {"number_of_replicas": 100}', verify=False)
    elastic_check.check(None)
    if es_version < [2, 0, 0]:
        assert len(aggregator.events) == 1
        assert sorted(aggregator.events[0]['tags']) == sorted(set(['url:{}'.format(URL)] + dummy_tags + CLUSTER_TAG))
    else:
        aggregator.assert_service_check('elasticsearch.cluster_health')
@pytest.mark.integration
def test_metadata(dd_environment, aggregator, elastic_check, instance, version_metadata, datadog_agent):
    """The check reports exactly the expected version metadata to the agent."""
    elastic_check.check_id = 'test:123'
    elastic_check.check(None)
    datadog_agent.assert_metadata('test:123', version_metadata)
    datadog_agent.assert_metadata_count(len(version_metadata))
@pytest.mark.unit
@pytest.mark.parametrize(
    'instance, expected_aws_host, expected_aws_service',
    [
        pytest.param(
            {'auth_type': 'aws', 'aws_region': 'foo', 'url': 'http://example.com'},
            'example.com',
            'es',
            id='aws_host_from_url',
        ),
        pytest.param(
            {'auth_type': 'aws', 'aws_region': 'foo', 'aws_host': 'foo.com', 'url': 'http://example.com'},
            'foo.com',
            'es',
            id='aws_host_custom_with_url',
        ),
        pytest.param(
            {'auth_type': 'aws', 'aws_region': 'foo', 'aws_service': 'es-foo', 'url': 'http://example.com'},
            'example.com',
            'es-foo',
            id='aws_service_custom',
        ),
    ],
)
def test_aws_auth_url(instance, expected_aws_host, expected_aws_service):
    """AWS auth: aws_host defaults to the URL host and aws_service to 'es', both overridable."""
    check = ESCheck('elastic', {}, instances=[instance])
    assert getattr(check.http.options.get('auth'), 'aws_host', None) == expected_aws_host
    assert getattr(check.http.options.get('auth'), 'service', None) == expected_aws_service
    # make sure class attribute HTTP_CONFIG_REMAPPER is not modified
    assert 'aws_host' not in ESCheck.HTTP_CONFIG_REMAPPER
@pytest.mark.unit
@pytest.mark.parametrize(
    'instance, expected_aws_host, expected_aws_service',
    [
        pytest.param({}, None, None, id='not aws auth'),
        pytest.param(
            {'auth_type': 'aws', 'aws_region': 'foo', 'aws_host': 'foo.com'},
            'foo.com',
            'es',
            id='aws_host_custom_no_url',
        ),
    ],
)
def test_aws_auth_no_url(instance, expected_aws_host, expected_aws_service):
    """Without a 'url' in the config, the check constructor must raise ConfigurationError.

    NOTE(review): expected_aws_host/expected_aws_service are unused here —
    presumably leftovers from sharing the parametrize shape with
    test_aws_auth_url; confirm before cleaning up.
    """
    with pytest.raises(ConfigurationError):
        ESCheck('elastic', {}, instances=[instance])
@pytest.mark.e2e
def test_e2e(dd_agent_check, elastic_check, instance, cluster_tags, node_tags):
    """End-to-end: run the check through a real agent and verify the standard metric set."""
    aggregator = dd_agent_check(instance, rate=True)
    _test_check(elastic_check, instance, aggregator, cluster_tags, node_tags)
| 36.579767 | 117 | 0.663227 |
56451baa37a14e6c04b4307419a051983cdceed1 | 5,688 | py | Python | ietf/idindex/tests.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2015-09-02T19:53:12.000Z | 2015-09-02T19:53:12.000Z | ietf/idindex/tests.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ietf/idindex/tests.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | import os
import datetime
import shutil
from django.conf import settings
from ietf.doc.models import Document, DocAlias, RelatedDocument, State, LastCallDocEvent, NewRevisionDocEvent
from ietf.name.models import DocRelationshipName
from ietf.idindex.index import all_id_txt, all_id2_txt, id_index_txt
from ietf.utils.test_utils import TestCase
from ietf.utils.test_data import make_test_data
class IndexTests(TestCase):
    """Tests for the generated internet-draft index files (all_id.txt and friends)."""

    def setUp(self):
        self.id_dir = os.path.abspath("tmp-id-dir")
        if not os.path.exists(self.id_dir):
            os.mkdir(self.id_dir)
        # Save the original value so tearDown can restore it: overwriting a
        # global Django setting without restoring it would leak into other tests.
        self.saved_internet_draft_path = settings.INTERNET_DRAFT_PATH
        settings.INTERNET_DRAFT_PATH = self.id_dir

    def tearDown(self):
        # restore the setting mutated in setUp() before removing the temp dir
        settings.INTERNET_DRAFT_PATH = self.saved_internet_draft_path
        shutil.rmtree(self.id_dir)

    def write_draft_file(self, name, size):
        """Create a dummy draft file of *size* bytes in the draft directory."""
        with open(os.path.join(self.id_dir, name), 'w') as f:
            f.write("a" * size)

    def test_all_id_txt(self):
        """all_id_txt() must reflect the draft's state through its lifecycle."""
        draft = make_test_data()
        # active in IESG process
        draft.set_state(State.objects.get(type="draft", slug="active"))
        draft.set_state(State.objects.get(type="draft-iesg", slug="lc"))
        txt = all_id_txt()
        self.assertTrue(draft.name + "-" + draft.rev in txt)
        self.assertTrue(draft.get_state("draft-iesg").name in txt)
        # not active in IESG process
        draft.unset_state("draft-iesg")
        txt = all_id_txt()
        self.assertTrue(draft.name + "-" + draft.rev in txt)
        self.assertTrue("Active" in txt)
        # published
        draft.set_state(State.objects.get(type="draft", slug="rfc"))
        DocAlias.objects.create(name="rfc1234", document=draft)
        txt = all_id_txt()
        self.assertTrue(draft.name + "-" + draft.rev in txt)
        self.assertTrue("RFC\t1234" in txt)
        # replaced
        draft.set_state(State.objects.get(type="draft", slug="repl"))
        RelatedDocument.objects.create(
            relationship=DocRelationshipName.objects.get(slug="replaces"),
            source=Document.objects.create(type_id="draft", rev="00", name="draft-test-replacement"),
            target=draft.docalias_set.get(name__startswith="draft"))
        txt = all_id_txt()
        self.assertTrue(draft.name + "-" + draft.rev in txt)
        self.assertTrue("Replaced replaced by draft-test-replacement" in txt)

    def test_all_id2_txt(self):
        """all_id2_txt() must emit the full tab-separated field set for a draft."""
        draft = make_test_data()

        def get_fields(content):
            # return the tab-separated fields of the line describing our draft
            self.assertTrue(draft.name + "-" + draft.rev in content)
            for line in content.splitlines():
                if line.startswith(draft.name + "-" + draft.rev):
                    return line.split("\t")

        # test Active
        draft.set_state(State.objects.get(type="draft", slug="active"))
        draft.set_state(State.objects.get(type="draft-iesg", slug="review-e"))
        NewRevisionDocEvent.objects.create(doc=draft, type="new_revision", rev=draft.rev, by=draft.ad)
        self.write_draft_file("%s-%s.txt" % (draft.name, draft.rev), 5000)
        self.write_draft_file("%s-%s.pdf" % (draft.name, draft.rev), 5000)
        t = get_fields(all_id2_txt())
        self.assertEqual(t[0], draft.name + "-" + draft.rev)
        self.assertEqual(t[1], "-1")
        self.assertEqual(t[2], "Active")
        self.assertEqual(t[3], "Expert Review")
        self.assertEqual(t[4], "")
        self.assertEqual(t[5], "")
        self.assertEqual(t[6], draft.latest_event(type="new_revision").time.strftime("%Y-%m-%d"))
        self.assertEqual(t[7], draft.group.acronym)
        self.assertEqual(t[8], draft.group.parent.acronym)
        self.assertEqual(t[9], unicode(draft.ad))
        self.assertEqual(t[10], draft.intended_std_level.name)
        self.assertEqual(t[11], "")
        self.assertEqual(t[12], ".pdf,.txt")
        self.assertEqual(t[13], draft.title)
        author = draft.documentauthor_set.order_by("order").get()
        self.assertEqual(t[14], u"%s <%s>" % (author.author.person.name, author.author.address))
        self.assertEqual(t[15], u"%s <%s>" % (draft.shepherd.person.name, draft.shepherd.address))
        self.assertEqual(t[16], u"%s <%s>" % (draft.ad, draft.ad.email_address()))
        # test RFC
        draft.set_state(State.objects.get(type="draft", slug="rfc"))
        DocAlias.objects.create(name="rfc1234", document=draft)
        t = get_fields(all_id2_txt())
        self.assertEqual(t[4], "1234")
        # test Replaced
        draft.set_state(State.objects.get(type="draft", slug="repl"))
        RelatedDocument.objects.create(
            relationship=DocRelationshipName.objects.get(slug="replaces"),
            source=Document.objects.create(type_id="draft", rev="00", name="draft-test-replacement"),
            target=draft.docalias_set.get(name__startswith="draft"))
        t = get_fields(all_id2_txt())
        self.assertEqual(t[5], "draft-test-replacement")
        # test Last Call
        draft.set_state(State.objects.get(type="draft", slug="active"))
        draft.set_state(State.objects.get(type="draft-iesg", slug="lc"))
        e = LastCallDocEvent.objects.create(doc=draft, type="sent_last_call", expires=datetime.datetime.now() + datetime.timedelta(days=14), by=draft.ad)
        t = get_fields(all_id2_txt())
        self.assertEqual(t[11], e.expires.strftime("%Y-%m-%d"))

    def test_id_index_txt(self):
        """id_index_txt() includes title always, and the abstract only on request."""
        draft = make_test_data()
        draft.set_state(State.objects.get(type="draft", slug="active"))
        txt = id_index_txt()
        self.assertTrue(draft.name + "-" + draft.rev in txt)
        self.assertTrue(draft.title in txt)
        self.assertTrue(draft.abstract[:20] not in txt)
        txt = id_index_txt(with_abstracts=True)
        self.assertTrue(draft.abstract[:20] in txt)
| 38.958904 | 153 | 0.638361 |
959bba163af7b22f94e9a51f02280f94b1a530ee | 12,624 | py | Python | geniushubclient/const.py | rbubley/geniushub-client | 070a04f9274a646c5861095d93650e3cbd7178c9 | [
"MIT"
] | 8 | 2019-08-15T10:43:56.000Z | 2021-10-31T12:00:45.000Z | geniushubclient/const.py | rbubley/geniushub-client | 070a04f9274a646c5861095d93650e3cbd7178c9 | [
"MIT"
] | 12 | 2019-05-19T14:47:55.000Z | 2021-08-15T13:55:25.000Z | geniushubclient/const.py | rbubley/geniushub-client | 070a04f9274a646c5861095d93650e3cbd7178c9 | [
"MIT"
] | 3 | 2019-11-03T09:18:06.000Z | 2020-03-27T12:09:18.000Z | """Python client library for the Genius Hub API."""
from types import SimpleNamespace
DEFAULT_TIMEOUT_V1 = 120
DEFAULT_TIMEOUT_V3 = 20
# see: https://docs.geniushub.co.uk/pages/viewpage.action?pageId=14221432
HUB_SW_VERSIONS = {
"Dec 31 9999": "5.3.6+",
"Jan 16 2020": "5.3.6", # confirmed in testing
"Sep 9 2019": "5.3.5", # confirmed in testing
"Jul 23 2019": "5.3.2", # confirmed in testing
"Jun 25 2019": "5.3.0",
"Dec 20 2018": "5.2.10 (beta)",
"Dec 19 2018": "5.2.10", # confirmed in testing
"Jul 11 2018": "5.2.4",
"Jan 05 2018": "5.2.2",
"Jan 01 1000": "<5.2.2",
}
API_STATUS_ERROR = {
400: "The request body or request parameters are invalid.",
401: "The authorization information is missing or invalid.",
404: "No zone/device with the specified ID was found "
"(or the state property does not exist on the specified device).",
502: "The hub is offline.",
503: "The authorization information is invalid.",
}
# Footprint comfort levels keyed by the integer the hub reports.
FOOTPRINT_MODES = {1: "super-eco", 2: "eco", 3: "comfort"}

# the following is from the vendor's javascript
ZONE_TYPE = SimpleNamespace(
    Manager=1, OnOffTimer=2, ControlSP=3, ControlOnOffPID=4, TPI=5, Surrogate=6
)  # from app.js, search for '.Type = {'

# Internal zone-type id -> friendly name used by this library.
ITYPE_TO_TYPE = {  # ZONE_TYPE_MODEL
    ZONE_TYPE.Manager: "manager",  # "my house"
    ZONE_TYPE.OnOffTimer: "on / off",  # "on / off timer"
    ZONE_TYPE.ControlSP: "radiator",  # "radiator room"
    ZONE_TYPE.ControlOnOffPID: "wet underfloor",  # "control on / off PID"
    ZONE_TYPE.TPI: "hot water temperature",  # "TPI"
    ZONE_TYPE.Surrogate: "group",  # "group"
}
# Reverse lookup; all values above are unique so this inversion is lossless.
TYPE_TO_ITYPE = {v: k for k, v in ITYPE_TO_TYPE.items()}

# Zone modes are bit values; note Boost and Override are deliberate aliases
# for the same bit (16).
ZONE_MODE = SimpleNamespace(
    Off=1,
    Timer=2,
    Footprint=4,
    Away=8,
    Boost=16,
    Override=16,
    Early=32,
    Test=64,
    Linked=128,
    Other=256,
)  # from app.js, search for '.ZoneModes = {'

# Internal mode bit -> friendly mode name.
IMODE_TO_MODE = {  # MODE_MODEL
    ZONE_MODE.Off: "off",
    ZONE_MODE.Timer: "timer",  # could also be 'sense' mode
    ZONE_MODE.Footprint: "footprint",
    ZONE_MODE.Away: "off",  # v1 API says 'off', not 'away'
    ZONE_MODE.Boost: "override",
    ZONE_MODE.Early: "early",
    ZONE_MODE.Test: "test",
    ZONE_MODE.Linked: "linked",
    ZONE_MODE.Other: "other",
}
# NOTE(review): IMODE_TO_MODE maps both Off (1) and Away (8) to "off", so in
# this inversion the later entry wins and MODE_TO_IMODE["off"] == ZONE_MODE.Away
# (8), not ZONE_MODE.Off (1) -- confirm this is the intended round-trip.
MODE_TO_IMODE = {v: k for k, v in IMODE_TO_MODE.items()}
# Zone status flags; bit values that may be combined in a bitmask.
ZONE_FLAG = SimpleNamespace(
    Frost=1,
    Timer=2,
    Footprint=4,
    Boost=8,
    Away=16,
    WarmupAuto=32,
    WarmupManual=64,
    Reactive=128,
    Linked=256,
    WeatherComp=512,
    Temps=1024,
    TPI=2048,
)  # from app.js, search for '.ZoneFlags = {'

# Issue severity level (integer from the hub) -> text label.
ISSUE_TEXT = {0: "information", 1: "warning", 2: "error"}

# Issue id -> message template; {zone_name} / {device_type} placeholders are
# filled in from the issue's data payload.
ISSUE_DESCRIPTION = {
    "manager:no_boiler_controller": "The hub does not have a boiler controller assigned",
    "manager:no_boiler_comms": "The hub has lost communication with the boiler controller",
    "manager:no_temp": "The hub does not have a valid temperature",
    "manager:weather": "Unable to fetch the weather data",  # correct
    "manager:weather_data": "Weather data -",
    "zone:using_weather_temp": "{zone_name} is currently using the outside temperature",  # correct
    "zone:using_assumed_temp": "{zone_name} is currently using the assumed temperature",
    "zone:tpi_no_temp": "{zone_name} currently has no valid temperature",  # correct
    "node:no_comms": "The {device_type} has lost communication with the Hub",
    "node:not_seen": "The {device_type} in {zone_name} can not been found by the Hub",  # correct
    "node:low_battery": "The battery for the {device_type} in {zone_name} is dead and needs to be replaced",  # correct
    "node:warn_battery": "The battery for the {device_type} is low",
    "node:assignment_limit_exceeded": "{device_type} has been assigned to too many zones",  # for DCR channels
}  # from app.js, search for: "node:, "zone:, "manager:

# Example errors
# {'id': 'node:low_battery', 'level': 2, 'data': {'location': 'Room 2.2',
#  'nodeHash': '0x00000002A0107FFF', 'nodeID': '27', 'batteryLevel': 255}}
# {'id': 'node:not_seen', 'level': 2, 'data': {'location': 'Kitchen',
#  'nodeHash': '0x0000000000000000', 'nodeID': '4'}}
# {'id': 'zone:tpi_no_temp', 'level': 2, 'data': {'location': 'Temp'}}
# {'id': 'zone:using_weather_temp', 'level': 1, 'data': {'location': 'Test Rad'}}

# Weekday index (as used by the hub's schedules) -> weekday name; day 0 is
# Sunday, per this table.
IDAY_TO_DAY = {
    0: "sunday",
    1: "monday",
    2: "tuesday",
    3: "wednesday",
    4: "thursday",
    5: "friday",
    6: "saturday",
}
# Attribute selections used when serialising objects: "summary_keys" for the
# short form, "summary_keys" + "detail_keys" for the verbose form.
ATTRS_ZONE = {
    "summary_keys": ["id", "name", "output"],
    "detail_keys": [
        "type",
        "mode",
        "temperature",
        "setpoint",
        "occupied",
        "override",
        "schedule",
    ],
}
ATTRS_DEVICE = {
    "summary_keys": ["id", "type"],
    "detail_keys": ["assignedZones", "state"],
}
# Issues expose everything in their summary; no extra detail keys.
ATTRS_ISSUE = {"summary_keys": ["description", "level"], "detail_keys": []}
# The following MODELs are from Vendor's bower.js, search for: 'Model: [{'
# 0x0000000200040205 - DanFoss Room thermostat?
#
# Vendor device catalogue, reproduced verbatim from the vendor's javascript.
# Each entry maps a device "hash" (the node's identity code) to its SKU,
# human-readable description, short device string, and the zone-type ids
# (see ZONE_TYPE) the device may be assigned to. The first two entries are
# placeholders for virtual / unrecognised nodes and carry no assignment info.
DEVICES_MODEL = [
    {"hash": "VIRTUAL", "sku": "virtual node", "description": "Virtual Node"},
    {"hash": "0x0000000000000000", "sku": "n/a", "description": "Unrecognised Device"},
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "Radiator Valve",
        "deviceString": "wrv",
        "hash": "0x0000000200030005",
        "sku": "da-wrv-a",
    },
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "Radiator Valve",
        "deviceString": "wrv",
        "hash": "0x0000000200040005",
        "sku": "da-wrv-b",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Dual Underfloor Receiver",
        "deviceString": "dur",
        "hash": "0x0000000220008004",
        "sku": "da-dur-a",
    },
    {
        "assignableZoneTypeIds": [1, 3, 5],
        "description": "Room Thermostat",
        "deviceString": "wrt",
        "hash": "0x0000000280100003",
        "sku": "da-wrt-c",
    },
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "Genius Valve",
        "deviceString": "wrv",
        "hash": "0x00000002A0107FFF",
        "sku": "da-wrv-c",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Single Channel Receiver",
        "deviceString": "scr",
        "hash": "0x0000005900010003",
        "sku": "ho-scr-c",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Electric Switch",
        "deviceString": "esw",
        "hash": "0x0000005900010010",
        "sku": "ho-esw-d",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Dual Channel Receiver",
        "deviceString": "dcr",
        "hash": "0x0000005900020003",
        "sku": "ho-dcr-c",
    },
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "Temperature Sensor",
        "deviceString": "wts",
        "hash": "0x000000590002000D",
        "sku": "ho-wts-a",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Electric Switch",
        "deviceString": "esw",
        "hash": "0x0000005900020010",
        "sku": "ho-esw-d",
    },
    {
        "assignableZoneTypeIds": [1, 3, 5],
        "description": "Room Thermostat",
        "deviceString": "wrt",
        "hash": "0x0000005900030001",
        "sku": "ho-wrt-b",
    },
    {
        "assignableZoneTypeIds": [1, 3, 5],
        "description": "Room Thermostat",
        "deviceString": "wrt",
        "hash": "0x0000005900050001",
        "sku": "ho-wrt-d",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Single Channel Receiver",
        "deviceString": "scr",
        "hash": "0x0000005900050003",
        "sku": "ho-scr-d",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Dual Channel Receiver",
        "deviceString": "dcr",
        "hash": "0x0000005900060003",
        "sku": "ho-dcr-d",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Smart Plug",
        "deviceString": "plg",
        "hash": "0x0000006000010003",
        "sku": "ev-plg-a",
    },
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "Temperature Humidity Sensor",
        "deviceString": "ths",
        "hash": "0x0000006000010006",
        "sku": "es-ths-a",
    },
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "Motion Sensor",
        "deviceString": "wms",
        "hash": "0x0000006000020001",
        "sku": "es-wms-a",
    },
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "Temperature Sensor",
        "deviceString": "wts",
        "hash": "0x00000071035D0002",
        "sku": "ls-wts-a",
    },
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "CO2 Sensor",
        "deviceString": "cos",
        "hash": "0x00000081000100A0",
        "sku": "sa-cos-a",
    },
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "Room Sensor",
        "deviceString": "wrs",
        "hash": "0x0000008600050002",
        "sku": "al-wrs-c",
    },
    {
        "assignableZoneTypeIds": [1],
        "description": "Gas Meter Reader",
        "deviceString": "umr",
        "hash": "0x0000009600010010",
        "sku": "nq-umr-a",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Smart Plug",
        "deviceString": "plg",
        "hash": "0x0000013C00010001",
        "sku": "ph-plg-c",
    },
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "Room Sensor",
        "deviceString": "wrs",
        "hash": "0x0000013C00020002",
        "sku": "ph-wrs-a",
    },
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "Room Sensor",
        "deviceString": "wrs",
        "hash": "0x0000013C000C0002",
        "sku": "ph-wrs-b",
    },
    {
        "assignableZoneTypeIds": [3, 5],
        "description": "Room Sensor",
        "deviceString": "wrs",
        "hash": "0x0000013C000D0002",
        "sku": "ph-wrs-b",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Electric Switch",
        "deviceString": "esw",
        "hash": "0x0000013C000F0001",
        "sku": "ph-esw-b",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Electric Switch",
        "deviceString": "esw",
        "hash": "0x0000013C00100001",
        "sku": "ph-esw-a",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Smart Plug",
        "deviceString": "plg",
        "hash": "0x0000013C00110001",
        "sku": "ph-plg-c",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "In Wall Meter",
        "deviceString": "iwm",
        "hash": "0x0000013C001A0006",
        "sku": "ph-iwm-a",
    },
    {
        "assignableZoneTypeIds": [1, 2, 3, 5, 6],
        "description": "Smart Plug",
        "deviceString": "plg",
        "hash": "0x0000015400011100",
        "sku": "po-plg-b",
    },
]  # from bower.js, search for: 'Model: [{'

# Convenience lookups derived from the catalogue; hashes are unique so these
# inversions are lossless.
DEVICE_HASH_TO_TYPE = {d["hash"]: d["description"] for d in DEVICES_MODEL}
SKU_BY_HASH = {d["hash"]: d["sku"] for d in DEVICES_MODEL}
# Device state channels: the hub's channel "id" -> the attribute slug and
# value type this library exposes. "Switch Binary" and "SwitchBinary" are
# both present because the hub reports either spelling; they map to the
# same slug.
CHANNELS_MODEL = [
    {
        "id": "Switch Binary",
        "description": "Output On/Off",
        "slug": "outputOnOff",
        "type": "Boolean",
    },
    {
        "id": "SwitchBinary",
        "description": "Output On/Off",
        "slug": "outputOnOff",
        "type": "Boolean",
    },
    {
        "id": "Battery",
        "description": "Battery Level",
        "slug": "batteryLevel",
        "type": "Number",
    },
    {
        "id": "HEATING_1",
        "description": "Set Temperature",
        "slug": "setTemperature",
        "type": "Number",
    },
    {
        "id": "TEMPERATURE",
        "description": "Measured Temperature",
        "slug": "measuredTemperature",
        "type": "Number",
    },
    {
        "id": "LUMINANCE",
        "description": "Luminance",
        "slug": "luminance",
        "type": "Number",
    },
    {
        "id": "Motion",
        "description": "Occupancy Trigger",
        "slug": "occupancyTrigger",
        "type": "Number",
    },
]
# Channel id -> attribute slug, e.g. "Battery" -> "batteryLevel".
STATE_ATTRS = {c["id"]: c["slug"] for c in CHANNELS_MODEL}
# Equipment-kit bit flags describing which device capabilities a zone has.
ZONE_KIT = SimpleNamespace(  # ZONE_KIT_MODEL
    Temp=1,
    Valve=2,
    PIR=4,
    Power=8,
    Switch=16,
    Dimmer=32,
    Alarm=64,
    GlobalTemp=128,
    Humidity=256,
    Luminance=512,
    GasMeter=1024,
    # NOTE(review): 2014 breaks the power-of-two progression of every other
    # flag (next bit would be 2048) -- verify against the vendor's app.js
    # before "fixing", since these values must match what the hub reports.
    CO2=2014,
)  # from app.js, search for '.EquipmentTypes = {'
| 29.703529 | 119 | 0.549984 |
844850a325de0c12f0d033ea7285e334ae1196cc | 26,501 | py | Python | libcloud/compute/drivers/gandi.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 3 | 2015-09-11T15:42:16.000Z | 2021-05-12T01:10:05.000Z | libcloud/compute/drivers/gandi.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 1 | 2015-10-26T21:29:56.000Z | 2015-10-27T17:29:20.000Z | libcloud/compute/drivers/gandi.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 3 | 2016-02-08T23:38:18.000Z | 2019-11-05T00:31:34.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Gandi driver for compute
"""
import sys
from datetime import datetime
from libcloud.common.gandi import BaseGandiDriver, GandiException,\
NetworkInterface, IPAddress, Disk
from libcloud.compute.base import KeyPair
from libcloud.compute.base import StorageVolume
from libcloud.compute.types import NodeState, Provider
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
# Gandi VM state string -> libcloud NodeState. Several distinct Gandi states
# ('halted', 'paused', 'locked', 'deleted') are all coarsely reported as
# TERMINATED by this driver.
NODE_STATE_MAP = {
    'running': NodeState.RUNNING,
    'halted': NodeState.TERMINATED,
    'paused': NodeState.TERMINATED,
    'locked': NodeState.TERMINATED,
    'being_created': NodeState.PENDING,
    'invalid': NodeState.UNKNOWN,
    'legally_locked': NodeState.PENDING,
    'deleted': NodeState.TERMINATED
}

# Reference hourly price; not read anywhere in this module (pricing is looked
# up via _get_size_price) -- presumably kept for documentation purposes.
NODE_PRICE_HOURLY_USD = 0.02

# Instance sizes for Gandi's "new rating" model, keyed by size id.
# Presumably memory is in MB, disk in GB and bandwidth in kbit/s -- the units
# are not stated here; confirm against the Gandi hosting API docs.
INSTANCE_TYPES = {
    'small': {
        'id': 'small',
        'name': 'Small instance',
        'cpu': 1,
        'memory': 256,
        'disk': 3,
        'bandwidth': 10240,
    },
    'medium': {
        'id': 'medium',
        'name': 'Medium instance',
        'cpu': 1,
        'memory': 1024,
        'disk': 20,
        'bandwidth': 10240,
    },
    'large': {
        'id': 'large',
        'name': 'Large instance',
        'cpu': 2,
        'memory': 2048,
        'disk': 50,
        'bandwidth': 10240,
    },
    'x-large': {
        'id': 'x-large',
        'name': 'Extra Large instance',
        'cpu': 4,
        'memory': 4096,
        'disk': 100,
        'bandwidth': 10240,
    },
}
class GandiNodeDriver(BaseGandiDriver, NodeDriver):
    """
    Gandi node driver
    """
    api_name = 'gandi'
    friendly_name = 'Gandi.net'
    website = 'http://www.gandi.net/'
    country = 'FR'
    type = Provider.GANDI
    # TODO : which features to enable ?
    features = {}

    def __init__(self, *args, **kwargs):
        """
        @inherits: :class:`NodeDriver.__init__`
        """
        # NOTE(review): super(BaseGandiDriver, self) skips BaseGandiDriver's
        # own __init__ in the MRO; kept as-is to preserve the established
        # initialisation behaviour -- confirm this is intentional upstream.
        super(BaseGandiDriver, self).__init__(*args, **kwargs)

    def _resource_info(self, type, id):
        """Return the raw info dict for a hosting resource.

        :param type: resource kind, e.g. 'vm' or 'disk' (names the API method)
        :param id: resource id
        :raises GandiException: (code 1003) wrapping any underlying error
        """
        # Fix: removed the unreachable "return None" that followed the raise.
        try:
            obj = self.connection.request('hosting.%s.info' % type, int(id))
            return obj.object
        except Exception:
            # sys.exc_info() form kept for Python 2/3 compatibility,
            # matching the rest of this module.
            e = sys.exc_info()[1]
            raise GandiException(1003, e)

    def _node_info(self, id):
        """Raw VM info dict for the given node id."""
        return self._resource_info('vm', id)

    def _volume_info(self, id):
        """Raw disk info dict for the given volume id."""
        return self._resource_info('disk', id)

    # Generic methods for driver
    def _to_node(self, vm):
        """Convert a raw VM dict into a libcloud :class:`Node`."""
        return Node(
            id=vm['id'],
            name=vm['hostname'],
            state=NODE_STATE_MAP.get(
                vm['state'],
                NodeState.UNKNOWN
            ),
            public_ips=vm.get('ips', []),
            private_ips=[],
            driver=self,
            extra={
                'ai_active': vm.get('ai_active'),
                'datacenter_id': vm.get('datacenter_id'),
                'description': vm.get('description')
            }
        )

    def _to_nodes(self, vms):
        return [self._to_node(v) for v in vms]

    def _to_volume(self, disk):
        """Convert a raw disk dict into a libcloud :class:`StorageVolume`."""
        extra = {'can_snapshot': disk['can_snapshot']}
        return StorageVolume(
            id=disk['id'],
            name=disk['name'],
            size=int(disk['size']),
            driver=self,
            extra=extra)

    def _to_volumes(self, disks):
        return [self._to_volume(d) for d in disks]

    def list_nodes(self):
        """
        Return a list of nodes in the current zone or all zones.

        :return: List of Node objects
        :rtype: ``list`` of :class:`Node`
        """
        vms = self.connection.request('hosting.vm.list').object
        ips = self.connection.request('hosting.ip.list').object
        # Attach the public IPs belonging to each VM's first interface.
        for vm in vms:
            vm['ips'] = []
            for ip in ips:
                if vm['ifaces_id'][0] == ip['iface_id']:
                    ip = ip.get('ip', None)
                    if ip:
                        vm['ips'].append(ip)
        nodes = self._to_nodes(vms)
        return nodes

    def ex_get_node(self, node_id):
        """
        Return a Node object based on a node id.

        :param name: The ID of the node
        :type name: ``int``

        :return: A Node object for the node
        :rtype: :class:`Node`
        """
        vm = self.connection.request('hosting.vm.info', int(node_id)).object
        ips = self.connection.request('hosting.ip.list').object
        vm['ips'] = []
        for ip in ips:
            if vm['ifaces_id'][0] == ip['iface_id']:
                ip = ip.get('ip', None)
                if ip:
                    vm['ips'].append(ip)
        node = self._to_node(vm)
        return node

    def reboot_node(self, node):
        """
        Reboot a node.

        :param node: Node to be rebooted
        :type node: :class:`Node`

        :return: True if successful, False if not
        :rtype: ``bool``
        """
        op = self.connection.request('hosting.vm.reboot', int(node.id))
        self._wait_operation(op.object['id'])
        vm = self._node_info(int(node.id))
        if vm['state'] == 'running':
            return True
        return False

    def destroy_node(self, node):
        """
        Destroy a node.

        :param node: Node object to destroy
        :type node: :class:`Node`

        :return: True if successful
        :rtype: ``bool``
        """
        vm = self._node_info(node.id)
        if vm['state'] == 'running':
            # Send vm_stop and wait for accomplish
            op_stop = self.connection.request('hosting.vm.stop', int(node.id))
            if not self._wait_operation(op_stop.object['id']):
                raise GandiException(1010, 'vm.stop failed')
        # Delete
        op = self.connection.request('hosting.vm.delete', int(node.id))
        if self._wait_operation(op.object['id']):
            return True
        return False

    def deploy_node(self, **kwargs):
        """
        deploy_node is not implemented for gandi driver

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'deploy_node not implemented for gandi driver')

    def create_node(self, **kwargs):
        """
        Create a new Gandi node

        :keyword name: String with a name for this new node (required)
        :type name: ``str``

        :keyword image: OS Image to boot on node. (required)
        :type image: :class:`NodeImage`

        :keyword location: Which data center to create a node in. If empty,
                           undefined behavior will be selected. (optional)
        :type location: :class:`NodeLocation`

        :keyword size: The size of resources allocated to this node.
                       (required)
        :type size: :class:`NodeSize`

        :keyword login: user name to create for login on machine (required)
        :type login: ``str``

        :keyword password: password for user that'll be created (required)
        :type password: ``str``

        :keyword inet_family: version of ip to use, default 4 (optional)
        :type inet_family: ``int``

        :keyword keypairs: IDs of keypairs or Keypairs object
        :type keypairs: list of ``int`` or :class:`.KeyPair`

        :rtype: :class:`Node`
        """
        if not kwargs.get('login') and not kwargs.get('keypairs'):
            raise GandiException(1020, "Login and password or ssh keypair "
                                 "must be defined for node creation")

        location = kwargs.get('location')
        if location and isinstance(location, NodeLocation):
            dc_id = int(location.id)
        else:
            raise GandiException(
                1021, 'location must be a subclass of NodeLocation')

        size = kwargs.get('size')
        # Fix: was "if not size and not isinstance(...)", which let any truthy
        # non-NodeSize value (e.g. a string) slip past validation; both a
        # missing and a wrong-typed size must raise.
        if not size or not isinstance(size, NodeSize):
            raise GandiException(
                1022, 'size must be a subclass of NodeSize')

        keypairs = kwargs.get('keypairs', [])
        keypair_ids = [
            k if isinstance(k, int) else k.extra['id']
            for k in keypairs
        ]

        # If size name is in INSTANCE_TYPE we use new rating model
        instance = INSTANCE_TYPES.get(size.id)
        cores = instance['cpu'] if instance else int(size.id)

        src_disk_id = int(kwargs['image'].id)
        disk_spec = {
            'datacenter_id': dc_id,
            'name': 'disk_%s' % kwargs['name']
        }
        vm_spec = {
            'datacenter_id': dc_id,
            'hostname': kwargs['name'],
            'memory': int(size.ram),
            'cores': cores,
            'bandwidth': int(size.bandwidth),
            'ip_version': kwargs.get('inet_family', 4),
        }
        if kwargs.get('login') and kwargs.get('password'):
            vm_spec.update({
                'login': kwargs['login'],
                'password': kwargs['password'],  # TODO : use NodeAuthPassword
            })
        if keypair_ids:
            vm_spec['keys'] = keypair_ids

        # Call create_from helper api. Return 3 operations : disk_create,
        # iface_create,vm_create
        (op_disk, op_iface, op_vm) = self.connection.request(
            'hosting.vm.create_from',
            vm_spec, disk_spec, src_disk_id
        ).object

        # We wait for vm_create to finish
        if self._wait_operation(op_vm['id']):
            # after successful operation, get ip information
            # thru first interface
            node = self._node_info(op_vm['vm_id'])
            ifaces = node.get('ifaces')
            if len(ifaces) > 0:
                ips = ifaces[0].get('ips')
                if len(ips) > 0:
                    node['ip'] = ips[0]['ip']
            return self._to_node(node)
        return None

    def _to_image(self, img):
        return NodeImage(
            id=img['disk_id'],
            name=img['label'],
            driver=self.connection.driver
        )

    def list_images(self, location=None):
        """
        Return a list of image objects.

        :keyword location: Which data center to filter a images in.
        :type location: :class:`NodeLocation`

        :return: List of GCENodeImage objects
        :rtype: ``list`` of :class:`GCENodeImage`
        """
        try:
            if location:
                filtering = {'datacenter_id': int(location.id)}
            else:
                filtering = {}
            images = self.connection.request('hosting.image.list', filtering)
            return [self._to_image(i) for i in images.object]
        except Exception:
            e = sys.exc_info()[1]
            raise GandiException(1011, e)

    def _to_size(self, id, size):
        """Build a NodeSize for the legacy share-based rating model."""
        return NodeSize(
            id=id,
            name='%s cores' % id,
            ram=size['memory'],
            disk=size['disk'],
            bandwidth=size['bandwidth'],
            # Legacy model: price scales linearly with the number of cores.
            price=(self._get_size_price(size_id='1') * id),
            driver=self.connection.driver,
        )

    def _instance_type_to_size(self, instance):
        """Build a NodeSize from an INSTANCE_TYPES entry (new rating model)."""
        return NodeSize(
            id=instance['id'],
            name=instance['name'],
            ram=instance['memory'],
            disk=instance['disk'],
            bandwidth=instance['bandwidth'],
            price=self._get_size_price(size_id=instance['id']),
            driver=self.connection.driver,
        )

    def list_instance_type(self, location=None):
        return [self._instance_type_to_size(instance)
                for name, instance in INSTANCE_TYPES.items()]

    def list_sizes(self, location=None):
        """
        Return a list of sizes (machineTypes) in a zone.

        :keyword location: Which data center to filter a sizes in.
        :type location: :class:`NodeLocation` or ``None``

        :return: List of NodeSize objects
        :rtype: ``list`` of :class:`NodeSize`
        """
        account = self.connection.request('hosting.account.info').object
        if account.get('rating_enabled'):
            # This account use new rating model
            return self.list_instance_type(location)
        # Look for available shares, and return a list of share_definition
        available_res = account['resources']['available']
        if available_res['shares'] == 0:
            return None
        else:
            share_def = account['share_definition']
            available_cores = available_res['cores']
            # 0.75 core given when creating a server
            max_core = int(available_cores + 0.75)
            shares = []
            if available_res['servers'] < 1:
                # No server quota, no way
                return shares
            for i in range(1, max_core + 1):
                # Fix: was "{id: i}" -- keyed on the *builtin* id function,
                # clearly an accidental missing-quotes bug. The key itself is
                # never read downstream, so this is behaviour-preserving.
                share = {'id': i}
                share_is_available = True
                for k in ['memory', 'disk', 'bandwidth']:
                    if share_def[k] * i > available_res[k]:
                        # We run out for at least one resource inside
                        share_is_available = False
                    else:
                        share[k] = share_def[k] * i
                if share_is_available:
                    nb_core = i
                    shares.append(self._to_size(nb_core, share))
            return shares

    def _to_loc(self, loc):
        return NodeLocation(
            id=loc['id'],
            name=loc['name'],
            country=loc['country'],
            driver=self
        )

    def list_locations(self):
        """
        Return a list of locations (datacenters).

        :return: List of NodeLocation objects
        :rtype: ``list`` of :class:`NodeLocation`
        """
        res = self.connection.request('hosting.datacenter.list')
        # Renamed loop variable from the ambiguous "l" (flake8 E741).
        return [self._to_loc(dc) for dc in res.object]

    def list_volumes(self):
        """
        Return a list of volumes.

        :return: A list of volume objects.
        :rtype: ``list`` of :class:`StorageVolume`
        """
        res = self.connection.request('hosting.disk.list', {})
        return self._to_volumes(res.object)

    def ex_get_volume(self, volume_id):
        """
        Return a Volume object based on a volume ID.

        :param volume_id: The ID of the volume
        :type volume_id: ``int``

        :return: A StorageVolume object for the volume
        :rtype: :class:`StorageVolume`
        """
        res = self.connection.request('hosting.disk.info', volume_id)
        return self._to_volume(res.object)

    def create_volume(self, size, name, location=None, snapshot=None):
        """
        Create a volume (disk).

        :param size: Size of volume to create (in GB).
        :type size: ``int``

        :param name: Name of volume to create
        :type name: ``str``

        :keyword location: Location (zone) to create the volume in
        :type location: :class:`NodeLocation` or ``None``

        :keyword snapshot: Snapshot to create image from
        :type snapshot: :class:`Snapshot`

        :return: Storage Volume object
        :rtype: :class:`StorageVolume`
        """
        disk_param = {
            'name': name,
            'size': int(size),
            'datacenter_id': int(location.id)
        }
        if snapshot:
            op = self.connection.request('hosting.disk.create_from',
                                         disk_param, int(snapshot.id))
        else:
            op = self.connection.request('hosting.disk.create', disk_param)
        if self._wait_operation(op.object['id']):
            disk = self._volume_info(op.object['disk_id'])
            return self._to_volume(disk)
        return None

    def attach_volume(self, node, volume, device=None):
        """
        Attach a volume to a node.

        :param node: The node to attach the volume to
        :type node: :class:`Node`

        :param volume: The volume to attach.
        :type volume: :class:`StorageVolume`

        :keyword device: Not used in this cloud.
        :type device: ``None``

        :return: True if successful
        :rtype: ``bool``
        """
        op = self.connection.request('hosting.vm.disk_attach',
                                     int(node.id), int(volume.id))
        if self._wait_operation(op.object['id']):
            return True
        return False

    def detach_volume(self, node, volume):
        """
        Detaches a volume from a node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :param volume: Volume to be detached
        :type volume: :class:`StorageVolume`

        :rtype: ``bool``
        """
        op = self.connection.request('hosting.vm.disk_detach',
                                     int(node.id), int(volume.id))
        if self._wait_operation(op.object['id']):
            return True
        return False

    def destroy_volume(self, volume):
        """
        Destroy a volume.

        :param volume: Volume object to destroy
        :type volume: :class:`StorageVolume`

        :return: True if successful
        :rtype: ``bool``
        """
        op = self.connection.request('hosting.disk.delete', int(volume.id))
        if self._wait_operation(op.object['id']):
            return True
        return False

    def _to_iface(self, iface):
        """Convert a raw interface dict into a :class:`NetworkInterface`."""
        ips = []
        for ip in iface.get('ips', []):
            new_ip = IPAddress(
                ip['id'],
                NODE_STATE_MAP.get(
                    ip['state'],
                    NodeState.UNKNOWN
                ),
                ip['ip'],
                self.connection.driver,
                version=ip.get('version'),
                extra={'reverse': ip['reverse']}
            )
            ips.append(new_ip)
        return NetworkInterface(
            iface['id'],
            NODE_STATE_MAP.get(
                iface['state'],
                NodeState.UNKNOWN
            ),
            mac_address=None,
            driver=self.connection.driver,
            ips=ips,
            node_id=iface.get('vm_id'),
            extra={'bandwidth': iface['bandwidth']},
        )

    def _to_ifaces(self, ifaces):
        return [self._to_iface(i) for i in ifaces]

    def ex_list_interfaces(self):
        """
        Specific method to list network interfaces

        :rtype: ``list`` of :class:`GandiNetworkInterface`
        """
        ifaces = self.connection.request('hosting.iface.list').object
        ips = self.connection.request('hosting.ip.list').object
        for iface in ifaces:
            # list() materialises the filter immediately, so the closure over
            # the loop variable is safe here.
            iface['ips'] = list(
                filter(lambda i: i['iface_id'] == iface['id'], ips))
        return self._to_ifaces(ifaces)

    def _to_disk(self, element):
        disk = Disk(
            id=element['id'],
            state=NODE_STATE_MAP.get(
                element['state'],
                NodeState.UNKNOWN
            ),
            name=element['name'],
            driver=self.connection.driver,
            size=element['size'],
            extra={'can_snapshot': element['can_snapshot']}
        )
        return disk

    def _to_disks(self, elements):
        return [self._to_disk(el) for el in elements]

    def ex_list_disks(self):
        """
        Specific method to list all disk

        :rtype: ``list`` of :class:`GandiDisk`
        """
        res = self.connection.request('hosting.disk.list', {})
        return self._to_disks(res.object)

    def ex_node_attach_disk(self, node, disk):
        """
        Specific method to attach a disk to a node

        :param node: Node which should be used
        :type node: :class:`Node`

        :param disk: Disk which should be used
        :type disk: :class:`GandiDisk`

        :rtype: ``bool``
        """
        op = self.connection.request('hosting.vm.disk_attach',
                                     int(node.id), int(disk.id))
        if self._wait_operation(op.object['id']):
            return True
        return False

    def ex_node_detach_disk(self, node, disk):
        """
        Specific method to detach a disk from a node

        :param node: Node which should be used
        :type node: :class:`Node`

        :param disk: Disk which should be used
        :type disk: :class:`GandiDisk`

        :rtype: ``bool``
        """
        op = self.connection.request('hosting.vm.disk_detach',
                                     int(node.id), int(disk.id))
        if self._wait_operation(op.object['id']):
            return True
        return False

    def ex_node_attach_interface(self, node, iface):
        """
        Specific method to attach an interface to a node

        :param node: Node which should be used
        :type node: :class:`Node`

        :param iface: Network interface which should be used
        :type iface: :class:`GandiNetworkInterface`

        :rtype: ``bool``
        """
        op = self.connection.request('hosting.vm.iface_attach',
                                     int(node.id), int(iface.id))
        if self._wait_operation(op.object['id']):
            return True
        return False

    def ex_node_detach_interface(self, node, iface):
        """
        Specific method to detach an interface from a node

        :param node: Node which should be used
        :type node: :class:`Node`

        :param iface: Network interface which should be used
        :type iface: :class:`GandiNetworkInterface`

        :rtype: ``bool``
        """
        op = self.connection.request('hosting.vm.iface_detach',
                                     int(node.id), int(iface.id))
        if self._wait_operation(op.object['id']):
            return True
        return False

    def ex_snapshot_disk(self, disk, name=None):
        """
        Specific method to make a snapshot of a disk

        :param disk: Disk which should be used
        :type disk: :class:`GandiDisk`

        :param name: Name which should be used
        :type name: ``str``

        :rtype: ``bool``
        """
        if not disk.extra.get('can_snapshot'):
            raise GandiException(1021, 'Disk %s can\'t snapshot' % disk.id)
        if not name:
            suffix = datetime.today().strftime('%Y%m%d')
            name = 'snap_%s' % (suffix)
        op = self.connection.request(
            'hosting.disk.create_from',
            {'name': name, 'type': 'snapshot', },
            int(disk.id),
        )
        if self._wait_operation(op.object['id']):
            return True
        return False

    def ex_update_disk(self, disk, new_size=None, new_name=None):
        """Specific method to update size or name of a disk
        WARNING: if a server is attached it'll be rebooted

        :param disk: Disk which should be used
        :type disk: :class:`GandiDisk`

        :param new_size: New size
        :type new_size: ``int``

        :param new_name: New name
        :type new_name: ``str``

        :rtype: ``bool``
        """
        params = {}
        if new_size:
            params.update({'size': new_size})
        if new_name:
            params.update({'name': new_name})
        op = self.connection.request('hosting.disk.update',
                                     int(disk.id),
                                     params)
        if self._wait_operation(op.object['id']):
            return True
        return False

    def _to_key_pair(self, data):
        key_pair = KeyPair(name=data['name'],
                           fingerprint=data['fingerprint'],
                           public_key=data.get('value', None),
                           private_key=data.get('privatekey', None),
                           driver=self, extra={'id': data['id']})
        return key_pair

    def _to_key_pairs(self, data):
        return [self._to_key_pair(k) for k in data]

    def list_key_pairs(self):
        """
        List registered key pairs.

        :return: A list of key par objects.
        :rtype: ``list`` of :class:`libcloud.compute.base.KeyPair`
        """
        kps = self.connection.request('hosting.ssh.list').object
        return self._to_key_pairs(kps)

    def get_key_pair(self, name):
        """
        Retrieve a single key pair.

        :param name: Name of the key pair to retrieve.
        :type name: ``str``

        :rtype: :class:`.KeyPair`
        """
        filter_params = {'name': name}
        kps = self.connection.request('hosting.ssh.list', filter_params).object
        # NOTE: raises IndexError if no key pair matches `name` -- kept to
        # preserve the established interface.
        return self._to_key_pair(kps[0])

    def import_key_pair_from_string(self, name, key_material):
        """
        Create a new key pair object.

        :param name: Key pair name.
        :type name: ``str``

        :param key_material: Public key material.
        :type key_material: ``str``

        :return: Imported key pair object.
        :rtype: :class:`.KeyPair`
        """
        params = {'name': name, 'value': key_material}
        kp = self.connection.request('hosting.ssh.create', params).object
        return self._to_key_pair(kp)

    def delete_key_pair(self, key_pair):
        """
        Delete an existing key pair.

        :param key_pair: Key pair object or ID.
        :type key_pair: :class.KeyPair` or ``int``

        :return: True of False based on success of Keypair deletion
        :rtype: ``bool``
        """
        key_id = key_pair if isinstance(key_pair, int) \
            else key_pair.extra['id']
        success = self.connection.request('hosting.ssh.delete', key_id).object
        return success
| 32.083535 | 79 | 0.541338 |
af8bbf48e665fa13d98f326a2b41526b54d5d205 | 1,589 | py | Python | brushtech/doc_events/items.py | Momscode-Technologies/brushtech | 0ad9e5b39f29ed0e75b71b0e40853bbda9e967c9 | [
"MIT"
] | null | null | null | brushtech/doc_events/items.py | Momscode-Technologies/brushtech | 0ad9e5b39f29ed0e75b71b0e40853bbda9e967c9 | [
"MIT"
] | null | null | null | brushtech/doc_events/items.py | Momscode-Technologies/brushtech | 0ad9e5b39f29ed0e75b71b0e40853bbda9e967c9 | [
"MIT"
] | 3 | 2022-03-30T04:00:34.000Z | 2022-03-30T04:11:44.000Z | import frappe
@frappe.whitelist()
def validate_item(doc, method):
    """Item doc_event (validate): inherit configuration from the Item Group.

    Copies the group's stock/sales/purchase/inspection flags onto the Item,
    and, when item naming is by Naming Series, derives the series from the
    Item Group's initials.

    :param doc: the Item document being validated
    :param method: hook method name supplied by frappe (unused)
    """
    # Item fieldname -> Item Group fieldname for the inherited flags.
    # Replaces nine copy-pasted one-field lookups with a data-driven loop;
    # each flag is still fetched with its own get_value call, as before.
    group_flag_map = {
        "is_stock_item": "is_maintain_stock",
        "is_sales_item": "is_sales_item",
        "is_purchase_item": "is_purchase_item",
        "is_fixed_asset": "is_fixed_asset",
        "is_sub_contracted_item": "supply_raw_materials_for_purchase",
        "inspection_required_before_delivery": "inspection_required_before_delivery",
        "inspection_required_before_purchase": "inspection_required_before_purchase",
    }
    for item_field, group_field in group_flag_map.items():
        setattr(doc, item_field,
                frappe.db.get_value('Item Group', doc.item_group, group_field))
    # doc.assembled_item = frappe.db.get_value('Item Group', doc.item_group, "is_service_item")
    # doc.has_serial_no = frappe.db.get_value('Item Group', doc.item_group, "has_serial_no")

    item_name_based = frappe.db.get_single_value('Stock Settings', 'item_naming_by')
    if item_name_based == "Naming Series" and not doc.item_group_initial:
        doc.item_group_initial = frappe.db.get_value(
            'Item Group', doc.item_group, "item_group_initials")
    if not doc.naming_series:
        doc.naming_series = "ITEM-.{item_group_initial}.-.#####"
    if not doc.item_group_initial:
        # NOTE(review): this check is not guarded by the Naming Series mode
        # above, so it fires for every naming mode when the initial is empty;
        # confirm whether it should apply only when naming is by series.
        frappe.throw("Item naming is based on Naming Series and Item Group Initial is required. Please check Item Group Initials in Item Group Master")
e3be6ee16833bcdfd59d1ab16c7eee69807bd01c | 536 | py | Python | apps/accounts/forms.py | sotkonstantinidis/testcircle | 448aa2148fbc2c969e60f0b33ce112d4740a8861 | [
"Apache-2.0"
] | 3 | 2019-02-24T14:24:43.000Z | 2019-10-24T18:51:32.000Z | apps/accounts/forms.py | sotkonstantinidis/testcircle | 448aa2148fbc2c969e60f0b33ce112d4740a8861 | [
"Apache-2.0"
] | 17 | 2017-03-14T10:55:56.000Z | 2022-03-11T23:20:19.000Z | apps/accounts/forms.py | sotkonstantinidis/testcircle | 448aa2148fbc2c969e60f0b33ce112d4740a8861 | [
"Apache-2.0"
] | 2 | 2016-02-01T06:32:40.000Z | 2019-09-06T04:33:50.000Z | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import AuthenticationForm
import floppyforms as forms
class WocatAuthenticationForm(forms.Form, AuthenticationForm):
    """
    Django authentication form rendered with floppyforms widgets.

    The ``username`` and ``password`` fields are redeclared as floppyforms
    fields so templates render floppyforms widgets instead of Django's
    defaults; the login name is the user's e-mail address.
    """
    # tabindex/autofocus become HTML attributes on the rendered input.
    username = forms.CharField(
        max_length=255, label=_(u"E-mail address"),
        widget=forms.TextInput(attrs={'tabindex': 1, 'autofocus': True}))
    password = forms.CharField(
        label=_("Password"), widget=forms.PasswordInput(attrs={'tabindex': 2}))
b22ed7bf6f8f65e7e0957f81b09549c3b99d7efa | 3,414 | py | Python | pytorch/libs/nnet/transformer/embedding.py | ishine/asv-subtools | 597dcb29a772b8113dbe7ab64f0d4cc1da298707 | [
"Apache-2.0"
] | 370 | 2020-05-31T16:28:08.000Z | 2022-03-24T07:27:50.000Z | pytorch/libs/nnet/transformer/embedding.py | ts0923/asv-subtools | a678b8f3327de0e99c445a79a9e91e5e0e006b11 | [
"Apache-2.0"
] | 35 | 2020-07-01T12:08:31.000Z | 2021-12-15T03:18:14.000Z | pytorch/libs/nnet/transformer/embedding.py | ts0923/asv-subtools | a678b8f3327de0e99c445a79a9e91e5e0e006b11 | [
"Apache-2.0"
] | 119 | 2020-06-08T11:27:09.000Z | 2022-03-31T05:31:53.000Z | # -*- coding:utf-8 -*-
# Reference: https://github.com/espnet/espnet.
"""Positonal Encoding Module."""
import math
import torch
def _pre_hook(state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
"""Perform pre-hook in load_state_dict for backward compatibility.
Note:
We saved self.pe until v.0.5.2 but we have omitted it later.
Therefore, we remove the item "pe" from `state_dict` for backward compatibility.
"""
k = prefix + "pe"
if k in state_dict:
state_dict.pop(k)
class PositionalEncoding(torch.nn.Module):
    """Sinusoidal positional encoding.

    Scales the input by sqrt(d_model) and adds position-dependent
    sine/cosine features, followed by dropout.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Initialize class.

        :param int d_model: embedding dim
        :param float dropout_rate: dropout rate
        :param int max_len: maximum input length
        """
        super(PositionalEncoding, self).__init__()
        self.d_model = d_model
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        # Pre-compute the table for max_len positions; extend_pe() grows it
        # on demand for longer inputs.
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))
        # Strip the legacy "pe" entry from old checkpoints on load.
        self._register_load_state_dict_pre_hook(_pre_hook)

    def extend_pe(self, x):
        """Ensure the cached table covers x.size(1) positions on x's dtype/device."""
        if self.pe is not None and self.pe.size(1) >= x.size(1):
            if self.pe.dtype != x.dtype or self.pe.device != x.device:
                self.pe = self.pe.to(dtype=x.dtype, device=x.device)
            return
        length = x.size(1)
        position = torch.arange(0, length, dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model))
        table = torch.zeros(length, self.d_model)
        # Even feature indices get sin, odd ones get cos.
        table[:, 0::2] = torch.sin(position * div_term)
        table[:, 1::2] = torch.cos(position * div_term)
        self.pe = table.unsqueeze(0).to(device=x.device, dtype=x.dtype)

    def forward(self, x: torch.Tensor):
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input. Its shape is (batch, time, ...)

        Returns:
            torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)
        """
        self.extend_pe(x)
        x = x * self.xscale + self.pe[:, :x.size(1)]
        return self.dropout(x)
class ScaledPositionalEncoding(PositionalEncoding):
    """Positional encoding with a learnable scale on the added table.

    See also: Sec. 3.2 https://arxiv.org/pdf/1809.08895.pdf
    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Initialize class.

        :param int d_model: embedding dim
        :param float dropout_rate: dropout rate
        :param int max_len: maximum input length
        """
        super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)
        # Learnable scalar applied to the positional table; starts at 1.0.
        self.alpha = torch.nn.Parameter(torch.tensor(1.0))

    def reset_parameters(self):
        """Reset the learnable scale back to its initial value of 1.0."""
        self.alpha.data = torch.tensor(1.0)

    def forward(self, x):
        """Add scaled positional encoding.

        Args:
            x (torch.Tensor): Input. Its shape is (batch, time, ...)

        Returns:
            torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)
        """
        self.extend_pe(x)
        return self.dropout(x + self.alpha * self.pe[:, :x.size(1)])
51d3d9859dea36b5d48969c1f90e6b1f91154a78 | 2,090 | py | Python | nicos_tuw/xccm/setups/optic_motors.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_tuw/xccm/setups/optic_motors.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-08-18T10:55:42.000Z | 2021-08-18T10:55:42.000Z | nicos_tuw/xccm/setups/optic_motors.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | description = 'motors moving the optics tool in the experiment chamber.'
group = 'lowlevel'
tangobase = 'tango://localhost:10000/box/'
devices = dict(
opt_Tx_m = device('nicos.devices.tango.Motor',
description = 'Optics translation x-axis motor',
tangodevice = tangobase + 'optics/Tx_Motor',
lowlevel = True,
),
opt_Tx = device('nicos.devices.generic.Axis',
description = 'Optics translation x-axis',
motor = 'opt_Tx_m',
# coder = 'sample_tilt_c',
precision = 0.01,
),
opt_Ty_m = device('nicos.devices.tango.Motor',
description = 'Optics translation y-axis motor',
tangodevice = tangobase + 'optics/Ty_Motor',
lowlevel = True,
),
opt_Ty = device('nicos.devices.generic.Axis',
description = 'Optics translation x-axis',
motor = 'opt_Ty_m',
# coder = 'sample_tilt_c',
precision = 0.01,
),
opt_Tz_m = device('nicos.devices.tango.Motor',
description = 'Optics translation z-axis motor',
tangodevice = tangobase + 'optics/Tz_Motor',
lowlevel = True,
),
opt_Tz = device('nicos.devices.generic.Axis',
description = 'Optics translation z-axis',
motor = 'opt_Tz_m',
# coder = 'sample_tilt_c',
precision = 0.01,
),
opt_Rz_m = device('nicos.devices.tango.Motor',
description = 'Optics rotation z-axis motor',
tangodevice = tangobase + 'optics/Rz_Motor',
lowlevel = True,
),
opt_Rz = device('nicos.devices.generic.Axis',
description = 'Optics rotation z-axis',
motor = 'opt_Rz_m',
# coder = 'sample_tilt_c',
precision = 0.01,
),
opt_Tilt_m = device('nicos.devices.tango.Motor',
description = 'Optics tilt-axis motor',
tangodevice = tangobase + 'optics/Tilt_Motor',
lowlevel = True,
),
opt_Tilt = device('nicos.devices.generic.Axis',
description = 'Optics Tilt-axis',
motor = 'opt_Tilt_m',
# coder = 'sample_tilt_c',
precision = 0.01,
),
)
| 32.65625 | 72 | 0.6 |
e7c74eaf10e1f157ab993913992a2accf4c7a104 | 3,673 | py | Python | pymagnitude/third_party/allennlp/data/dataset_readers/sequence_tagging.py | tpeng/magnitude | aec98628b5547773ca8c4114ec6d1ad51e21b230 | [
"MIT"
] | 1,520 | 2018-03-01T13:37:49.000Z | 2022-03-25T11:40:20.000Z | pymagnitude/third_party/allennlp/data/dataset_readers/sequence_tagging.py | tpeng/magnitude | aec98628b5547773ca8c4114ec6d1ad51e21b230 | [
"MIT"
] | 87 | 2018-03-03T15:12:50.000Z | 2022-02-21T15:24:12.000Z | pymagnitude/third_party/allennlp/data/dataset_readers/sequence_tagging.py | tpeng/magnitude | aec98628b5547773ca8c4114ec6d1ad51e21b230 | [
"MIT"
] | 121 | 2018-03-03T08:40:53.000Z | 2022-03-16T05:19:38.000Z |
from __future__ import with_statement
from __future__ import absolute_import
#typing
import logging
#overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField, SequenceLabelField, MetadataField, Field
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from io import open
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
DEFAULT_WORD_TAG_DELIMITER = u"###"
class SequenceTaggingDatasetReader(DatasetReader):
u"""
Reads instances from a pretokenised file where each line is in the following format:
WORD###TAG [TAB] WORD###TAG [TAB] ..... \n
and converts it into a ``Dataset`` suitable for sequence tagging. You can also specify
alternative delimiters in the constructor.
Parameters
----------
word_tag_delimiter: ``str``, optional (default=``"###"``)
The text that separates each WORD from its TAG.
token_delimiter: ``str``, optional (default=``None``)
The text that separates each WORD-TAG pair from the next pair. If ``None``
then the line will just be split on whitespace.
token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
Note that the `output` tags will always correspond to single token IDs based on how they
are pre-tokenised in the data file.
"""
def __init__(self,
word_tag_delimiter = DEFAULT_WORD_TAG_DELIMITER,
token_delimiter = None,
token_indexers = None,
lazy = False) :
super(SequenceTaggingDatasetReader, self).__init__(lazy)
self._token_indexers = token_indexers or {u'tokens': SingleIdTokenIndexer()}
self._word_tag_delimiter = word_tag_delimiter
self._token_delimiter = token_delimiter
#overrides
def _read(self, file_path):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, u"r") as data_file:
logger.info(u"Reading instances from lines in file at: %s", file_path)
for line in data_file:
line = line.strip(u"\n")
# skip blank lines
if not line:
continue
tokens_and_tags = [pair.rsplit(self._word_tag_delimiter, 1)
for pair in line.split(self._token_delimiter)]
tokens = [Token(token) for token, tag in tokens_and_tags]
tags = [tag for token, tag in tokens_and_tags]
yield self.text_to_instance(tokens, tags)
def text_to_instance(self, tokens , tags = None) : # type: ignore
u"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
"""
# pylint: disable=arguments-differ
fields = {}
sequence = TextField(tokens, self._token_indexers)
fields[u"tokens"] = sequence
fields[u"metadata"] = MetadataField({u"words": [x.text for x in tokens]})
if tags is not None:
fields[u"tags"] = SequenceLabelField(tags, sequence)
return Instance(fields)
SequenceTaggingDatasetReader = DatasetReader.register(u"sequence_tagging")(SequenceTaggingDatasetReader)
| 41.738636 | 107 | 0.649333 |
23cf3f9abfdd4762c03a9e13d9bfab032115b1f7 | 2,008 | py | Python | pandapower/control/controller/trafo/USetTapControl.py | yougnen/pandapower | d206bd91e68dd03675f7fe8ddee141621ef437fc | [
"BSD-3-Clause"
] | 2 | 2021-01-04T11:41:15.000Z | 2021-01-04T11:41:26.000Z | pandapower/control/controller/trafo/USetTapControl.py | yougnen/pandapower | d206bd91e68dd03675f7fe8ddee141621ef437fc | [
"BSD-3-Clause"
] | 3 | 2021-03-29T15:14:13.000Z | 2021-05-29T09:29:19.000Z | pandapower/control/controller/trafo/USetTapControl.py | yougnen/pandapower | d206bd91e68dd03675f7fe8ddee141621ef437fc | [
"BSD-3-Clause"
] | 1 | 2021-01-27T20:47:33.000Z | 2021-01-27T20:47:33.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
from pandapower.control.controller.characteristic_control import CharacteristicControl
from pandapower.control.util.characteristic import Characteristic
class USetTapControl(CharacteristicControl):
"""
Controller that adjusts the setpoint of a local tap changer voltage control based on a load flow result (e.g. p_lv_mw, i_lv_ka etc.)
according to a defined characteristic.
INPUT:
**net** (attrdict) - Pandapower net
**cid** (int) - ID of the tap changer controller, an attribute of which is controlled
**variable** (float) - Variable from the result table that is used for the characteristic
OPTIONAL:
**in_service** (bool, True) - Indicates if the controller is currently in_service
**drop_same_existing_ctrl** (bool, False) - Indicates if already existing controllers of the same type and with the same matching parameters (e.g. at same element) should be dropped
"""
def __init__(self, net, controller_index, characteristic_index, variable='p_hv_mw', tol=1e-3, in_service=True,
order=0, level=0, drop_same_existing_ctrl=False, matching_params=None, **kwargs):
if matching_params is None:
matching_params = {"cid": controller_index, 'variable': variable}
c = net.controller.at[controller_index, 'object']
super().__init__(net, output_element="controller", output_variable="object.vm_set_pu", output_element_index=controller_index,
input_element="res_" + c.trafotable, input_variable=variable, input_element_index=c.tid,
characteristic_index=characteristic_index, tol=tol, in_service=in_service, order=order, level=level,
drop_same_existing_ctrl=drop_same_existing_ctrl, matching_params=matching_params, **kwargs) | 55.777778 | 189 | 0.719124 |
55d8fd14ff00b58d20c52e0093722e29b131e3cb | 10,561 | py | Python | SCODE-G/code_to_text/rouge_evaluator.py | rizwan09/REDCODER | e889b3d3f37573be8418c0ac536c2201e8f1be26 | [
"MIT"
] | 22 | 2021-11-15T06:00:13.000Z | 2022-03-25T14:33:15.000Z | SCODE-G/code_to_text/rouge_evaluator.py | rizwan09/REDCODER | e889b3d3f37573be8418c0ac536c2201e8f1be26 | [
"MIT"
] | 2 | 2021-12-02T19:22:51.000Z | 2022-02-19T10:50:58.000Z | SCODE-G/code_to_text/rouge_evaluator.py | rizwan09/REDCODER | e889b3d3f37573be8418c0ac536c2201e8f1be26 | [
"MIT"
] | 2 | 2021-10-07T15:27:25.000Z | 2021-12-08T07:12:13.000Z | """
This is a modified verison of
https://raw.githubusercontent.com/google/seq2seq/master/seq2seq/metrics/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools, sys
import numpy as np
#pylint: disable=C0103
def _get_ngrams(n, text):
"""Calcualtes n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _split_into_words(sentences):
"""Splits multiple sentences into words and flattens the result"""
return list(itertools.chain(*[_.split(" ") for _ in sentences]))
def _get_word_ngrams(n, sentences):
"""Calculates word n-grams for multiple sentences.
"""
assert len(sentences) > 0
assert n > 0
words = _split_into_words(sentences)
return _get_ngrams(n, words)
def _len_lcs(x, y):
"""
Returns the length of the Longest Common Subsequence between sequences x
and y.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""
Computes the length of the longest common subsequence (lcs) between two
strings. The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _recon_lcs(x, y):
    """
    Returns the Longest Subsequence between x and y.
    Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence

    Args:
      x: sequence of words
      y: sequence of words

    Returns:
      sequence: LCS of x and y, as a tuple of tokens
    """
    i, j = len(x), len(y)
    table = _lcs(x, y)

    def _recon(i, j):
        """private recon calculation"""
        # Walk the DP table backwards from (i, j), collecting matched tokens
        # as (token, position-in-x) pairs.
        # NOTE(review): recursion depth grows with len(x) + len(y); very long
        # inputs could hit Python's recursion limit -- confirm inputs are
        # sentence-sized.
        if i == 0 or j == 0:
            return []
        elif x[i - 1] == y[j - 1]:
            return _recon(i - 1, j - 1) + [(x[i - 1], i)]
        elif table[i - 1, j] > table[i, j - 1]:
            return _recon(i - 1, j)
        else:
            return _recon(i, j - 1)

    # Keep only the tokens, dropping the positions.
    recon_tuple = tuple(map(lambda x: x[0], _recon(i, j)))
    return recon_tuple
def rouge_n(evaluated_sentences, reference_sentences, n=2):
    """
    Computes ROUGE-N of two text collections of sentences.
    Source: http://research.microsoft.com/en-us/um/people/cyl/download/
    papers/rouge-working-note-v1.3.1.pdf

    Args:
      evaluated_sentences: The sentences that have been picked by the summarizer
      reference_sentences: The sentences from the reference set
      n: Size of ngram.  Defaults to 2.

    Returns:
      A tuple (f1, precision, recall) for ROUGE-N

    Raises:
      ValueError: raises exception if a param has len <= 0
    """
    if not evaluated_sentences or not reference_sentences:
        raise ValueError("Collections must contain at least 1 sentence.")

    evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
    reference_ngrams = _get_word_ngrams(n, reference_sentences)
    overlapping_count = len(evaluated_ngrams.intersection(reference_ngrams))

    # Guard the zero-denominator edge cases explicitly; this is not
    # mathematically rigorous but keeps the metric defined.
    evaluated_count = len(evaluated_ngrams)
    reference_count = len(reference_ngrams)
    precision = overlapping_count / evaluated_count if evaluated_count else 0.0
    recall = overlapping_count / reference_count if reference_count else 0.0

    # Small epsilon keeps the harmonic mean defined when both terms are zero.
    f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
    return f1_score, precision, recall
def _f_p_r_lcs(llcs, m, n):
"""
Computes the LCS-based F-measure score
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta**2)) * r_lcs * p_lcs
denom = r_lcs + ((beta**2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs, p_lcs, r_lcs
def rouge_l_sentence_level(evaluated_sentences, reference_sentences):
    """
    Computes ROUGE-L (sentence level) of two text collections of sentences.
    http://research.microsoft.com/en-us/um/people/cyl/download/papers/
    rouge-working-note-v1.3.1.pdf

    Calculated according to:
      R_lcs = LCS(X, Y) / m
      P_lcs = LCS(X, Y) / n
      F_lcs = ((1 + beta^2) * R_lcs * P_lcs) / (R_lcs + (beta^2) * P_lcs)

    where:
      X = reference summary
      Y = candidate summary
      m = length of reference summary
      n = length of candidate summary

    Args:
      evaluated_sentences: The sentences that have been picked by the summarizer
      reference_sentences: The sentences from the reference set

    Returns:
      A tuple (F_lcs, P_lcs, R_lcs)

    Raises:
      ValueError: raises exception if a param has len <= 0
    """
    if not evaluated_sentences or not reference_sentences:
        raise ValueError("Collections must contain at least 1 sentence.")
    candidate_words = _split_into_words(evaluated_sentences)
    reference_words = _split_into_words(reference_sentences)
    lcs_length = _len_lcs(candidate_words, reference_words)
    return _f_p_r_lcs(lcs_length, len(reference_words), len(candidate_words))
def _union_lcs(evaluated_sentences, reference_sentence):
    """
    Returns LCS_u(r_i, C) which is the LCS score of the union longest common
    subsequence between reference sentence ri and candidate summary C. For example
    if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8 and
    c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1 is
    "w1 w2" and the longest common subsequence of r_i and c2 is "w1 w3 w5". The
    union longest common subsequence of r_i, c1, and c2 is "w1 w2 w3 w5" and
    LCS_u(r_i, C) = 4/5.

    Args:
      evaluated_sentences: The sentences that have been picked by the summarizer
      reference_sentence: One of the sentences in the reference summaries

    Returns:
      float: LCS_u(r_i, C)

    ValueError:
      Raises exception if a param has len <= 0
    """
    if len(evaluated_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")

    lcs_union = set()
    reference_words = _split_into_words([reference_sentence])
    combined_lcs_length = 0
    for eval_s in evaluated_sentences:
        evaluated_words = _split_into_words([eval_s])
        # (token, index-in-reference) pairs of one LCS between the reference
        # and this candidate sentence.
        lcs = set(_recon_lcs(reference_words, evaluated_words))
        combined_lcs_length += len(lcs)
        lcs_union = lcs_union.union(lcs)

    union_lcs_count = len(lcs_union)
    # NOTE(review): the docstring's example normalizes by the reference length
    # (4/5), but the code divides by the summed per-sentence LCS lengths, and
    # raises ZeroDivisionError when no sentence shares any token with the
    # reference -- confirm against the upstream google/seq2seq implementation.
    union_lcs_value = union_lcs_count / combined_lcs_length
    return union_lcs_value
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
    """
    Computes ROUGE-L (summary level) of two text collections of sentences.
    http://research.microsoft.com/en-us/um/people/cyl/download/papers/
    rouge-working-note-v1.3.1.pdf

    Calculated according to:
      R_lcs = SUM(1, u)[LCS<union>(r_i, C)] / m
      P_lcs = SUM(1, u)[LCS<union>(r_i, C)] / n
      F_lcs = ((1 + beta^2) * R_lcs * P_lcs) / (R_lcs + (beta^2) * P_lcs)

    where:
      SUM(i, u) = SUM from i through u
      u = number of sentences in reference summary
      C = Candidate summary made up of v sentences
      m = number of words in reference summary
      n = number of words in candidate summary

    Args:
      evaluated_sentences: The sentences that have been picked by the summarizer
      reference_sentences: The sentences from the reference summaries

    Returns:
      A tuple (F_lcs, P_lcs, R_lcs)

    Raises:
      ValueError: raises exception if a param has len <= 0
    """
    if not evaluated_sentences or not reference_sentences:
        raise ValueError("Collections must contain at least 1 sentence.")

    # Total number of words in the reference and evaluated sentences.
    m = len(_split_into_words(reference_sentences))
    n = len(_split_into_words(evaluated_sentences))

    union_lcs_sum = sum(
        _union_lcs(evaluated_sentences, ref_s) for ref_s in reference_sentences)
    return _f_p_r_lcs(union_lcs_sum, m, n)
def rouge(hypotheses, references):
    """Calculates average ROUGE scores for paired lists of hypotheses and references.

    Returns a dict with mean F1/precision/recall for ROUGE-1, ROUGE-2 and
    sentence-level ROUGE-L, keyed like ``"rouge_1/f_score"``.
    """
    pairs = list(zip(hypotheses, references))

    def _averaged(metric_fn):
        # Mean (f, p, r) of metric_fn over all (hypothesis, reference) pairs.
        scores = [metric_fn(hyp, ref) for hyp, ref in pairs]
        return map(np.mean, zip(*scores))

    rouge_1_f, rouge_1_p, rouge_1_r = _averaged(lambda h, r: rouge_n([h], [r], 1))
    rouge_2_f, rouge_2_p, rouge_2_r = _averaged(lambda h, r: rouge_n([h], [r], 2))
    rouge_l_f, rouge_l_p, rouge_l_r = _averaged(
        lambda h, r: rouge_l_sentence_level([h], [r]))

    return {
        "rouge_1/f_score": rouge_1_f,
        "rouge_1/r_score": rouge_1_r,
        "rouge_1/p_score": rouge_1_p,
        "rouge_2/f_score": rouge_2_f,
        "rouge_2/r_score": rouge_2_r,
        "rouge_2/p_score": rouge_2_p,
        "rouge_l/f_score": rouge_l_f,
        "rouge_l/r_score": rouge_l_r,
        "rouge_l/p_score": rouge_l_p,
    }
if __name__ == '__main__':
    # Usage: python rouge_evaluator.py <reference_file> <prediction_file>
    # Each file holds one sentence per line, aligned by line number.
    reference_file = sys.argv[1]
    prediction_file = sys.argv[2]
    # BUG FIX: the original passed the file *paths* straight into rouge(),
    # which expects the sentence lists themselves (and in swapped order:
    # rouge(hypotheses, references)).
    with open(reference_file, encoding='utf-8') as ref_f:
        references = [line.rstrip('\n') for line in ref_f]
    with open(prediction_file, encoding='utf-8') as pred_f:
        hypotheses = [line.rstrip('\n') for line in pred_f]
    rouge_res = rouge(hypotheses, references)
    print('rouge_l/f_score:', round(100 * rouge_res['rouge_l/f_score'], 2))
598198bbfe794324500f6b4ecd4f2614420dca24 | 43,034 | py | Python | pytorch_lightning/trainer/training_loop.py | gcroci2/pytorch-lightning | eb648855110c604c547d04884f9352e8c4d81785 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/trainer/training_loop.py | gcroci2/pytorch-lightning | eb648855110c604c547d04884f9352e8c4d81785 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/trainer/training_loop.py | gcroci2/pytorch-lightning | eb648855110c604c547d04884f9352e8c4d81785 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from contextlib import contextmanager, suppress
from copy import copy, deepcopy
from functools import partial, update_wrapper
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from torch.optim import Optimizer
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.supporters import TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType
from pytorch_lightning.utilities.distributed import rank_zero_info
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.finite_checks import detect_nan_parameters
from pytorch_lightning.utilities.grads import grad_norm
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
    def __init__(
        self,
        trainer,
        max_epochs: Optional[int],
        min_epochs: Optional[int],
        max_steps: Optional[int],
        min_steps: Optional[int],
        num_sanity_val_steps: int,
    ):
        """Initialize loop bookkeeping and push loop-related defaults onto the trainer.

        Args:
            trainer: the owning ``Trainer``; several of its attributes are initialized here.
            max_epochs/min_epochs: epoch budget; ``None`` means "derive from steps".
            max_steps/min_steps: global-step budget; ``None`` means "derive from epochs".
            num_sanity_val_steps: validation batches to run before training; -1 means all.
        """
        self.trainer = trainer
        self.accumulated_loss = None
        self.warning_cache = WarningCache()
        self._teardown_already_run = False
        self.running_loss = TensorRunningAccum(window_length=20)
        self._skip_backward = False
        self._optimizer_freq_cumsum = None
        self._hiddens = None
        self.global_step = 0
        self.current_epoch = 0
        self.trainer.should_stop = False

        # the total batch index across all epochs
        self.total_batch_idx = 0
        # the current batch index in the loop that runs over the dataloader(s)
        self.batch_idx = 0
        # the current split index when the batch gets split into chunks in truncated backprop through time
        self.split_idx = None

        self.trainer.num_training_batches = 0
        self.trainer.train_dataloader = None

        # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000
        self.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
        # If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1
        self.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
        self.max_steps = max_steps
        self.min_steps = min_steps

        if num_sanity_val_steps == -1:
            self.trainer.num_sanity_val_steps = float("inf")
        else:
            self.trainer.num_sanity_val_steps = num_sanity_val_steps
    @property
    def num_active_optimizers(self) -> int:
        """Number of optimizers active for the current batch (see ``get_active_optimizers``)."""
        return len(self.get_active_optimizers())
    @property
    def optimizer_freq_cumsum(self):
        """Cumulative sum of the trainer's ``optimizer_frequencies``, computed lazily and cached."""
        if self._optimizer_freq_cumsum is None:
            self._optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
        return self._optimizer_freq_cumsum
def should_skip_training(self) -> bool:
should_by_max_steps = self.max_steps is not None and self.global_step >= self.max_steps
should_by_epoch = self.max_epochs is not None and self.current_epoch >= self.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
    def on_train_start(self):
        """Fire the ``on_train_start`` hook on callbacks/module via the trainer."""
        # hook
        self.trainer.call_hook("on_train_start")
    def on_train_end(self):
        """Run one-time teardown at the very end of training.

        Saves a final "last" checkpoint, fires ``on_train_end`` hooks,
        finalizes the logger and profiler, lets the accelerator clean up, and
        clears the trainer stage.  Guarded so repeated calls are no-ops.
        """
        if self._teardown_already_run:
            return
        self._teardown_already_run = True

        # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
        # when a checkpoint was saved at the last step
        self.global_step -= 1
        self.check_checkpoint_callback(should_update=True, is_last=True)
        self.global_step += 1

        # hook
        self.trainer.call_hook("on_train_end")

        # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
        # It might be related to xla tensors blocked when moving the cpu
        # kill loggers
        if self.trainer.logger is not None:
            self.trainer.logger.finalize("success")

        # summarize profile results
        self.trainer.profiler.describe()

        # give accelerators a chance to finish
        self.trainer.accelerator.on_train_end()

        # reset bookkeeping
        self.trainer.state.stage = None
    def check_checkpoint_callback(self, should_update, is_last=False):
        """Manually trigger the checkpoint callbacks' ``on_validation_end``.

        ``is_last`` marks the final invocation at the end of training, where a
        verbose "save_last" callback announces the latest-checkpoint save.
        Only runs when the checkpoint connector has seen any training.
        """
        # TODO bake this logic into the ModelCheckpoint callback
        if should_update and self.trainer.checkpoint_connector.has_trained:
            callbacks = self.trainer.checkpoint_callbacks

            if is_last and any(cb.save_last and cb.verbose for cb in callbacks):
                rank_zero_info("Saving latest checkpoint...")

            model = self.trainer.lightning_module

            for cb in callbacks:
                cb.on_validation_end(self.trainer, model)
    def on_train_epoch_start(self, epoch):
        """Prepare loop state for a new epoch and fire epoch-start hooks.

        Args:
            epoch: index of the epoch about to start; becomes ``current_epoch``.
        """
        # update training progress in trainer
        self.current_epoch = epoch

        model = self.trainer.lightning_module

        # reset train dataloader
        if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
            self.trainer.reset_train_dataloader(model)

        # todo: specify the possible exception
        with suppress(Exception):
            # set seed for distributed sampler (enables shuffling for each epoch)
            self.trainer.train_dataloader.sampler.set_epoch(epoch)

        # changing gradient according accumulation_scheduler
        self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)

        # stores accumulated grad fractions per batch
        self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)

        # hook
        self.trainer.call_hook("on_epoch_start")
        self.trainer.call_hook("on_train_epoch_start")
    def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
        """Fire batch-end hooks and accumulate this batch's outputs for epoch end.

        ``batch_end_outputs`` is indexed per-optimizer; empty per-optimizer
        entries are dropped before processing.
        """
        batch_end_outputs = [opt_idx_out for opt_idx_out in batch_end_outputs if len(opt_idx_out)]

        processed_batch_end_outputs = TrainLoop._prepare_outputs(batch_end_outputs, batch_mode=True)

        # hook
        self.trainer.call_hook('on_train_batch_end', processed_batch_end_outputs, batch, batch_idx, dataloader_idx)
        self.trainer.call_hook('on_batch_end')

        # figure out what to track for epoch end
        self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)

        # reset batch logger internals
        self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model) -> None:
    """Attach train/val dataloaders to the trainer when they are missing.

    The val dataloader must be initialized before the training loop starts, as
    the training loop inspects the val dataloader to decide whether to run the
    evaluation loop.
    """
    trainer = self.trainer
    if trainer.train_dataloader is None:
        trainer.reset_train_dataloader(model)
    if trainer.val_dataloaders is None:
        trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
        """Append batch outputs to ``epoch_output`` when they are needed at epoch end."""
        hook_overridden = self._should_add_batch_output_to_epoch_output()
        # track the outputs to reduce at the end of the epoch
        for opt_idx, opt_outputs in enumerate(batch_end_outputs):
            sample_output = opt_outputs[-1]
            # decide if we need to reduce at the end of the epoch automatically
            auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
            # only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
            if not (hook_overridden or auto_reduce_tng_result):
                continue
            # with 1 step (no tbptt) don't use a sequence at epoch end
            if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
                opt_outputs = opt_outputs[0]
            epoch_output[opt_idx].append(opt_outputs)
def _should_add_batch_output_to_epoch_output(self) -> bool:
    """Return True when batch outputs must be kept for the epoch-end hooks."""
    # Outputs are tracked when either:
    #   1. the model defines ``training_epoch_end``, or
    #   2. the model overrides ``on_train_epoch_end`` with an ``outputs`` argument.
    # TODO: in v1.5 this only needs to check if training_epoch_end is overridden
    module = self.trainer.lightning_module
    if is_overridden("training_epoch_end", model=module):
        return True
    if not is_overridden("on_train_epoch_end", model=module):
        return False
    hook_fx = getattr(module, "on_train_epoch_end")
    if is_param_in_hook_signature(hook_fx, "outputs"):
        return True
    return False
def get_active_optimizers(self, batch_idx: Optional[int] = None) -> List[Tuple[int, Optimizer]]:
    """
    Returns the currently active optimizers. When multiple optimizers are used with different frequencies,
    only one of the optimizers is active at a time.
    Returns:
        A list of tuples (opt_idx, optimizer) of currently active optimizers.
    """
    if not self.trainer.optimizer_frequencies:
        # no frequencies configured: every optimizer steps on every batch
        return list(enumerate(self.trainer.optimizers))
    if batch_idx is None:
        batch_idx = self.total_batch_idx
    # position of this batch within one full cycle over all optimizer frequencies
    cycle_length = self.optimizer_freq_cumsum[-1]
    offset_in_cycle = batch_idx % cycle_length
    # the first cumulative frequency greater than the offset marks the active optimizer
    active_idx = int(np.argmax(self.optimizer_freq_cumsum > offset_in_cycle))
    return [(active_idx, self.trainer.optimizers[active_idx])]
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
        """Detach step outputs and fire the ``on_after_backward`` hook."""
        # break the autograd graph held by the step output now that backward ran
        training_step_output.detach()
        # insert after step hook
        self.trainer.call_hook("on_after_backward")
        # when in dev debugging track the losses
        self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
        """Reject graph-less Tensor returns from ``training_step`` under manual optimization."""
        if isinstance(training_step_output, torch.Tensor) and not self.trainer.lightning_module.automatic_optimization:
            if training_step_output.grad_fn is None:
                # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
                raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
        """Run one ``training_step`` (plus ``training_step_end``) and package the result.

        Returns an ``AttributeDict`` holding the (possibly accumulation-scaled)
        ``closure_loss``, a detached ``loss`` copy, and the processed step outputs,
        or ``None`` when the step produced no output.
        """
        # give the PL module a result for logging
        model_ref = self.trainer.lightning_module
        with self.trainer.profiler.profile("model_forward"):
            step_kwargs = self._build_kwargs(split_batch, batch_idx, opt_idx, hiddens)
            # manually capture logged metrics
            model_ref._current_fx_name = 'training_step'
            model_ref._results = Result()
            with self.trainer.profiler.profile("training_step"):
                training_step_output = self.trainer.accelerator.training_step(step_kwargs)
                self.trainer.accelerator.post_training_step()
            self.trainer.logger_connector.cache_logged_metrics()
            self._check_training_step_output(training_step_output)
            training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
            training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
                training_step_output, split_batch
            )
            if training_step_output_for_epoch_end is None:
                return
        # enable empty loss when using manual opt
        closure_loss = None
        untouched_loss = None
        if self.trainer.lightning_module.automatic_optimization:
            # accumulate loss. if accumulate_grad_batches==1, no effect
            closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches
            # the loss will get scaled for amp. avoid any modifications to it
            untouched_loss = closure_loss.detach().clone()
        # result
        result = AttributeDict(
            closure_loss=closure_loss,
            loss=untouched_loss,
            training_step_output=training_step_output,
            training_step_output_for_epoch_end=training_step_output_for_epoch_end,
        )
        return result
def _process_training_step_output(self, training_step_output, split_batch):
        """Split the raw ``training_step`` return into (epoch-end copy, live Result).

        The live ``Result`` keeps the autograd graph for backward; the epoch-end
        copy is detached (and optionally moved to CPU) for later reduction.
        """
        training_step_output_for_epoch_end = training_step_output
        # enable validation_step return None
        if training_step_output_for_epoch_end is None:
            return None, None
        result = self.trainer.lightning_module._results
        loss = None
        hiddens = None
        result["extra"] = {}
        # handle dict return
        if isinstance(training_step_output, dict):
            loss = training_step_output.pop("loss", None)
            hiddens = training_step_output.pop("hiddens", None)
            if hiddens is not None:
                # detach so the graph from this split is not retained via hiddens
                hiddens = hiddens.detach()
            result["extra"] = training_step_output
        # handle scalar return
        elif isinstance(training_step_output, torch.Tensor):
            loss = training_step_output
        # map to results under the hood
        result.minimize = loss
        self._hiddens = hiddens
        # track batch for manual reduction with result
        result.track_batch_size(len(split_batch))
        # track metrics without grads for epoch reduction
        training_step_output_for_epoch_end = copy(result)
        training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
        if self.trainer.move_metrics_to_cpu:
            training_step_output_for_epoch_end = training_step_output_for_epoch_end.cpu()
        return training_step_output_for_epoch_end, result
@staticmethod
def _prepare_outputs(
        outputs: List[List[List[Result]]],
        batch_mode: bool,
    ) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]:
        """
        Extract required information from batch or epoch end results.
        Args:
            outputs: A 3-dimensional list of ``Result`` objects with dimensions:
                [optimizer outs][batch outs][tbptt steps].
            batch_mode: If True, ignore the batch output dimension.
        Returns:
            The cleaned outputs with ``Result`` objects converted to dictionaries. All list dimensions of size one will
            be collapsed.
        """
        processed_outputs = []
        for opt_outputs in outputs:
            # handle an edge case where an optimizer output is the empty list
            if len(opt_outputs) == 0:
                continue
            processed_batch_outputs = []
            if batch_mode:
                opt_outputs = [opt_outputs]
            for batch_outputs in opt_outputs:
                processed_tbptt_outputs = []
                for tbptt_output in batch_outputs:
                    # note: 'loss' is written into the Result's own extra dict (in-place mutation)
                    out = tbptt_output.extra
                    out['loss'] = tbptt_output.minimize
                    processed_tbptt_outputs.append(out)
                # if there was only one tbptt step then we can collapse that dimension
                if len(processed_tbptt_outputs) == 1:
                    processed_tbptt_outputs = processed_tbptt_outputs[0]
                processed_batch_outputs.append(processed_tbptt_outputs)
            # batch_outputs should be just one dict (or a list of dicts if using tbptt) per optimizer
            if batch_mode:
                processed_batch_outputs = processed_batch_outputs[0]
            processed_outputs.append(processed_batch_outputs)
        # if there is only one optimiser then we collapse that dimension
        if len(processed_outputs) == 1:
            processed_outputs = processed_outputs[0]
        return processed_outputs
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
        """Run the optimizer step by delegating to ``LightningModule.optimizer_step``.

        The closure executes training_step + backward when the optimizer calls it.
        """
        model_ref = self.trainer.lightning_module
        is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
        using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
        # native amp + lbfgs is a no go right now
        if using_native_amp and is_lbfgs:
            raise MisconfigurationException(
                'native PyTorch amp and lbfgs are not compatible.'
                ' To request, please file a Github issue in PyTorch and tag @mcarilli'
            )
        # wraps into LightningOptimizer only for running step
        optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
        # model hook
        model_ref.optimizer_step(
            self.trainer.current_epoch,
            batch_idx,
            optimizer,
            opt_idx,
            train_step_and_backward_closure,
            on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
            using_native_amp=using_native_amp,
            using_lbfgs=is_lbfgs,
        )
def on_before_zero_grad(self, optimizer):
        """Fire the ``on_before_zero_grad`` hook for ``optimizer``."""
        self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
        """Delegate gradient zeroing for ``optimizer`` to the accelerator."""
        self.trainer.accelerator.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer) -> dict:
    """Record gradient norms, then clip gradients; returns the norm dict for logging."""
    # capture the gradient norms before clipping so logs reflect the raw gradients
    grad_norm_dict = self._track_gradient_norm()
    trainer = self.trainer
    # clip gradients
    trainer.accelerator.clip_gradients(
        optimizer, trainer.gradient_clip_val, gradient_clip_algorithm=trainer.gradient_clip_algorithm
    )
    return grad_norm_dict
def _track_gradient_norm(self):
    """Compute per-parameter gradient norms on logging steps; empty dict otherwise."""
    # only pay the cost of grad_norm() on steps that will actually be logged
    is_logging_step = (self.global_step + 1) % self.trainer.log_every_n_steps == 0
    if not is_logging_step:
        return {}
    if float(self.trainer.track_grad_norm) <= 0:
        return {}
    model = self.trainer.lightning_module
    return grad_norm(model, self.trainer.track_grad_norm)
def _tbptt_split_batch(self, batch: Any) -> List[Any]:
        """Split ``batch`` into truncated-BPTT chunks; single-element list when tbptt is off."""
        splits = [batch]
        truncated_bptt_enabled = self._truncated_bptt_enabled()
        if truncated_bptt_enabled:
            model_ref = self.trainer.lightning_module
            with self.trainer.profiler.profile("tbptt_split_batch"):
                # the LightningModule decides how to split its own batches
                splits = model_ref.tbptt_split_batch(batch, self._truncated_bptt_steps())
        return splits
def run_training_epoch(self):
        """Run the full batch loop for one training epoch.

        Includes mid-epoch validation, LR scheduler stepping, checkpointing and
        the epoch-end output processing/hooks.
        """
        # modify dataloader if needed (ddp, etc...)
        train_dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)
        # track epoch output
        epoch_output = [[] for _ in range(self.num_active_optimizers)]
        train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
        dataloader_idx = 0
        batch_idx = None
        is_last_batch = None
        for batch_idx, (batch, is_last_batch) in train_dataloader:
            self.batch_idx = batch_idx
            # ------------------------------------
            # TRAINING_STEP + TRAINING_STEP_END
            # ------------------------------------
            with self.trainer.profiler.profile("run_training_batch"):
                batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
            # when returning -1 from train_step, we end epoch early
            if batch_output.signal == -1:
                break
            # hook
            # TODO: add outputs to batches
            self.on_train_batch_end(
                epoch_output,
                batch_output.training_step_output_for_epoch_end,
                batch,
                batch_idx,
                dataloader_idx,
            )
            # -----------------------------------------
            # SAVE METRICS TO LOGGERS
            # -----------------------------------------
            self.trainer.logger_connector.log_train_step_metrics(batch_output)
            # -----------------------------------------
            # VALIDATE IF NEEDED
            # -----------------------------------------
            should_check_val = self._should_check_val_fx(batch_idx, is_last_batch)
            if should_check_val:
                self.trainer.validating = True
                self.trainer._run_evaluation()
                self.trainer.training = True
            # -----------------------------------------
            # SAVE LOGGERS (ie: Tensorboard, etc...)
            # -----------------------------------------
            self.save_loggers_on_train_batch_end()
            # update LR schedulers
            monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
            self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
            self.trainer.checkpoint_connector.has_trained = True
            # max steps reached, end training
            if (
                self.max_steps is not None and self.max_steps <= self.global_step + 1
                and self._accumulated_batches_reached()
            ):
                break
            # end epoch early
            # stop when the flag is changed or we've gone past the amount
            # requested in the batches
            if self.trainer.should_stop:
                break
            self.total_batch_idx += 1
            # stop epoch if we limited the number of training batches
            if self._num_training_batches_reached(is_last_batch):
                break
            # progress global step according to grads progress
            self.increment_accumulated_grad_global_step()
        if batch_idx is None:
            # dataloader/iterator did not produce a batch
            return
        # handle epoch_output on epoch end
        self.on_train_epoch_end(epoch_output)
        # log epoch metrics
        self.trainer.logger_connector.log_train_epoch_end_metrics(epoch_output)
        should_check_val = self._should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)
        should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)
        should_train_only = self.trainer.disable_validation or should_skip_eval
        # update epoch level lr_schedulers if no val loop outside train loop is triggered
        if not should_check_val or should_train_only:
            self.trainer.optimizer_connector.update_learning_rates(interval='epoch')
        if should_train_only:
            self.check_checkpoint_callback(True)
        if should_check_val:
            self.trainer.validating = True
            self.trainer._run_evaluation(on_epoch=True)
            self.trainer.training = True
        # increment the global step once
        # progress global step according to grads progress
        self.increment_accumulated_grad_global_step()
def on_train_epoch_end(self, epoch_output: List[List[List[Result]]]) -> None:
        """Finish the epoch: run ``training_epoch_end`` and the epoch-end hooks."""
        # inform logger the batch loop has finished
        self.trainer.logger_connector.on_train_epoch_end()
        # prepare epoch output
        processed_epoch_output = TrainLoop._prepare_outputs(epoch_output, batch_mode=False)
        # get the model and call model.training_epoch_end
        model = self.trainer.lightning_module
        if is_overridden('training_epoch_end', model=model):
            # run training_epoch_end
            # refresh the result for custom logging at the epoch level
            model._current_fx_name = 'training_epoch_end'
            # lightningmodule hook
            training_epoch_end_output = model.training_epoch_end(processed_epoch_output)
            if training_epoch_end_output is not None:
                raise MisconfigurationException(
                    'training_epoch_end expects a return of None. '
                    'HINT: remove the return statement in training_epoch_end'
                )
            # capture logging
            self.trainer.logger_connector.cache_logged_metrics()
        # call train epoch end hooks
        self._on_train_epoch_end_hook(processed_epoch_output)
        self.trainer.call_hook('on_epoch_end')
def _on_train_epoch_end_hook(self, processed_epoch_output) -> None:
        """Dispatch ``on_train_epoch_end`` across trainer, module and accelerator.

        Supports both the deprecated (``outputs``-taking) and the new module
        signatures.
        """
        # We cannot rely on Trainer.call_hook because the signatures might be different across
        # lightning module and callback
        # As a result, we need to inspect if the module accepts `outputs` in `on_train_epoch_end`
        # This implementation is copied from Trainer.call_hook
        hook_name = "on_train_epoch_end"
        # set hook_name to model + reset Result obj
        skip = self.trainer._reset_result_and_set_hook_fx_name(hook_name)
        # always profile hooks
        with self.trainer.profiler.profile(hook_name):
            # first call trainer hook
            if hasattr(self.trainer, hook_name):
                trainer_hook = getattr(self.trainer, hook_name)
                trainer_hook(processed_epoch_output)
            # next call hook in lightningModule
            model_ref = self.trainer.lightning_module
            if is_overridden(hook_name, model_ref):
                hook_fx = getattr(model_ref, hook_name)
                if is_param_in_hook_signature(hook_fx, "outputs"):
                    self.warning_cache.warn(
                        "The signature of `ModelHooks.on_train_epoch_end` has changed in v1.3."
                        " `outputs` parameter has been deprecated."
                        " Support for the old signature will be removed in v1.5", DeprecationWarning
                    )
                    model_ref.on_train_epoch_end(processed_epoch_output)
                else:
                    model_ref.on_train_epoch_end()
            # if the PL module doesn't have the hook then call the accelerator
            # used to auto-reduce things for the user with Results obj
            elif hasattr(self.trainer.accelerator, hook_name):
                accelerator_hook = getattr(self.trainer.accelerator, hook_name)
                accelerator_hook()
        if not skip:
            self.trainer._cache_logged_metrics()
def run_training_batch(self, batch, batch_idx, dataloader_idx):
        """Run one batch across all tbptt splits and active optimizers.

        Returns an ``AttributeDict`` with ``signal`` (-1 aborts the epoch),
        ``grad_norm_dict`` and the per-optimizer step outputs.
        """
        # track grad norms
        grad_norm_dict = {}
        # bookkeeping
        self._hiddens = None
        optimizers = list(enumerate(self.trainer.optimizers))
        # track all outputs across time and num of optimizers
        batch_outputs = [[] for _ in range(len(optimizers))]
        if batch is None:
            self.warning_cache.warn("train_dataloader yielded None. If this was on purpose, ignore this warning...")
            return AttributeDict(
                signal=0,
                grad_norm_dict={},
                training_step_output_for_epoch_end=batch_outputs,
            )
        # hook
        response = self.trainer.call_hook("on_batch_start")
        if response == -1:
            return AttributeDict(signal=-1, grad_norm_dict={})
        # hook
        response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
        if response == -1:
            return AttributeDict(signal=-1, grad_norm_dict={})
        # lightning module hook
        splits = self._tbptt_split_batch(batch)
        for split_idx, split_batch in enumerate(splits):
            self.split_idx = split_idx
            if self.trainer.lightning_module.automatic_optimization:
                for opt_idx, optimizer in self.get_active_optimizers(batch_idx):
                    result = self._run_optimization(batch_idx, split_idx, split_batch, opt_idx, optimizer)
                    if result:
                        batch_outputs[opt_idx].append(result.training_step_output_for_epoch_end)
                        grad_norm_dict = result.get("grad_norm_dict", {})
            else:
                # in manual optimization, there is no looping over optimizers
                result = self._run_optimization(batch_idx, split_idx, split_batch)
                if result:
                    batch_outputs[0].append(result.training_step_output_for_epoch_end)
        output = AttributeDict(
            signal=0,
            # todo: Properly aggregate grad_norm accros opt_idx and split_idx
            grad_norm_dict=grad_norm_dict,
            training_step_output_for_epoch_end=batch_outputs,
        )
        return output
def _run_optimization(self, batch_idx, split_idx, split_batch, opt_idx=0, optimizer=None):
        """Run training_step (and, unless accumulating, the optimizer step) for one split."""
        # TODO: In v1.5, when optimizer_idx gets removed from training_step in manual_optimization, change
        # opt_idx=0 to opt_idx=None in the signature here
        # toggle model params + set info to logger_connector
        self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)
        result = AttributeDict()
        closure = self.make_closure(split_batch, batch_idx, opt_idx, optimizer, self._hiddens, result)
        if self.should_accumulate():
            # For gradient accumulation
            # -------------------
            # calculate loss (train step + train step end)
            # -------------------
            # automatic_optimization=True: perform ddp sync only when performing optimizer_step
            # automatic_optimization=False: don't block synchronization here
            with self.block_ddp_sync_behaviour():
                closure()
            # ------------------------------
            # BACKWARD PASS
            # ------------------------------
            # gradient update with accumulated gradients
        else:
            if self.trainer.lightning_module.automatic_optimization:
                self.optimizer_step(optimizer, opt_idx, batch_idx, closure)
            else:
                result = self.training_step(split_batch, batch_idx, opt_idx, self._hiddens)
        if not result:
            # user decided to skip optimization
            return result
        # update running loss + reset accumulated loss
        self.update_running_loss(result.loss)
        self._process_closure_result(result)
        return result
def training_step_and_backward_closure(
    self,
    split_batch: Any,
    batch_idx: int,
    opt_idx: int,
    optimizer: Optimizer,
    hiddens,
    return_result: AttributeDict,
) -> Optional[torch.Tensor]:
    """Closure body run inside ``optimizer.step``: do step+backward, surface the loss."""
    step_result = self.training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
    # a None result means the step was skipped; leave return_result untouched
    if step_result is None:
        return return_result.loss
    return_result.update(step_result)
    return return_result.loss
def make_closure(self, *closure_args, **closure_kwargs: Any) -> Callable:
    """ Wraps the training step closure into a partial object which will be called within ``optimizer.step``. """
    closure_fn = self.training_step_and_backward_closure
    bound_closure = partial(closure_fn, *closure_args, **closure_kwargs)
    # make the partial look like the original closure for introspection
    return update_wrapper(bound_closure, closure_fn)
@contextmanager
def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
        """
        automatic_optimization = True
        Blocks ddp sync gradients behaviour on backwards pass.
        This is useful for skipping sync when accumulating gradients, reducing communication overhead
        automatic_optimization = False
        do not block ddp gradient sync when using manual optimization
        as gradients are needed within the training step

        Args:
            should_block_sync: force blocking even under manual optimization.

        Returns:
            context manager with sync behaviour off
        """
        if (
            isinstance(self.trainer.training_type_plugin, ParallelPlugin)
            and (self.trainer.lightning_module.automatic_optimization or should_block_sync)
        ):
            with self.trainer.training_type_plugin.block_backward_sync():
                yield None
        else:
            yield None
def _process_closure_result(self, opt_closure_result: Optional[AttributeDict]) -> None:
        """Cache logged metrics from a finished closure; optionally NaN-check the loss."""
        if not opt_closure_result:
            return
        # cache metrics
        self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
        # check if loss or model weights are nan
        if self.trainer.terminate_on_nan:
            self._check_finite(opt_closure_result.loss)
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
        """Wrap forward, zero_grad and backward in a closure so second order methods work"""
        with self.trainer.profiler.profile("training_step_and_backward"):
            # lightning module hook
            result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
            if not self._skip_backward and self.trainer.lightning_module.automatic_optimization:
                is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0
                if is_first_batch_to_accumulate:
                    # zero gradients only at the start of an accumulation window
                    self.on_before_zero_grad(optimizer)
                    self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)
                # backward pass
                if result is not None:
                    with self.trainer.profiler.profile("backward"):
                        self.backward(result, optimizer, opt_idx)
                    # hook - call this hook only
                    # when gradients have finished to accumulate
                    if not self.should_accumulate():
                        self.on_after_backward(result.training_step_output, batch_idx, result.loss)
                    # check if loss or model weights are nan
                    if self.trainer.terminate_on_nan:
                        self._check_finite(result.loss)
                else:
                    self.warning_cache.warn(
                        "training_step returned None. If this was on purpose, ignore this warning..."
                    )
        if len(self.trainer.optimizers) > 1:
            # revert back to previous state
            self.trainer.lightning_module.untoggle_optimizer(opt_idx)
        return result
def _check_finite(self, loss: torch.Tensor) -> None:
        """Raise if ``loss`` is NaN/inf, then scan model parameters for NaNs."""
        if not torch.isfinite(loss).all():
            raise ValueError(f'The loss returned in `training_step` is {loss}.')
        model = self.trainer.lightning_module
        detect_nan_parameters(model)
def backward(self, result, optimizer, opt_idx, *args, **kwargs):
        """Run the backward pass via the accelerator; track gradient norms after the last accumulation step."""
        self.trainer.dev_debugger.track_event("backward_call")
        should_accumulate = self.should_accumulate()
        # backward can be called manually in the training loop
        if isinstance(result, torch.Tensor):
            self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
        else:
            result.closure_loss = self.trainer.accelerator.backward(
                result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
            )
        if not self.should_accumulate():
            # track gradients
            result.grad_norm_dict = self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
    """Step ``interval='step'`` LR schedulers whenever an optimizer step just ran."""
    stepped = self._accumulated_batches_reached() or self._num_training_batches_reached()
    if not stepped:
        return
    # update lr
    active_indices = [opt_idx for opt_idx, _ in self.get_active_optimizers()]
    self.trainer.optimizer_connector.update_learning_rates(
        interval="step",
        monitor_metrics=monitor_metrics,
        opt_indices=active_indices,
    )
def increment_accumulated_grad_global_step(self):
    """Advance the global step once per completed optimizer step."""
    reached_accumulation = self._accumulated_batches_reached()
    reached_end = self._num_training_batches_reached()
    # progress global step according to grads progress
    if reached_accumulation or reached_end:
        self.global_step = self.trainer.accelerator.update_global_step(self.total_batch_idx, self.global_step)
def _accumulated_batches_reached(self):
        """True when the current ``self.batch_idx`` completes an accumulation window."""
        return (self.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch=False):
        """True when the batch limit is hit or the dataloader reported its last batch."""
        return (self.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch
def should_accumulate(self):
    """True when this batch only accumulates gradients (no optimizer step yet)."""
    # an optimizer step happens when accumulation completes or the epoch ends
    done_accumulating = self._accumulated_batches_reached()
    is_final_batch = self._num_training_batches_reached()
    return not done_accumulating and not is_final_batch
def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool, on_epoch: bool = False) -> bool:
        """ Decide if we should run validation. """
        if not self.trainer.enable_validation:
            return False
        # check if this epoch is eligible to run validation
        if (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch != 0:
            return False
        # val_check_batch is inf for iterable datasets with no length defined
        # TODO: let training/eval loop handle logic around limit_*_batches and val_check_batch
        is_val_check_batch = False
        if isinstance(self.trainer.limit_train_batches, int) and self.trainer.val_check_batch == float('inf'):
            is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0
        elif self.trainer.val_check_batch != float('inf'):
            is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
        # Note: num_training_batches is also inf for iterable datasets with no length defined
        epoch_end_val_check = (batch_idx + 1) % self.trainer.num_training_batches == 0
        is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
        if on_epoch:
            # epoch-boundary call: also validate on stop requests / infinite datasets
            return (
                is_val_check_batch and epoch_end_val_check
            ) or self.trainer.should_stop or is_last_batch_for_infinite_dataset
        else:
            # mid-epoch call: never validate on the exact epoch boundary (handled by on_epoch)
            return is_val_check_batch and not epoch_end_val_check
def _build_kwargs(self, batch, batch_idx, opt_idx, hiddens):
        """Assemble the keyword arguments passed to the user's ``training_step``."""
        # enable not needing to add opt_idx to training_step
        step_kwargs = OrderedDict([('batch', batch), ('batch_idx', batch_idx)])
        lightning_module = self.trainer.lightning_module
        if len(self.trainer.optimizers) > 1:
            training_step_fx = getattr(lightning_module, "training_step")
            has_opt_idx_in_train_step = is_param_in_hook_signature(training_step_fx, "optimizer_idx")
            if has_opt_idx_in_train_step:
                if not lightning_module.automatic_optimization:
                    self.warning_cache.warn(
                        "`training_step` hook signature has changed in v1.3."
                        " `optimizer_idx` argument has been removed in case of manual optimization. Support for"
                        " the old signature will be removed in v1.5", DeprecationWarning
                    )
                step_kwargs['optimizer_idx'] = opt_idx
            elif not has_opt_idx_in_train_step and self.trainer.lightning_module.automatic_optimization:
                raise ValueError(
                    f"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but"
                    ' `training_step` is missing the `optimizer_idx` argument.'
                )
        # pass hiddens if using tbptt
        if self._truncated_bptt_enabled():
            step_kwargs['hiddens'] = hiddens
        return step_kwargs
def _truncated_bptt_enabled(self) -> bool:
        """ Temporary tbptt utilities until this flag is fully migrated to the lightning module. """
        # tbptt is considered enabled whenever a positive step count is configured
        return self._truncated_bptt_steps() > 0
def _truncated_bptt_steps(self) -> int:
    """Resolve the tbptt step count, preferring the LightningModule's setting."""
    # Give precedence to the LightningModule as the Trainer flag will be removed in v1.5
    module_steps = self.trainer.lightning_module.truncated_bptt_steps
    if module_steps > 0:
        return module_steps
    return self.trainer.truncated_bptt_steps or 0
def save_loggers_on_train_batch_end(self):
    """Flush the experiment logger to disk when the connector says it is time."""
    trainer = self.trainer
    # only rank zero writes, and only when a logger is attached
    if trainer.logger_connector.should_flush_logs and trainer.is_global_zero and trainer.logger is not None:
        trainer.logger.save()
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
    """Prepare logging state and parameter toggling for one tbptt split."""
    trainer = self.trainer
    lightning_module = trainer.lightning_module
    # make sure only the gradients of the current optimizer's parameters are calculated
    # in the training step to prevent dangling gradients in multiple-optimizer setup.
    if lightning_module.automatic_optimization and len(trainer.optimizers) > 1:
        lightning_module.toggle_optimizer(optimizer, opt_idx)
    # use to track metrics internally
    trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self, current_loss: torch.Tensor) -> None:
    """Fold the latest loss into the accumulated/running loss trackers, then reset."""
    if self.trainer.lightning_module.automatic_optimization:
        # track total loss for logging (avoid mem leaks)
        self.accumulated_loss.append(current_loss)
    mean_loss = self.accumulated_loss.mean()
    if mean_loss is not None:
        # calculate running loss for display (re-scale by the accumulation factor)
        self.running_loss.append(mean_loss * self.trainer.accumulate_grad_batches)
    # reset for next set of accumulated grads
    self.accumulated_loss.reset()
| 42.991009 | 119 | 0.663894 |
5c0388eaeb3aab3573b9d6d1d7abbc066e4d0b83 | 8,004 | py | Python | drfpasswordless/utils.py | firstTimeCaller/django-rest-framework-passwordless | 8dad95ab0b2ce33d0e9fee9455391893929ec0cf | [
"MIT"
] | null | null | null | drfpasswordless/utils.py | firstTimeCaller/django-rest-framework-passwordless | 8dad95ab0b2ce33d0e9fee9455391893929ec0cf | [
"MIT"
] | null | null | null | drfpasswordless/utils.py | firstTimeCaller/django-rest-framework-passwordless | 8dad95ab0b2ce33d0e9fee9455391893929ec0cf | [
"MIT"
] | null | null | null | import logging
import os
from django.contrib.auth import get_user_model
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.template import loader
from django.utils import timezone
from rest_framework.authtoken.models import Token
from drfpasswordless.models import CallbackToken
from drfpasswordless.settings import api_settings
logger = logging.getLogger(__name__)
User = get_user_model()
def authenticate_by_token(callback_token):
    """Exchange a one-time auth callback token for its user, or None on failure.

    A successfully used token is deactivated so it cannot be replayed.
    """
    try:
        token = CallbackToken.objects.get(key=callback_token, is_active=True, type=CallbackToken.TOKEN_TYPE_AUTH)
        # Returning a user designates a successful authentication.
        token.user = User.objects.get(pk=token.user.pk)
        # Mark this token as used.
        token.is_active = False
        token.save()
        return token.user
    except CallbackToken.DoesNotExist:
        logger.debug("drfpasswordless: Challenged with a callback token that doesn't exist.")
    except User.DoesNotExist:
        logger.debug("drfpasswordless: Authenticated user somehow doesn't exist.")
    except PermissionDenied:
        logger.debug("drfpasswordless: Permission denied while authenticating.")
    return None
def create_callback_token_for_user(user, alias_type, token_type):
    """Create a CallbackToken aimed at the user's email or mobile alias.

    Returns the new token, or None when ``alias_type`` is unrecognised.
    """
    normalized_alias = alias_type.upper()
    if normalized_alias == 'EMAIL':
        to_alias = getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)
    elif normalized_alias == 'MOBILE':
        to_alias = getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME)
    else:
        return None
    return CallbackToken.objects.create(
        user=user,
        to_alias_type=normalized_alias,
        to_alias=to_alias,
        type=token_type,
    )
def validate_token_age(callback_token):
    """
    Returns True if a given token is within the age expiration limit.
    """
    try:
        token = CallbackToken.objects.get(key=callback_token, is_active=True)
    except CallbackToken.DoesNotExist:
        # No valid token.
        return False
    age_seconds = (timezone.now() - token.created_at).total_seconds()
    if age_seconds <= api_settings.PASSWORDLESS_TOKEN_EXPIRE_TIME:
        return True
    # Expired: invalidate our token so it can never be used again.
    token.is_active = False
    token.save()
    return False
def verify_user_alias(user, token):
    """
    Marks a user's contact point as verified depending on accepted token type.
    """
    # NOTE(review): when the token's alias does not match the user's current alias,
    # the verified flag is left untouched but the user is still saved and True is
    # returned — confirm this is intended.
    if token.to_alias_type == 'EMAIL':
        if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME):
            setattr(user, api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME, True)
    elif token.to_alias_type == 'MOBILE':
        if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME):
            setattr(user, api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME, True)
    else:
        # unknown alias type: nothing to verify
        return False
    user.save()
    return True
def inject_template_context(context):
    """
    Injects additional context into email template.
    """
    # each configured processor returns a dict that is merged into the context
    for context_processor in api_settings.PASSWORDLESS_CONTEXT_PROCESSORS:
        context.update(context_processor())
    return context
def send_email_with_callback_token(user, email_token, **kwargs):
    """
    Sends a Email to user.email.
    Passes silently without sending in test environment

    Returns True on success, False on any failure (logged at debug level).
    Optional kwargs: email_subject, email_plaintext, email_html, request.
    """
    try:
        if api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS:
            # Make sure we have a sending address before sending.

            # Get email subject and message
            email_subject = kwargs.get('email_subject',
                                       api_settings.PASSWORDLESS_EMAIL_SUBJECT)
            user_email = getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)
            email_plaintext = kwargs.get('email_plaintext',
                                         api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE)
            if api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE_ORDERED_CONTEXT:
                # SECURITY: eval() executes arbitrary expressions taken from
                # settings against this function's local scope. This is safe
                # only because the setting is trusted, developer-controlled
                # configuration — never feed it user input.
                string_options = tuple()
                for x in api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE_ORDERED_CONTEXT:
                    string_options = string_options + (eval(x),)
                email_plaintext = email_plaintext % string_options
            else:
                # Default: the message template has a single %s slot for the token.
                email_plaintext = email_plaintext % email_token.key
            email_html = kwargs.get('email_html',
                                    api_settings.PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME)

            # Inject context if user specifies.
            # NOTE(review): kwargs['request'] raises KeyError when absent; the
            # broad except below turns that into a False return — confirm all
            # callers pass request=...
            context = inject_template_context({'callback_token': email_token.key, 'user_email':user_email, 'request': kwargs['request']})
            html_message = loader.render_to_string(email_html, context,)
            send_mail(
                email_subject,
                email_plaintext,
                api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS,
                [user_email],
                fail_silently=False,
                html_message=html_message,)
        else:
            logger.debug("Failed to send token email. Missing PASSWORDLESS_EMAIL_NOREPLY_ADDRESS.")
            return False
        return True
    except Exception as e:
        logger.debug("Failed to send token email to user: %d."
                     "Possibly no email on user object. Email entered was %s" %
                     (user.id, getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)))
        logger.debug(e)
        return False
def send_sms_with_callback_token(user, mobile_token, **kwargs):
    """
    Sends a SMS to user.mobile via Twilio.
    Passes silently without sending in test environment.

    Returns True on (assumed) success and False on every failure path.
    Optional kwargs: mobile_message (a %s-style template for the token key).
    """
    base_string = kwargs.get('mobile_message', api_settings.PASSWORDLESS_MOBILE_MESSAGE)

    try:
        if api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER:
            # We need a sending number to send properly
            if api_settings.PASSWORDLESS_TEST_SUPPRESSION is True:
                # we assume success to prevent spamming SMS during testing.
                return True

            # Imported lazily so twilio stays an optional dependency.
            from twilio.rest import Client
            twilio_client = Client(os.environ['TWILIO_ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])

            to_number = getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME)
            if to_number.__class__.__name__ == 'PhoneNumber':
                # PhoneNumber objects (e.g. django-phonenumber-field) must be
                # stringified before being handed to Twilio.
                to_number = to_number.__str__()

            twilio_client.messages.create(
                body=base_string % mobile_token.key,
                to=to_number,
                from_=api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER
            )
            return True
        else:
            logger.debug("Failed to send token sms. Missing PASSWORDLESS_MOBILE_NOREPLY_NUMBER.")
            return False
    except ImportError:
        logger.debug("Couldn't import Twilio client. Is twilio installed?")
        return False
    except KeyError:
        logger.debug("Couldn't send SMS."
                     "Did you set your Twilio account tokens and specify a PASSWORDLESS_MOBILE_NOREPLY_NUMBER?")
        # Fix: this branch previously fell through and implicitly returned
        # None; return False explicitly like every other failure path.
        return False
    except Exception as e:
        logger.debug("Failed to send token SMS to user: {}. "
                     "Possibly no mobile number on user object or the twilio package isn't set up yet. "
                     "Number entered was {}".format(user.id, getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME)))
        logger.debug(e)
        return False
def create_authentication_token(user):
    """ Default way to create an authentication token"""
    # Note: get_or_create returns a (Token, created) tuple, not a bare Token —
    # callers are expected to unpack it.
    return Token.objects.get_or_create(user=user)
| 38.114286 | 137 | 0.653298 |
d79befe36c350d2d3458f05bc00193386bae043e | 3,485 | py | Python | src/DynamicPlot.py | tblaschke/autoencoder | ffea9fae646b869ea48f116ec08aa9e25463706b | [
"MIT"
] | 1 | 2022-03-10T13:19:10.000Z | 2022-03-10T13:19:10.000Z | src/DynamicPlot.py | tblaschke/autoencoder | ffea9fae646b869ea48f116ec08aa9e25463706b | [
"MIT"
] | null | null | null | src/DynamicPlot.py | tblaschke/autoencoder | ffea9fae646b869ea48f116ec08aa9e25463706b | [
"MIT"
] | 1 | 2022-03-10T13:19:11.000Z | 2022-03-10T13:19:11.000Z | # coding=utf-8
from rdkit import Chem
from rdkit.Chem import Draw
class DynamicPlot(object):
def __init__(self, n_steps, min, max, save_file=None):
import os
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
import matplotlib
matplotlib.use("Agg", warn=True, force=True)
self.gui = False
else:
import matplotlib
matplotlib.use("TKAgg", warn=True, force=True)
self.gui = True
from matplotlib import pyplot as plt
#plt.ion()
self.min = min
self.max = max
self.data = None
self.updated = False
self.file = save_file
self.step = 0
self.scores = [[], []]
if self.gui:
self.fig, (self.mol_ax, self.score_ax) = plt.subplots(2, 1,
figsize=(14, 8),
gridspec_kw={'height_ratios': [3.5, 1]})
self.score_ax.set_xlim(0, n_steps)
self.score_ax.set_ylim(min, max)
self.score_ax.set_ylabel(r"$\mathrm{P_{active}}$")
self.score_ax.set_xlabel(r"$\mathrm{Iteration\ Step}$")
self.mol_ax.set_title(r"$\mathrm{Generated\ Molecules}$", y=0.97)
self.mol_ax.axis("off")
plt.tight_layout()
plt.show(False)
plt.draw()
self.fig.canvas.draw()
self.fig.canvas.update()
self.fig.canvas.flush_events()
def fastupdate(self, data, smiles=None):
self.scores[0].append(data[0])
if data[1] > self.max:
self.scores[1].append(self.max)
elif data[1] < self.min:
self.scores[1].append(self.min)
else:
self.scores[1].append(data[1])
def update(self, data, smis):
if isinstance(data, float):
data = [self.step, data]
self.step += 1
if self.file is not None:
with open(self.file, "a") as fd:
smis_str = ""
for smi in smis:
smis_str = smis_str + " " + smi
fd.write("{}\t{}\t{}\n".format(self.step, data[1], smis_str))
data[1] = 1-data[1]
self.scores[0].append(data[0])
if data[1] > self.max:
self.scores[1].append(self.max)
elif data[1] < self.min:
self.scores[1].append(self.min)
else:
self.scores[1].append(data[1])
if self.gui:
if not self.updated:
self.data = self.score_ax.plot(self.scores[0],self.scores[1], "r-")[0] # Returns a tuple of line objects thus [0]
self.updated = True
self.data.set_data(self.scores)
mols = []
for smi in smis:
mol = Chem.MolFromSmiles(smi)
mols.append(mol)
if len(mols) == 8:
break
if len(mols) > 0:
try:
mol_img = Draw.MolsToGridImage(mols, subImgSize=(400, 400), molsPerRow=4)
self.mol_ax.images = []
self.mol_ax.imshow(mol_img, interpolation="bicubic")
except Exception:
pass
self.fig.canvas.draw()
self.fig.canvas.update()
self.fig.canvas.flush_events() | 33.190476 | 129 | 0.493257 |
925b0faf62212869c2dcbd74fc31f218a44ff6d1 | 1,877 | py | Python | HashTable/DesignTheKey/valid_sudoku.py | mamoudmatook/Leetcode | 59fb1612ee648a9b99ff7cc779ada5656c01ecd2 | [
"MIT"
] | null | null | null | HashTable/DesignTheKey/valid_sudoku.py | mamoudmatook/Leetcode | 59fb1612ee648a9b99ff7cc779ada5656c01ecd2 | [
"MIT"
] | null | null | null | HashTable/DesignTheKey/valid_sudoku.py | mamoudmatook/Leetcode | 59fb1612ee648a9b99ff7cc779ada5656c01ecd2 | [
"MIT"
] | null | null | null | #
# Created on Tue Oct 19 2021
#
# The MIT License (MIT)
# Copyright (c) 2021 Maatuq
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
class Solution:
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Check whether the filled cells of a 9x9 Sudoku board are valid.

        A board is valid when no digit repeats within any row, any column,
        or any 3x3 sub-box. Empty cells ('.') are ignored; validity does
        not imply the puzzle is solvable.
        """
        size = 9
        seen_in_row = [set() for _ in range(size)]
        seen_in_col = [set() for _ in range(size)]
        seen_in_box = [set() for _ in range(size)]

        for r in range(size):
            for c in range(size):
                cell = board[r][c]
                if cell == ".":
                    continue
                digit = int(cell) - 1          # normalize '1'..'9' to 0..8
                b = (r // 3) * 3 + c // 3      # index of the 3x3 sub-box
                if (digit in seen_in_row[r]
                        or digit in seen_in_col[c]
                        or digit in seen_in_box[b]):
                    return False
                seen_in_row[r].add(digit)
                seen_in_col[c].add(digit)
                seen_in_box[b].add(digit)
        return True
d9cc0f8fb59ec9a5b23014a199bda0ca2fde4c9a | 2,111 | py | Python | src/compas/com/rhino/client.py | yijiangh/compas | a9e86edf6b602f47ca051fccedcaa88a5e5d3600 | [
"MIT"
] | 1 | 2019-03-27T22:46:29.000Z | 2019-03-27T22:46:29.000Z | src/compas/com/rhino/client.py | yijiangh/compas | a9e86edf6b602f47ca051fccedcaa88a5e5d3600 | [
"MIT"
] | null | null | null | src/compas/com/rhino/client.py | yijiangh/compas | a9e86edf6b602f47ca051fccedcaa88a5e5d3600 | [
"MIT"
] | 1 | 2022-01-16T02:32:43.000Z | 2022-01-16T02:32:43.000Z | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import time
import compas
try:
from comtypes.client import CreateObject
from comtypes.client import GetModule
except ImportError:
compas.raise_if_windows()
__all__ = ['RhinoClient']
class RhinoClientError(Exception):
    """Raised when RhinoScript is accessed before the client is started."""
"""Communicate with Rhino through Window's COM interface.
Warning
-------
This functionality is only available on Windows.
Examples
--------
>>> rhino = RhinoClient()
>>> rhino.start()
>>> rhino.show()
>>> rhino.top()
>>> rhino.AddPoint(0, 0, 0)
<guid>
"""
def __init__(self):
self.Rhino = None
self.rs = None
def __getattr__(self, name):
if self.rs:
method = getattr(self.rs, name)
def wrapper(*args, **kwargs):
return method(*args, **kwargs)
return wrapper
else:
raise RhinoClientError()
def start(self):
Rhino_tlb = GetModule("C:/Program Files/Rhinoceros 5/System/Rhino5.tlb")
RhinoScript_tlb = GetModule("C:/Program Files/Rhinoceros 5/Plug-ins/RhinoScript.tlb")
self.Rhino = CreateObject('Rhino5x64.Application').QueryInterface(Rhino_tlb.IRhino5x64Application)
while not self.Rhino.IsInitialized():
print('Initialising Rhino...')
time.sleep(0.5)
print('Rhino initialised!')
self.rs = self.Rhino.GetScriptObject().QueryInterface(RhinoScript_tlb.IRhinoScript)
def stop(self):
raise NotImplementedError
def show(self):
self.Rhino.Visible = True
def hide(self):
self.Rhino.Visible = False
def top(self):
self.Rhino.BringToTop()
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
Rhino = RhinoClient()
Rhino.start()
Rhino.show()
Rhino.top()
Rhino.AddPoint([0, 0, 0])
| 23.197802 | 106 | 0.580767 |
1d17379ce789d70097ac4232d3be691d6c43f7d7 | 2,245 | py | Python | nova/virt/disk/mount/loop.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | 7 | 2015-09-22T11:27:16.000Z | 2015-11-02T12:33:46.000Z | nova/virt/disk/mount/loop.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | 2 | 2015-09-07T22:14:46.000Z | 2020-08-12T08:51:56.000Z | nova/virt/disk/mount/loop.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | 4 | 2015-09-09T16:48:56.000Z | 2022-03-15T20:52:57.000Z | # Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Support for mounting images with the loop device."""
from nova.i18n import _, _LI
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk.mount import api
LOG = logging.getLogger(__name__)
class LoopMount(api.Mount):
    """loop back support for raw images."""
    # Mount-mode identifier used by the generic Mount machinery.
    mode = 'loop'

    def _inner_get_dev(self):
        # Ask losetup for a free loop device and attach the image to it;
        # --show makes losetup print the device path it picked.
        out, err = utils.trycmd('losetup', '--find', '--show', self.image,
                                run_as_root=True)
        if err:
            self.error = _('Could not attach image to loopback: %s') % err
            LOG.info(_LI('Loop mount error: %s'), self.error)
            self.linked = False
            self.device = None
            return False

        self.device = out.strip()
        LOG.debug("Got loop device %s", self.device)
        self.linked = True
        return True

    def get_dev(self):
        """Attach the image to a loop device, retrying on transient failure."""
        # NOTE(mikal): the retry is required here in case we are low on loop
        # devices. Note however that modern kernels will use more loop devices
        # if they exist. If you're seeing lots of retries, consider adding
        # more devices.
        return self._get_dev_retry_helper()

    def unget_dev(self):
        """Detach the loop device, if one is currently attached."""
        if not self.linked:
            return

        # NOTE(mikal): On some kernels, losetup -d will intermittently fail,
        # thus leaking a loop device unless the losetup --detach is retried:
        # https://lkml.org/lkml/2012/9/28/62
        LOG.debug("Release loop device %s", self.device)
        utils.execute('losetup', '--detach', self.device, run_as_root=True,
                      attempts=3)
        self.linked = False
        self.device = None
| 36.209677 | 78 | 0.647661 |
de13506dfcde4803030fb4eee704c44489b9d134 | 191 | py | Python | reader.py | Taichinakai/Taichi-Nakai | 83284b93b5739b264783bf849d990f33f8abae85 | [
"MIT"
] | null | null | null | reader.py | Taichinakai/Taichi-Nakai | 83284b93b5739b264783bf849d990f33f8abae85 | [
"MIT"
] | null | null | null | reader.py | Taichinakai/Taichi-Nakai | 83284b93b5739b264783bf849d990f33f8abae85 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 13 16:40:29 2017
@author: taichi
"""
import read_functions as n
import pandas as x
n.read_whole()
n.read_whole2()
n.read_whole3() | 14.692308 | 36 | 0.638743 |
f6a11877eece7b0ad6e008c8ec51592f21d3effa | 1,829 | py | Python | packages/sqlmap-master/plugins/dbms/derby/connector.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
] | null | null | null | packages/sqlmap-master/plugins/dbms/derby/connector.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
] | null | null | null | packages/sqlmap-master/plugins/dbms/derby/connector.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Copyright (c) 2006-2021 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
try:
    import drda
except ImportError:
    # pydrda is an optional dependency; Derby support is simply unavailable
    # without it. Catch ImportError specifically instead of a bare except,
    # which also swallowed KeyboardInterrupt and unrelated errors.
    pass
import logging
from lib.core.common import getSafeExString
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
    """
    Homepage: https://github.com/nakagami/pydrda/
    User guide: https://github.com/nakagami/pydrda/blob/master/README.rst
    API: https://www.python.org/dev/peps/pep-0249/
    License: MIT
    """

    def connect(self):
        # Open a DRDA connection to the Apache Derby instance and set up
        # the cursor; connection failures become SqlmapConnectionException.
        self.initConnection()

        try:
            self.connector = drda.connect(host=self.hostname, database=self.db, port=self.port)
        except drda.OperationalError as ex:
            raise SqlmapConnectionException(getSafeExString(ex))

        self.initCursor()
        self.printConnected()

    def fetchall(self):
        # Return all rows from the last executed statement, or None when the
        # driver reports a programming error (e.g. no result set produced).
        try:
            return self.cursor.fetchall()
        except drda.ProgrammingError as ex:
            # Warn only when a direct DBMS handler is active; debug otherwise.
            logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % getSafeExString(ex))
            return None

    def execute(self, query):
        # Run a statement; operational/programming errors are logged and
        # swallowed, internal driver errors abort the connection.
        try:
            self.cursor.execute(query)
        except (drda.OperationalError, drda.ProgrammingError) as ex:
            logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % getSafeExString(ex))
        except drda.InternalError as ex:
            raise SqlmapConnectionException(getSafeExString(ex))

        try:
            self.connector.commit()
        except drda.OperationalError:
            # A failed commit after a failed statement is non-fatal here.
            pass

    def select(self, query):
        # Convenience wrapper: execute then fetch the full result set.
        self.execute(query)
        return self.fetchall()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.