hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c31542daa5de4d6fed71faf3355aaeb443d153b | 63,918 | py | Python | pandas/core/internals/managers.py | LauraCollard/pandas | b1c3a9031569334cafc4e8d45d35408421f7dea4 | [
"BSD-3-Clause"
] | 2 | 2019-12-02T11:24:30.000Z | 2021-02-28T12:13:54.000Z | pandas/core/internals/managers.py | LauraCollard/pandas | b1c3a9031569334cafc4e8d45d35408421f7dea4 | [
"BSD-3-Clause"
] | 1 | 2019-08-18T16:00:45.000Z | 2019-08-18T16:00:45.000Z | pandas/core/internals/managers.py | LauraCollard/pandas | b1c3a9031569334cafc4e8d45d35408421f7dea4 | [
"BSD-3-Clause"
] | 4 | 2019-10-09T07:52:08.000Z | 2021-07-12T02:37:59.000Z | from collections import defaultdict
from functools import partial
import itertools
import operator
import re
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_convert_objects,
maybe_promote,
)
from pandas.core.dtypes.common import (
_NS_DTYPE,
is_datetimelike_v_numeric,
is_extension_array_dtype,
is_extension_type,
is_list_like,
is_numeric_v_string_like,
is_scalar,
is_sparse,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject
from pandas.core.index import Index, MultiIndex, ensure_index
from pandas.core.indexers import maybe_convert_indices
from pandas.io.formats.printing import pprint_thing
from .blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
ObjectValuesExtensionBlock,
_extend_blocks,
_merge_blocks,
_safe_reshape,
get_block_type,
make_block,
)
from .concat import ( # all for concatenate_block_managers
combine_concat_plans,
concatenate_join_units,
get_mgr_concatenation_plan,
is_uniform_join_units,
)
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = [
"axes",
"blocks",
"_ndim",
"_shape",
"_known_consolidated",
"_is_consolidated",
"_blknos",
"_blklocs",
]
    def __init__(
        self,
        blocks: Sequence[Block],
        axes: Sequence[Index],
        do_integrity_check: bool = True,
    ):
        """
        Parameters
        ----------
        blocks : sequence of Block
        axes : sequence of Index, one per manager dimension
        do_integrity_check : bool, default True
            If True, verify block shapes/placements agree with the axes.
        """
        self.axes = [ensure_index(ax) for ax in axes]
        self.blocks = tuple(blocks)  # type: Tuple[Block, ...]
        # every block must share the manager's dimensionality
        for block in blocks:
            if self.ndim != block.ndim:
                raise AssertionError(
                    "Number of Block dimensions ({block}) must equal "
                    "number of axes ({self})".format(block=block.ndim, self=self.ndim)
                )
        if do_integrity_check:
            self._verify_integrity()
        self._consolidate_check()
        # build item-position -> (block number, intra-block location) maps
        self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [ensure_index([])] + [ensure_index(a) for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
    def __nonzero__(self):
        # A BlockManager is always truthy, even when empty.
        return True
    # Python3 compat
    __bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
    @property
    def ndim(self):
        # number of manager dimensions == number of axes
        return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError(
"Length mismatch: Expected axis has {old} elements, new "
"values have {new} elements".format(old=old_len, new=new_len)
)
self.axes[axis] = new_labels
    def rename_axis(self, mapper, axis, copy=True, level=None):
        """
        Rename one of axes.

        Parameters
        ----------
        mapper : unary callable
            Applied to each label along the axis.
        axis : int
        copy : boolean, default True
            If True, operate on a copy of the manager.
        level : int, default None
            For a MultiIndex, apply the mapper at this level only.

        Returns
        -------
        BlockManager
        """
        obj = self.copy(deep=copy)
        obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
        return obj
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice == slice(
0, len(self), 1
)
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
    @property
    def items(self):
        # axis 0 holds the item (column) labels
        return self.axes[0]
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
    def get_dtype_counts(self):
        """Return {dtype name: number of items} over all blocks."""
        return self._get_counts(lambda b: b.dtype.name)
    def get_ftype_counts(self):
        """Return {ftype: number of items} over all blocks."""
        return self._get_counts(lambda b: b.ftype)
    def get_dtypes(self):
        """Return an array of per-item dtypes, in items order."""
        dtypes = np.array([blk.dtype for blk in self.blocks])
        # _blknos maps each item to its block, so this expands per item
        return algos.take_1d(dtypes, self._blknos, allow_fill=False)
    def get_ftypes(self):
        """Return an array of per-item ftypes, in items order."""
        ftypes = np.array([blk.ftype for blk in self.blocks])
        return algos.take_1d(ftypes, self._blknos, allow_fill=False)
    def __getstate__(self):
        """Pickle support; emits both the legacy and the 0.14.1 layout."""
        block_values = [b.values for b in self.blocks]
        block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
        axes_array = [ax for ax in self.axes]
        # 0.14.1+ layout carries mgr_locs so non-unique items round-trip
        extra_state = {
            "0.14.1": {
                "axes": axes_array,
                "blocks": [
                    dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
                    for b in self.blocks
                ],
            }
        }
        # First three elements of the state are to maintain forward
        # compatibility with 0.13.1.
        return axes_array, block_values, block_items, extra_state
    def __setstate__(self, state):
        """Unpickle support; accepts both 0.14.1+ and pre-0.14.1 layouts."""
        def unpickle_block(values, mgr_locs):
            return make_block(values, placement=mgr_locs)
        if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
            # modern layout: axes + blocks with explicit placements
            state = state[3]["0.14.1"]
            self.axes = [ensure_index(ax) for ax in state["axes"]]
            self.blocks = tuple(
                unpickle_block(b["values"], b["mgr_locs"]) for b in state["blocks"]
            )
        else:
            # discard anything after 3rd, support beta pickling format for a
            # little while longer
            ax_arrays, bvalues, bitems = state[:3]
            self.axes = [ensure_index(ax) for ax in ax_arrays]
            if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
                # This is a workaround for pre-0.14.1 pickles that didn't
                # support unpickling multi-block frames/panels with non-unique
                # columns/items, because given a manager with items ["a", "b",
                # "a"] there's no way of knowing which block's "a" is where.
                #
                # Single-block case can be supported under the assumption that
                # block items corresponded to manager items 1-to-1.
                all_mgr_locs = [slice(0, len(bitems[0]))]
            else:
                all_mgr_locs = [
                    self.axes[0].get_indexer(blk_items) for blk_items in bitems
                ]
            self.blocks = tuple(
                unpickle_block(values, mgr_locs)
                for values, mgr_locs in zip(bvalues, all_mgr_locs)
            )
        self._post_setstate()
    def _post_setstate(self):
        """Restore derived state after unpickling; forces a fresh check."""
        self._is_consolidated = False
        self._known_consolidated = False
        self._rebuild_blknos_and_blklocs()
    def __len__(self):
        # length of the items (axis 0) labels
        return len(self.items)
def __repr__(self):
output = pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += "\nItems: {ax}".format(ax=ax)
else:
output += "\nAxis {i}: {ax}".format(i=i, ax=ax)
for block in self.blocks:
output += "\n{block}".format(block=pprint_thing(block))
return output
    def _verify_integrity(self):
        """Check block shapes/placements against the manager axes."""
        mgr_shape = self.shape
        tot_items = sum(len(x.mgr_locs) for x in self.blocks)
        for block in self.blocks:
            # non-item dimensions of every block must match the manager
            if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
                construction_error(tot_items, block.shape[1:], self.axes)
        if len(self.items) != tot_items:
            raise AssertionError(
                "Number of manager items must equal union of "
                "block items\n# manager items: {0}, # "
                "tot_items: {1}".format(len(self.items), tot_items)
            )
    def apply(
        self,
        f,
        axes=None,
        filter=None,
        do_integrity_check=False,
        consolidate=True,
        **kwargs
    ):
        """
        iterate over the blocks, collect and create a new block manager

        Parameters
        ----------
        f : the callable or function name to operate on at the block level
        axes : optional (if not supplied, use self.axes)
        filter : list, if supplied, only call the block if the filter is in
            the block
        do_integrity_check : boolean, default False. Do the block manager
            integrity check
        consolidate: boolean, default True. Join together blocks having same
            dtype

        Returns
        -------
        Block Manager (new object)
        """
        result_blocks = []
        # filter kwarg is used in replace-* family of methods
        if filter is not None:
            filter_locs = set(self.items.get_indexer_for(filter))
            if len(filter_locs) == len(self.items):
                # All items are included, as if there were no filtering
                filter = None
            else:
                kwargs["filter"] = filter_locs
        if consolidate:
            self._consolidate_inplace()
        # Certain ops need their keyword args re-aligned to each block's
        # items before being applied; which keys depends on the op name.
        if f == "where":
            align_copy = True
            if kwargs.get("align", True):
                align_keys = ["other", "cond"]
            else:
                align_keys = ["cond"]
        elif f == "putmask":
            align_copy = False
            if kwargs.get("align", True):
                align_keys = ["new", "mask"]
            else:
                align_keys = ["mask"]
        elif f == "fillna":
            # fillna internally does putmask, maybe it's better to do this
            # at mgr, not block level?
            align_copy = False
            align_keys = ["value"]
        else:
            align_keys = []
        # TODO(EA): may interfere with ExtensionBlock.setitem for blocks
        # with a .values attribute.
        aligned_args = {
            k: kwargs[k]
            for k in align_keys
            if not isinstance(kwargs[k], ABCExtensionArray)
            and hasattr(kwargs[k], "values")
        }
        for b in self.blocks:
            if filter is not None:
                # skip blocks entirely outside the filter, keeping them as-is
                if not b.mgr_locs.isin(filter_locs).any():
                    result_blocks.append(b)
                    continue
            if aligned_args:
                b_items = self.items[b.mgr_locs.indexer]
                for k, obj in aligned_args.items():
                    axis = getattr(obj, "_info_axis_number", 0)
                    kwargs[k] = obj.reindex(b_items, axis=axis, copy=align_copy)
            applied = getattr(b, f)(**kwargs)
            # the op may return one block or a list of blocks
            result_blocks = _extend_blocks(applied, result_blocks)
        if len(result_blocks) == 0:
            return self.make_empty(axes or self.axes)
        bm = self.__class__(
            result_blocks, axes or self.axes, do_integrity_check=do_integrity_check
        )
        bm._consolidate_inplace()
        return bm
    def quantile(
        self,
        axis=0,
        consolidate=True,
        transposed=False,
        interpolation="linear",
        qs=None,
        numeric_only=None,
    ):
        """
        Iterate over blocks applying quantile reduction.
        This routine is intended for reduction type operations and
        will do inference on the generated blocks.

        Parameters
        ----------
        axis: reduction axis, default 0
        consolidate: boolean, default True. Join together blocks having same
            dtype
        transposed: boolean, default False
            we are holding transposed data
        interpolation : type of interpolation, default 'linear'
        qs : a scalar or list of the quantiles to be computed
        numeric_only : ignored

        Returns
        -------
        Block Manager (new object)
        """
        # Series dispatches to DataFrame for quantile, which allows us to
        # simplify some of the code here and in the blocks
        assert self.ndim >= 2
        if consolidate:
            self._consolidate_inplace()
        def get_axe(block, qs, axes):
            # Because Series dispatches to DataFrame, we will always have
            # block.ndim == 2
            from pandas import Float64Index
            if is_list_like(qs):
                ax = Float64Index(qs)
            else:
                ax = axes[0]
            return ax
        axes, blocks = [], []
        for b in self.blocks:
            block = b.quantile(axis=axis, qs=qs, interpolation=interpolation)
            axe = get_axe(b, qs, axes=self.axes)
            axes.append(axe)
            blocks.append(block)
        # note that some DatetimeTZ, Categorical are always ndim==1
        ndim = {b.ndim for b in blocks}
        assert 0 not in ndim, ndim
        if 2 in ndim:
            new_axes = list(self.axes)
            # multiple blocks that are reduced
            if len(blocks) > 1:
                new_axes[1] = axes[0]
                # reset the placement to the original
                for b, sb in zip(blocks, self.blocks):
                    b.mgr_locs = sb.mgr_locs
            else:
                new_axes[axis] = Index(np.concatenate([ax.values for ax in axes]))
            if transposed:
                new_axes = new_axes[::-1]
                blocks = [
                    b.make_block(b.values.T, placement=np.arange(b.shape[1]))
                    for b in blocks
                ]
            return self.__class__(blocks, new_axes)
        # single block, i.e. ndim == {1}
        values = concat_compat([b.values for b in blocks])
        # compute the orderings of our original data
        if len(self.blocks) > 1:
            indexer = np.empty(len(self.axes[0]), dtype=np.intp)
            i = 0
            for b in self.blocks:
                for j in b.mgr_locs:
                    indexer[j] = i
                    i = i + 1
            values = values.take(indexer)
        return SingleBlockManager(
            [make_block(values, ndim=1, placement=np.arange(len(values)))], axes[0]
        )
    def isna(self, func, **kwargs):
        """Apply ``func`` to each block via Block.apply; new BlockManager."""
        return self.apply("apply", func=func, **kwargs)
    def where(self, **kwargs):
        """Block-wise Block.where; returns a new BlockManager."""
        return self.apply("where", **kwargs)
    def setitem(self, **kwargs):
        """Block-wise Block.setitem; returns a new BlockManager."""
        return self.apply("setitem", **kwargs)
    def putmask(self, **kwargs):
        """Block-wise Block.putmask; returns a new BlockManager."""
        return self.apply("putmask", **kwargs)
    def diff(self, **kwargs):
        """Block-wise Block.diff; returns a new BlockManager."""
        return self.apply("diff", **kwargs)
    def interpolate(self, **kwargs):
        """Block-wise Block.interpolate; returns a new BlockManager."""
        return self.apply("interpolate", **kwargs)
    def shift(self, **kwargs):
        """Block-wise Block.shift; returns a new BlockManager."""
        return self.apply("shift", **kwargs)
    def fillna(self, **kwargs):
        """Block-wise Block.fillna; returns a new BlockManager."""
        return self.apply("fillna", **kwargs)
    def downcast(self, **kwargs):
        """Block-wise Block.downcast; returns a new BlockManager."""
        return self.apply("downcast", **kwargs)
    def astype(self, dtype, **kwargs):
        """Block-wise Block.astype to ``dtype``; returns a new BlockManager."""
        return self.apply("astype", dtype=dtype, **kwargs)
    def convert(self, **kwargs):
        """Block-wise Block.convert; returns a new BlockManager."""
        return self.apply("convert", **kwargs)
    def replace(self, value, **kwargs):
        """Block-wise Block.replace with a scalar ``value``."""
        # list-like replacement goes through replace_list instead
        assert np.ndim(value) == 0, value
        return self.apply("replace", value=value, **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replace """
inplace = validate_bool_kwarg(inplace, "inplace")
# figure out our mask a-priori to avoid repeated replacements
values = self.as_array()
def comp(s, regex=False):
"""
Generate a bool array by perform an equality check, or perform
an element-wise regular expression matching
"""
if isna(s):
return isna(values)
if isinstance(s, (Timedelta, Timestamp)) and getattr(s, "tz", None) is None:
return _compare_or_regex_search(
maybe_convert_objects(values), s.asm8, regex
)
return _compare_or_regex_search(values, s, regex)
masks = [comp(s, regex) for i, s in enumerate(src_list)]
result_blocks = []
src_len = len(src_list) - 1
for blk in self.blocks:
# its possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
# TODO: assert/validate that `d` is always a scalar?
new_rb = []
for b in rb:
m = masks[i][b.mgr_locs.indexer]
convert = i == src_len
result = b._replace_coerce(
mask=m,
to_replace=s,
value=d,
inplace=inplace,
convert=convert,
regex=regex,
)
if m.any():
new_rb = _extend_blocks(result, new_rb)
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
    def is_consolidated(self):
        """
        Return True if no two blocks share the same ftype
        (i.e. the manager cannot be consolidated further).
        """
        if not self._known_consolidated:
            self._consolidate_check()
        return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
    @property
    def is_mixed_type(self):
        # Warning, consolidation needs to get checked upstairs
        self._consolidate_inplace()
        # after consolidation, multiple blocks implies multiple dtypes
        return len(self.blocks) > 1
    @property
    def is_numeric_mixed_type(self):
        # Warning, consolidation needs to get checked upstairs
        self._consolidate_inplace()
        # True when every block (hence every dtype) is numeric
        return all(block.is_numeric for block in self.blocks)
    @property
    def is_datelike_mixed_type(self):
        # Warning, consolidation needs to get checked upstairs
        self._consolidate_inplace()
        # True when at least one block is datetimelike
        return any(block.is_datelike for block in self.blocks)
    @property
    def any_extension_types(self):
        """Whether any of the blocks in this manager are extension blocks"""
        return any(block.is_extension for block in self.blocks)
    @property
    def is_view(self):
        """ return a boolean if we are a single block and are a view """
        if len(self.blocks) == 1:
            return self.blocks[0].is_view
        # It is technically possible to figure out which blocks are views
        # e.g. [ b.values.base is not None for b in self.blocks ]
        # but then we have the case of possibly some blocks being a view
        # and some blocks not. setting in theory is possible on the non-view
        # blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
        # complicated
        return False
    def get_bool_data(self, copy=False):
        """
        Return a new manager containing only the boolean blocks.

        Parameters
        ----------
        copy : boolean, default False
            Whether to copy the blocks
        """
        self._consolidate_inplace()
        return self.combine([b for b in self.blocks if b.is_bool], copy)
    def get_numeric_data(self, copy=False):
        """
        Return a new manager containing only the numeric blocks.

        Parameters
        ----------
        copy : boolean, default False
            Whether to copy the blocks
        """
        self._consolidate_inplace()
        return self.combine([b for b in self.blocks if b.is_numeric], copy)
    def combine(self, blocks, copy=True):
        """ return a new manager with the blocks """
        if len(blocks) == 0:
            return self.make_empty()
        # FIXME: optimization potential
        # map old manager positions to positions in the reduced item set
        indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
        inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
        new_blocks = []
        for b in blocks:
            b = b.copy(deep=copy)
            # re-home each block's placements into the new (smaller) axis
            b.mgr_locs = algos.take_1d(
                inv_indexer, b.mgr_locs.as_array, axis=0, allow_fill=False
            )
            new_blocks.append(b)
        axes = list(self.axes)
        axes[0] = self.items.take(indexer)
        return self.__class__(new_blocks, axes, do_integrity_check=False)
    def get_slice(self, slobj, axis=0):
        """Return a new manager sliced by ``slobj`` along ``axis``."""
        if axis >= self.ndim:
            raise IndexError("Requested axis not found in manager")
        if axis == 0:
            # items axis: block placements must be recomputed
            new_blocks = self._slice_take_blocks_ax0(slobj)
        else:
            # other axes: slice each block's values directly
            slicer = [slice(None)] * (axis + 1)
            slicer[axis] = slobj
            slicer = tuple(slicer)
            new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
        new_axes = list(self.axes)
        new_axes[axis] = new_axes[axis][slobj]
        bm = self.__class__(new_blocks, new_axes, do_integrity_check=False)
        bm._consolidate_inplace()
        return bm
    def __contains__(self, item):
        # membership is defined on the items (axis 0) labels
        return item in self.items
    @property
    def nblocks(self):
        # number of blocks currently held
        return len(self.blocks)
    def copy(self, deep=True):
        """
        Make deep or shallow copy of BlockManager

        Parameters
        ----------
        deep : boolean or string, default True
            If False, return shallow copy (do not copy data)
            If 'all', copy data and a deep copy of the index

        Returns
        -------
        copy : BlockManager
        """
        # this preserves the notion of view copying of axes
        if deep:
            if deep == "all":
                copy = lambda ax: ax.copy(deep=True)
            else:
                # a view keeps axis data shared while detaching identity
                copy = lambda ax: ax.view()
            new_axes = [copy(ax) for ax in self.axes]
        else:
            new_axes = list(self.axes)
        return self.apply("copy", axes=new_axes, deep=deep, do_integrity_check=False)
    def as_array(self, transpose=False, items=None):
        """Convert the blockmanager data into an numpy array.

        Parameters
        ----------
        transpose : boolean, default False
            If True, transpose the return array
        items : list of strings or None
            Names of block items that will be included in the returned
            array. ``None`` means that all block items will be used

        Returns
        -------
        arr : ndarray
        """
        if len(self.blocks) == 0:
            # empty manager -> empty float array of the right shape
            arr = np.empty(self.shape, dtype=float)
            return arr.transpose() if transpose else arr
        if items is not None:
            mgr = self.reindex_axis(items, axis=0)
        else:
            mgr = self
        if self._is_single_block and mgr.blocks[0].is_datetimetz:
            # TODO(Block.get_values): Make DatetimeTZBlock.get_values
            # always be object dtype. Some callers seem to want the
            # DatetimeArray (previously DTI)
            arr = mgr.blocks[0].get_values(dtype=object)
        elif self._is_single_block or not self.is_mixed_type:
            # homogeneous data: no interleaving needed
            arr = np.asarray(mgr.blocks[0].get_values())
        else:
            arr = mgr._interleave()
        return arr.transpose() if transpose else arr
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
# TODO: https://github.com/pandas-dev/pandas/issues/22791
# Give EAs some input on what happens here. Sparse needs this.
if is_sparse(dtype):
dtype = dtype.subtype
elif is_extension_array_dtype(dtype):
dtype = "object"
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError("Some items were not contained in blocks")
return result
def to_dict(self, copy=True):
"""
Return a dict of str(dtype) -> BlockManager
Parameters
----------
copy : boolean, default True
Returns
-------
values : a dict of dtype -> BlockManager
Notes
-----
This consolidates based on str(dtype)
"""
self._consolidate_inplace()
bd = {}
for b in self.blocks:
bd.setdefault(str(b.dtype), []).append(b)
return {dtype: self.combine(blocks, copy=copy) for dtype, blocks in bd.items()}
    def fast_xs(self, loc):
        """
        get a cross sectional for a given location in the
        items ; handle dups

        return the result, is *could* be a view in the case of a
        single block
        """
        if len(self.blocks) == 1:
            return self.blocks[0].iget((slice(None), loc))
        items = self.items
        # non-unique (GH4726)
        if not items.is_unique:
            result = self._interleave()
            if self.ndim == 2:
                result = result.T
            return result[loc]
        # unique
        dtype = _interleaved_dtype(self.blocks)
        n = len(items)
        if is_extension_array_dtype(dtype):
            # we'll eventually construct an ExtensionArray.
            result = np.empty(n, dtype=object)
        else:
            result = np.empty(n, dtype=dtype)
        for blk in self.blocks:
            # Such assignment may incorrectly coerce NaT to None
            # result[blk.mgr_locs] = blk._slice((slice(None), loc))
            for i, rl in enumerate(blk.mgr_locs):
                result[rl] = blk.iget((i, loc))
        if is_extension_array_dtype(dtype):
            result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)
        return result
    def consolidate(self):
        """
        Join together blocks having same dtype

        Returns
        -------
        y : BlockManager
        """
        if self.is_consolidated():
            return self
        # build a new manager and consolidate it in place
        bm = self.__class__(self.blocks, self.axes)
        bm._is_consolidated = False
        bm._consolidate_inplace()
        return bm
    def _consolidate_inplace(self):
        """Merge same-ftype blocks in place and refresh the item maps."""
        if not self.is_consolidated():
            self.blocks = tuple(_consolidate(self.blocks))
            self._is_consolidated = True
            self._known_consolidated = True
            self._rebuild_blknos_and_blklocs()
    def get(self, item):
        """
        Return values for selected item (ndarray or BlockManager).
        """
        if self.items.is_unique:
            if not isna(item):
                loc = self.items.get_loc(item)
            else:
                # locate null labels explicitly since get_loc can't
                indexer = np.arange(len(self.items))[isna(self.items)]
                # allow a single nan location indexer
                if not is_scalar(indexer):
                    if len(indexer) == 1:
                        loc = indexer.item()
                    else:
                        raise ValueError("cannot label index with a null key")
            return self.iget(loc)
        else:
            if isna(item):
                raise TypeError("cannot label index with a null key")
            # duplicate labels: return all matching items via reindex
            indexer = self.items.get_indexer_for([item])
            return self.reindex_indexer(
                new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True
            )
    def iget(self, i):
        """
        Return the data for position ``i`` as a SingleBlockManager.
        """
        # _blknos/_blklocs map the item position to its block and slot
        block = self.blocks[self._blknos[i]]
        values = block.iget(self._blklocs[i])
        # shortcut for select a single-dim from a 2-dim BM
        return SingleBlockManager(
            [
                block.make_block_same_class(
                    values, placement=slice(0, len(values)), ndim=1
                )
            ],
            self.axes[1],
        )
    def delete(self, item):
        """
        Delete selected item (items if non-unique) in-place.
        """
        indexer = self.items.get_loc(item)
        is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
        is_deleted[indexer] = True
        # each surviving item shifts left by the number of deletions before it
        ref_loc_offset = -is_deleted.cumsum()
        is_blk_deleted = [False] * len(self.blocks)
        if isinstance(indexer, int):
            affected_start = indexer
        else:
            affected_start = is_deleted.nonzero()[0][0]
        # only blocks holding items at/after the first deletion are affected
        for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
            blk = self.blocks[blkno]
            bml = blk.mgr_locs
            blk_del = is_deleted[bml.indexer].nonzero()[0]
            if len(blk_del) == len(bml):
                # every item of this block is going away; drop the block
                is_blk_deleted[blkno] = True
                continue
            elif len(blk_del) != 0:
                blk.delete(blk_del)
                bml = blk.mgr_locs
            blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
        # FIXME: use Index.delete as soon as it uses fastpath=True
        self.axes[0] = self.items[~is_deleted]
        self.blocks = tuple(
            b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]
        )
        self._shape = None
        self._rebuild_blknos_and_blklocs()
    def set(self, item, value):
        """
        Set new item in-place. Does not consolidate. Adds new Block if not
        contained in the current set of items
        """
        # FIXME: refactor, clearly separate broadcasting & zip-like assignment
        # can prob also fix the various if tests for sparse/categorical
        # TODO(EA): Remove an is_extension_ when all extension types satisfy
        # the interface
        value_is_extension_type = is_extension_type(value) or is_extension_array_dtype(
            value
        )
        # categorical/sparse/datetimetz
        if value_is_extension_type:
            # extension values are 1-d; every placement receives the whole
            def value_getitem(placement):
                return value
        else:
            if value.ndim == self.ndim - 1:
                value = _safe_reshape(value, (1,) + value.shape)
                def value_getitem(placement):
                    return value
            else:
                def value_getitem(placement):
                    return value[placement.indexer]
            if value.shape[1:] != self.shape[1:]:
                raise AssertionError(
                    "Shape of new values must be compatible with manager shape"
                )
        try:
            loc = self.items.get_loc(item)
        except KeyError:
            # This item wasn't present, just insert at end
            self.insert(len(self.items), item, value)
            return
        if isinstance(loc, int):
            loc = [loc]
        blknos = self._blknos[loc]
        blklocs = self._blklocs[loc].copy()
        # items whose current block cannot store the new value ("unfit")
        # are collected and re-homed into new blocks below
        unfit_mgr_locs = []
        unfit_val_locs = []
        removed_blknos = []
        for blkno, val_locs in libinternals.get_blkno_placements(
            blknos, self.nblocks, group=True
        ):
            blk = self.blocks[blkno]
            blk_locs = blklocs[val_locs.indexer]
            if blk.should_store(value):
                # dtype-compatible: set directly into the existing block
                blk.set(blk_locs, value_getitem(val_locs))
            else:
                unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
                unfit_val_locs.append(val_locs)
                # If all block items are unfit, schedule the block for removal.
                if len(val_locs) == len(blk.mgr_locs):
                    removed_blknos.append(blkno)
                else:
                    self._blklocs[blk.mgr_locs.indexer] = -1
                    blk.delete(blk_locs)
                    self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
        if len(removed_blknos):
            # Remove blocks & update blknos accordingly
            is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
            is_deleted[removed_blknos] = True
            new_blknos = np.empty(self.nblocks, dtype=np.int64)
            new_blknos.fill(-1)
            new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
            self._blknos = algos.take_1d(
                new_blknos, self._blknos, axis=0, allow_fill=False
            )
            self.blocks = tuple(
                blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
            )
        if unfit_val_locs:
            unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
            unfit_count = len(unfit_mgr_locs)
            new_blocks = []
            if value_is_extension_type:
                # This code (ab-)uses the fact that sparse blocks contain only
                # one item.
                new_blocks.extend(
                    make_block(
                        values=value.copy(),
                        ndim=self.ndim,
                        placement=slice(mgr_loc, mgr_loc + 1),
                    )
                    for mgr_loc in unfit_mgr_locs
                )
                self._blknos[unfit_mgr_locs] = np.arange(unfit_count) + len(self.blocks)
                self._blklocs[unfit_mgr_locs] = 0
            else:
                # unfit_val_locs contains BlockPlacement objects
                unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
                new_blocks.append(
                    make_block(
                        values=value_getitem(unfit_val_items),
                        ndim=self.ndim,
                        placement=unfit_mgr_locs,
                    )
                )
                self._blknos[unfit_mgr_locs] = len(self.blocks)
                self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
            self.blocks += tuple(new_blocks)
            # Newly created block's dtype may already be present.
            self._known_consolidated = False
    def insert(self, loc: int, item, value, allow_duplicates: bool = False):
        """
        Insert item at selected position.

        Parameters
        ----------
        loc : int
        item : hashable
        value : array_like
        allow_duplicates: bool
            If False, trying to insert non-unique item will raise

        Raises
        ------
        ValueError : item already exists and allow_duplicates is False
        TypeError : loc is not an int
        """
        if not allow_duplicates and item in self.items:
            # Should this be a different kind of error??
            raise ValueError("cannot insert {}, already exists".format(item))
        if not isinstance(loc, int):
            raise TypeError("loc must be int")
        # insert to the axis; this could possibly raise a TypeError
        new_axis = self.items.insert(loc, item)
        block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
        # shift placements of existing blocks at/after the insertion point
        for blkno, count in _fast_count_smallints(self._blknos[loc:]):
            blk = self.blocks[blkno]
            if count == len(blk.mgr_locs):
                blk.mgr_locs = blk.mgr_locs.add(1)
            else:
                new_mgr_locs = blk.mgr_locs.as_array.copy()
                new_mgr_locs[new_mgr_locs >= loc] += 1
                blk.mgr_locs = new_mgr_locs
        if loc == self._blklocs.shape[0]:
            # np.append is a lot faster, let's use it if we can.
            self._blklocs = np.append(self._blklocs, 0)
            self._blknos = np.append(self._blknos, len(self.blocks))
        else:
            self._blklocs = np.insert(self._blklocs, loc, 0)
            self._blknos = np.insert(self._blknos, loc, len(self.blocks))
        self.axes[0] = new_axis
        self.blocks += (block,)
        self._shape = None
        self._known_consolidated = False
        # keep block count bounded to avoid pathological fragmentation
        if len(self.blocks) > 100:
            self._consolidate_inplace()
    def reindex_axis(
        self, new_index, axis, method=None, limit=None, fill_value=None, copy=True
    ):
        """
        Conform block manager to new index.

        Computes the indexer via Index.reindex (optionally filling with
        ``method``/``limit``) then delegates to reindex_indexer.
        """
        new_index = ensure_index(new_index)
        new_index, indexer = self.axes[axis].reindex(
            new_index, method=method, limit=limit
        )
        return self.reindex_indexer(
            new_index, indexer, axis=axis, fill_value=fill_value, copy=copy
        )
    def reindex_indexer(
        self, new_axis, indexer, axis, fill_value=None, allow_dups=False, copy=True
    ):
        """
        Parameters
        ----------
        new_axis : Index
        indexer : ndarray of int64 or None
            pandas-indexer with -1's only.
        axis : int
        fill_value : object
        allow_dups : bool
        copy : bool, default True

        Returns
        -------
        BlockManager
        """
        if indexer is None:
            # no reordering needed; optionally return self or a copy
            if new_axis is self.axes[axis] and not copy:
                return self
            result = self.copy(deep=copy)
            result.axes = list(self.axes)
            result.axes[axis] = new_axis
            return result
        self._consolidate_inplace()
        # some axes don't allow reindexing with dups
        if not allow_dups:
            self.axes[axis]._can_reindex(indexer)
        if axis >= self.ndim:
            raise IndexError("Requested axis not found in manager")
        if axis == 0:
            new_blocks = self._slice_take_blocks_ax0(indexer, fill_tuple=(fill_value,))
        else:
            new_blocks = [
                blk.take_nd(
                    indexer,
                    axis=axis,
                    fill_tuple=(
                        fill_value if fill_value is not None else blk.fill_value,
                    ),
                )
                for blk in self.blocks
            ]
        new_axes = list(self.axes)
        new_axes[axis] = new_axis
        return self.__class__(new_blocks, new_axes)
    def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
        """
        Slice/take blocks along axis=0.

        Overloaded for SingleBlock

        Parameters
        ----------
        slice_or_indexer : slice, boolean ndarray, or integer indexer
        fill_tuple : tuple, optional
            ``(fill_value,)``; when given, -1 entries in the indexer produce
            new all-NA blocks instead of being rejected.

        Returns
        -------
        new_blocks : list of Block
        """
        # Filling is enabled purely by whether a fill tuple was supplied.
        allow_fill = fill_tuple is not None
        sl_type, slobj, sllen = _preprocess_slice_or_indexer(
            slice_or_indexer, self.shape[0], allow_fill=allow_fill
        )
        if self._is_single_block:
            blk = self.blocks[0]
            if sl_type in ("slice", "mask"):
                # Slicing the lone block keeps its placement trivially.
                return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
            elif not allow_fill or self.ndim == 1:
                if allow_fill and fill_tuple[0] is None:
                    # No explicit fill value: promote the dtype to one that
                    # can hold NA and use that dtype's NA value.
                    _, fill_value = maybe_promote(blk.dtype)
                    fill_tuple = (fill_value,)
                return [
                    blk.take_nd(
                        slobj,
                        axis=0,
                        new_mgr_locs=slice(0, sllen),
                        fill_tuple=fill_tuple,
                    )
                ]
        # Map each selected item to its owning block (blknos) and its
        # position within that block (blklocs).
        if sl_type in ("slice", "mask"):
            blknos = self._blknos[slobj]
            blklocs = self._blklocs[slobj]
        else:
            blknos = algos.take_1d(
                self._blknos, slobj, fill_value=-1, allow_fill=allow_fill
            )
            blklocs = algos.take_1d(
                self._blklocs, slobj, fill_value=-1, allow_fill=allow_fill
            )
        # When filling blknos, make sure blknos is updated before appending to
        # blocks list, that way new blkno is exactly len(blocks).
        #
        # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
        # pytables serialization will break otherwise.
        blocks = []
        for blkno, mgr_locs in libinternals.get_blkno_placements(
            blknos, self.nblocks, group=True
        ):
            if blkno == -1:
                # If we've got here, fill_tuple was not None.
                fill_value = fill_tuple[0]
                blocks.append(
                    self._make_na_block(placement=mgr_locs, fill_value=fill_value)
                )
            else:
                blk = self.blocks[blkno]
                # Otherwise, slicing along items axis is necessary.
                if not blk._can_consolidate:
                    # A non-consolidatable block, it's easy, because there's
                    # only one item and each mgr loc is a copy of that single
                    # item.
                    for mgr_loc in mgr_locs:
                        newblk = blk.copy(deep=True)
                        newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
                        blocks.append(newblk)
                else:
                    blocks.append(
                        blk.take_nd(
                            blklocs[mgr_locs.indexer],
                            axis=0,
                            new_mgr_locs=mgr_locs,
                            fill_tuple=None,
                        )
                    )
        return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (
np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64")
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype="int64")
)
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception("Indices must be nonzero and less than the axis length")
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(
new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
)
    def equals(self, other):
        """
        True if ``other`` holds equal axes and equal values, regardless of
        how those values happen to be split across blocks.
        """
        self_axes, other_axes = self.axes, other.axes
        if len(self_axes) != len(other_axes):
            return False
        if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
            return False
        # Consolidate both sides so equal data yields equal block layouts.
        self._consolidate_inplace()
        other._consolidate_inplace()
        if len(self.blocks) != len(other.blocks):
            return False
        # canonicalize block order, using a tuple combining the type
        # name and then mgr_locs because there might be unconsolidated
        # blocks (say, Categorical) which can only be distinguished by
        # the iteration order
        def canonicalize(block):
            return (block.dtype.name, block.mgr_locs.as_array.tolist())
        self_blocks = sorted(self.blocks, key=canonicalize)
        other_blocks = sorted(other.blocks, key=canonicalize)
        return all(
            block.equals(oblock) for block, oblock in zip(self_blocks, other_blocks)
        )
    def unstack(self, unstacker_func, fill_value):
        """Return a blockmanager with all blocks unstacked.

        Parameters
        ----------
        unstacker_func : callable
            A (partially-applied) ``pd.core.reshape._Unstacker`` class.
        fill_value : Any
            fill_value for newly introduced missing values.

        Returns
        -------
        unstacked : BlockManager
        """
        n_rows = self.shape[-1]
        # Run a dummy unstacker on an empty array just to derive the result
        # columns/index; the per-block calls below do the real work.
        dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items)
        new_columns = dummy.get_new_columns()
        new_index = dummy.get_new_index()
        new_blocks = []
        columns_mask = []
        for blk in self.blocks:
            # Each block unstacks its own items; ``mask`` records which of
            # the candidate result columns this block actually produced.
            blocks, mask = blk._unstack(
                partial(unstacker_func, value_columns=self.items[blk.mgr_locs.indexer]),
                new_columns,
                n_rows,
                fill_value,
            )
            new_blocks.extend(blocks)
            columns_mask.extend(mask)
        # Keep only the columns some block produced.
        new_columns = new_columns[columns_mask]
        bm = BlockManager(new_blocks, [new_columns, new_index])
        return bm
class SingleBlockManager(BlockManager):
    """ manage a single block with a single axis (used for Series-like data) """

    # Always 1-dimensional; holding exactly one block, it is consolidated
    # by construction, so the consolidation flags are class-level constants.
    ndim = 1
    _is_consolidated = True
    _known_consolidated = True
    __slots__ = ()

    def __init__(
        self,
        block: Block,
        axis: Union[Index, List[Index]],
        do_integrity_check: bool = False,
        fastpath: bool = False,
    ):
        # A one-element list of axes is accepted for BlockManager parity.
        if isinstance(axis, list):
            if len(axis) != 1:
                raise ValueError(
                    "cannot create SingleBlockManager with more than 1 axis"
                )
            axis = axis[0]
        # passed from constructor, single block, single axis
        if fastpath:
            # Caller guarantees ``axis`` is already an Index; skip coercion.
            self.axes = [axis]
            if isinstance(block, list):
                # empty block
                if len(block) == 0:
                    block = [np.array([])]
                elif len(block) != 1:
                    raise ValueError(
                        "Cannot create SingleBlockManager with more than 1 block"
                    )
                block = block[0]
        else:
            self.axes = [ensure_index(axis)]
            # create the block here
            if isinstance(block, list):
                # provide consolidation to the interleaved_dtype
                if len(block) > 1:
                    dtype = _interleaved_dtype(block)
                    block = [b.astype(dtype) for b in block]
                    block = _consolidate(block)
                if len(block) != 1:
                    raise ValueError(
                        "Cannot create SingleBlockManager with more than 1 block"
                    )
                block = block[0]
        if not isinstance(block, Block):
            # Raw values: wrap them in a Block spanning the whole axis.
            block = make_block(block, placement=slice(0, len(axis)), ndim=1)
        self.blocks = tuple([block])

    def _post_setstate(self):
        # No blkno/blkloc bookkeeping to rebuild for a single block.
        pass

    @property
    def _block(self):
        # The one and only block.
        return self.blocks[0]

    @property
    def _values(self):
        return self._block.values

    @property
    def _blknos(self):
        """ compat with BlockManager """
        return None

    @property
    def _blklocs(self):
        """ compat with BlockManager """
        return None

    def get_slice(self, slobj, axis=0):
        # Only axis 0 exists; slicing produces a new manager over the
        # block's slice (fastpath: the sliced index is already an Index).
        if axis >= self.ndim:
            raise IndexError("Requested axis not found in manager")
        return self.__class__(
            self._block._slice(slobj), self.index[slobj], fastpath=True
        )

    @property
    def index(self):
        # The single axis doubles as the index.
        return self.axes[0]

    def convert(self, **kwargs):
        """ convert the whole block as one """
        return self.apply("convert", **kwargs)

    @property
    def dtype(self):
        return self._block.dtype

    @property
    def array_dtype(self):
        return self._block.array_dtype

    @property
    def ftype(self):
        return self._block.ftype

    def get_dtype_counts(self):
        return {self.dtype.name: 1}

    def get_ftype_counts(self):
        return {self.ftype: 1}

    def get_dtypes(self):
        return np.array([self._block.dtype])

    def get_ftypes(self):
        return np.array([self._block.ftype])

    def external_values(self):
        return self._block.external_values()

    def internal_values(self):
        return self._block.internal_values()

    def get_values(self):
        """ return a dense type view """
        return np.array(self._block.to_dense(), copy=False)

    @property
    def _can_hold_na(self):
        return self._block._can_hold_na

    def is_consolidated(self):
        # A single block is consolidated by definition.
        return True

    def _consolidate_check(self):
        pass

    def _consolidate_inplace(self):
        pass

    def delete(self, item):
        """
        Delete single item from SingleBlockManager.

        Ensures that self.blocks doesn't become empty.
        """
        loc = self.items.get_loc(item)
        self._block.delete(loc)
        self.axes[0] = self.axes[0].delete(loc)

    def fast_xs(self, loc):
        """
        fast path for getting a cross-section
        return a view of the data
        """
        return self._block.values[loc]

    def concat(self, to_concat, new_axis):
        """
        Concatenate a list of SingleBlockManagers into a single
        SingleBlockManager.

        Used for pd.concat of Series objects with axis=0.

        Parameters
        ----------
        to_concat : list of SingleBlockManagers
        new_axis : Index of the result

        Returns
        -------
        SingleBlockManager
        """
        non_empties = [x for x in to_concat if len(x) > 0]
        # check if all series are of the same block type:
        if len(non_empties) > 0:
            blocks = [obj.blocks[0] for obj in non_empties]
            if len({b.dtype for b in blocks}) == 1:
                # Homogeneous dtype: let the block class concatenate natively.
                new_block = blocks[0].concat_same_type(blocks)
            else:
                values = [x.values for x in blocks]
                values = concat_compat(values)
                new_block = make_block(values, placement=slice(0, len(values), 1))
        else:
            # All inputs empty: still concatenate the (empty) values so the
            # result dtype follows the usual concat rules.
            values = [x._block.values for x in to_concat]
            values = concat_compat(values)
            new_block = make_block(values, placement=slice(0, len(values), 1))
        mgr = SingleBlockManager(new_block, new_axis)
        return mgr
# --------------------------------------------------------------------
# Constructor Helpers
def create_block_manager_from_blocks(blocks, axes):
    """
    Build a consolidated BlockManager from ``blocks``.

    ``blocks`` may be a list of Block objects, or a one-element list holding
    a raw ndarray of values covering all items (an empty ndarray yields an
    empty manager).  ValueError during construction is rendered as a
    user-facing construction error.
    """
    try:
        if len(blocks) == 1 and not isinstance(blocks[0], Block):
            # if blocks[0] is of length 0, return empty blocks
            if not len(blocks[0]):
                blocks = []
            else:
                # It's OK if a single block is passed as values, its placement
                # is basically "all items", but if there're many, don't bother
                # converting, it's an error anyway.
                blocks = [
                    make_block(values=blocks[0], placement=slice(0, len(axes[0])))
                ]
        mgr = BlockManager(blocks, axes)
        mgr._consolidate_inplace()
        return mgr
    except ValueError as e:
        blocks = [getattr(b, "values", b) for b in blocks]
        tot_items = sum(b.shape[0] for b in blocks)
        # construction_error always raises; this call never returns.
        construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
    """
    Build a consolidated BlockManager from per-column ``arrays`` keyed by
    ``names``; ValueError is rendered as a user-facing construction error.
    """
    try:
        blocks = form_blocks(arrays, names, axes)
        mgr = BlockManager(blocks, axes)
        mgr._consolidate_inplace()
        return mgr
    except ValueError as e:
        # construction_error always raises; this call never returns.
        construction_error(len(arrays), arrays[0].shape, axes, e)
def construction_error(tot_items, block_shape, axes, e=None):
    """ raise a helpful message about our construction """

    def _user_facing(shape):
        # Present 1D/2D shapes in (rows, columns) order for the message.
        return shape[::-1] if len(shape) <= 2 else shape

    passed = _user_facing(tuple(int(dim) for dim in [tot_items] + list(block_shape)))
    implied = _user_facing(tuple(len(ax) for ax in axes))
    # If the shapes actually agree, the original error came from elsewhere;
    # re-raise it rather than masking it with a shape message.
    if e is not None and passed == implied:
        raise e
    if block_shape[0] == 0:
        raise ValueError("Empty data passed with indices specified.")
    raise ValueError(
        "Shape of passed values is {0}, indices imply {1}".format(passed, implied)
    )
# -----------------------------------------------------------------------
def form_blocks(arrays, names, axes):
    """
    Group per-column ``arrays`` (keyed by ``names``) into typed Blocks.

    Columns are bucketed by their inferred block type.  Numeric buckets are
    stacked into multi-column blocks; tz-aware datetime, categorical and
    extension columns get one block per column.  Positions of ``axes[0]``
    with no matching name become a single all-NaN object block.
    """
    # put "leftover" items in float bucket, where else?
    # generalize?
    items_dict = defaultdict(list)
    extra_locs = []
    names_idx = ensure_index(names)
    if names_idx.equals(axes[0]):
        # Names already line up with the items axis: identity mapping.
        names_indexer = np.arange(len(names_idx))
    else:
        assert names_idx.intersection(axes[0]).is_unique
        names_indexer = names_idx.get_indexer_for(axes[0])
    for i, name_idx in enumerate(names_indexer):
        if name_idx == -1:
            # items-axis position with no corresponding array -> NaN-filled
            extra_locs.append(i)
            continue
        k = names[name_idx]
        v = arrays[name_idx]
        block_type = get_block_type(v)
        items_dict[block_type.__name__].append((i, k, v))
    blocks = []
    if len(items_dict["FloatBlock"]):
        float_blocks = _multi_blockify(items_dict["FloatBlock"])
        blocks.extend(float_blocks)
    if len(items_dict["ComplexBlock"]):
        complex_blocks = _multi_blockify(items_dict["ComplexBlock"])
        blocks.extend(complex_blocks)
    if len(items_dict["TimeDeltaBlock"]):
        timedelta_blocks = _multi_blockify(items_dict["TimeDeltaBlock"])
        blocks.extend(timedelta_blocks)
    if len(items_dict["IntBlock"]):
        int_blocks = _multi_blockify(items_dict["IntBlock"])
        blocks.extend(int_blocks)
    if len(items_dict["DatetimeBlock"]):
        # naive datetimes are all coerced to datetime64[ns]
        datetime_blocks = _simple_blockify(items_dict["DatetimeBlock"], _NS_DTYPE)
        blocks.extend(datetime_blocks)
    if len(items_dict["DatetimeTZBlock"]):
        # tz-aware datetimes cannot be stacked: one block per column
        dttz_blocks = [
            make_block(array, klass=DatetimeTZBlock, placement=[i])
            for i, _, array in items_dict["DatetimeTZBlock"]
        ]
        blocks.extend(dttz_blocks)
    if len(items_dict["BoolBlock"]):
        bool_blocks = _simple_blockify(items_dict["BoolBlock"], np.bool_)
        blocks.extend(bool_blocks)
    if len(items_dict["ObjectBlock"]) > 0:
        object_blocks = _simple_blockify(items_dict["ObjectBlock"], np.object_)
        blocks.extend(object_blocks)
    if len(items_dict["CategoricalBlock"]) > 0:
        # one block per categorical column
        cat_blocks = [
            make_block(array, klass=CategoricalBlock, placement=[i])
            for i, _, array in items_dict["CategoricalBlock"]
        ]
        blocks.extend(cat_blocks)
    if len(items_dict["ExtensionBlock"]):
        # one block per extension-array column
        external_blocks = [
            make_block(array, klass=ExtensionBlock, placement=[i])
            for i, _, array in items_dict["ExtensionBlock"]
        ]
        blocks.extend(external_blocks)
    if len(items_dict["ObjectValuesExtensionBlock"]):
        external_blocks = [
            make_block(array, klass=ObjectValuesExtensionBlock, placement=[i])
            for i, _, array in items_dict["ObjectValuesExtensionBlock"]
        ]
        blocks.extend(external_blocks)
    if len(extra_locs):
        shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
        # empty items -> dtype object
        block_values = np.empty(shape, dtype=object)
        block_values.fill(np.nan)
        na_block = make_block(block_values, placement=extra_locs)
        blocks.append(na_block)
    return blocks
def _simple_blockify(tuples, dtype):
    """
    Stack the given (placement, name, array) tuples into one block.

    If ``dtype`` is not None, the stacked values are coerced to it.
    Returns a one-element list holding the new block.
    """
    values, placement = _stack_arrays(tuples, dtype)
    # TODO: CHECK DTYPE?
    if dtype is not None and values.dtype != dtype:  # pragma: no cover
        values = values.astype(dtype)
    return [make_block(values, placement=placement)]
def _multi_blockify(tuples, dtype=None):
    """ return an array of blocks that potentially have different dtypes """
    blocks = []
    # group consecutive runs of the same dtype into one stacked block each
    for run_dtype, run in itertools.groupby(tuples, lambda t: t[2].dtype):
        values, placement = _stack_arrays(list(run), run_dtype)
        blocks.append(make_block(values, placement=placement))
    return blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return (len(x),)
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(
blocks: List[Block]
) -> Optional[Union[np.dtype, ExtensionDtype]]:
"""Find the common dtype for `blocks`.
Parameters
----------
blocks : List[Block]
Returns
-------
dtype : Optional[Union[np.dtype, ExtensionDtype]]
None is returned when `blocks` is empty.
"""
if not len(blocks):
return None
return find_common_type([b.dtype for b in blocks])
def _consolidate(blocks):
    """
    Merge blocks having same dtype, exclude non-consolidating blocks
    """

    # sort by _can_consolidate, dtype so equal keys become contiguous runs
    def _key(blk):
        return blk._consolidate_key

    merged_all = []
    for (_can_consolidate, dtype), group in itertools.groupby(
        sorted(blocks, key=_key), _key
    ):
        merged = _merge_blocks(
            list(group), dtype=dtype, _can_consolidate=_can_consolidate
        )
        merged_all = _extend_blocks(merged, merged_all)
    return merged_all
def _compare_or_regex_search(a, b, regex=False):
"""
Compare two array_like inputs of the same shape or two scalar values
Calls operator.eq or re.search, depending on regex argument. If regex is
True, perform an element-wise regex matching.
Parameters
----------
a : array_like or scalar
b : array_like or scalar
regex : bool, default False
Returns
-------
mask : array_like of bool
"""
if not regex:
op = lambda x: operator.eq(x, b)
else:
op = np.vectorize(
lambda x: bool(re.search(b, x)) if isinstance(x, str) else False
)
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
# numpy deprecation warning to have i8 vs integer comparisons
if is_datetimelike_v_numeric(a, b):
result = False
# numpy deprecation warning if comparing numeric vs string-like
elif is_numeric_v_string_like(a, b):
result = False
else:
result = op(a)
if is_scalar(result) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = "ndarray(dtype={dtype})".format(dtype=a.dtype)
if is_b_array:
type_names[1] = "ndarray(dtype={dtype})".format(dtype=b.dtype)
raise TypeError(
"Cannot compare types {a!r} and {b!r}".format(
a=type_names[0], b=type_names[1]
)
)
return result
def _transform_index(index, func, level=None):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified.
"""
if isinstance(index, MultiIndex):
if level is not None:
items = [
tuple(func(y) if i == level else y for i, y in enumerate(x))
for x in index
]
else:
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name, tupleize_cols=False)
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if isinstance(slice_or_indexer, slice):
return (
"slice",
slice_or_indexer,
libinternals.slice_len(slice_or_indexer, length),
)
elif (
isinstance(slice_or_indexer, np.ndarray) and slice_or_indexer.dtype == np.bool_
):
return "mask", slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return "fancy", indexer, len(indexer)
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
    """
    Concatenate block managers into one.

    Parameters
    ----------
    mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
    axes : list of Index
    concat_axis : int
    copy : bool
    """
    concat_plans = [
        get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
    ]
    concat_plan = combine_concat_plans(concat_plans, concat_axis)
    blocks = []
    for placement, join_units in concat_plan:
        if len(join_units) == 1 and not join_units[0].indexers:
            # Single unit with no reindexing: reuse the block's values
            # directly, as a copy or a view depending on ``copy``.
            b = join_units[0].block
            values = b.values
            if copy:
                values = values.copy()
            elif not copy:
                values = values.view()
            b = b.make_block_same_class(values, placement=placement)
        elif is_uniform_join_units(join_units):
            # All units share a block type: concatenate natively on the block.
            b = join_units[0].block.concat_same_type(
                [ju.block for ju in join_units], placement=placement
            )
        else:
            # Mixed block types: fall back to generic value concatenation.
            b = make_block(
                concatenate_join_units(join_units, concat_axis, copy=copy),
                placement=placement,
            )
        blocks.append(b)
    return BlockManager(blocks, axes)
| 31.13395 | 88 | 0.571216 | from collections import defaultdict
from functools import partial
import itertools
import operator
import re
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_convert_objects,
maybe_promote,
)
from pandas.core.dtypes.common import (
_NS_DTYPE,
is_datetimelike_v_numeric,
is_extension_array_dtype,
is_extension_type,
is_list_like,
is_numeric_v_string_like,
is_scalar,
is_sparse,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject
from pandas.core.index import Index, MultiIndex, ensure_index
from pandas.core.indexers import maybe_convert_indices
from pandas.io.formats.printing import pprint_thing
from .blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
ObjectValuesExtensionBlock,
_extend_blocks,
_merge_blocks,
_safe_reshape,
get_block_type,
make_block,
)
from .concat import (
combine_concat_plans,
concatenate_join_units,
get_mgr_concatenation_plan,
is_uniform_join_units,
)
class BlockManager(PandasObject):
__slots__ = [
"axes",
"blocks",
"_ndim",
"_shape",
"_known_consolidated",
"_is_consolidated",
"_blknos",
"_blklocs",
]
def __init__(
self,
blocks: Sequence[Block],
axes: Sequence[Index],
do_integrity_check: bool = True,
):
self.axes = [ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if self.ndim != block.ndim:
raise AssertionError(
"Number of Block dimensions ({block}) must equal "
"number of axes ({self})".format(block=block.ndim, self=self.ndim)
)
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
if axes is None:
axes = [ensure_index([])] + [ensure_index(a) for a in self.axes[1:]]
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError(
"Length mismatch: Expected axis has {old} elements, new "
"values have {new} elements".format(old=old_len, new=new_len)
)
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True, level=None):
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
return obj
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice == slice(
0, len(self), 1
)
def _rebuild_blknos_and_blklocs(self):
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
@property
def items(self):
return self.axes[0]
def _get_counts(self, f):
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return algos.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
"0.14.1": {
"axes": axes_array,
"blocks": [
dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
for b in self.blocks
],
}
}
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
return make_block(values, placement=mgr_locs)
if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
state = state[3]["0.14.1"]
self.axes = [ensure_index(ax) for ax in state["axes"]]
self.blocks = tuple(
unpickle_block(b["values"], b["mgr_locs"]) for b in state["blocks"]
)
else:
ax_arrays, bvalues, bitems = state[:3]
self.axes = [ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [
self.axes[0].get_indexer(blk_items) for blk_items in bitems
]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs)
)
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __repr__(self):
output = pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += "\nItems: {ax}".format(ax=ax)
else:
output += "\nAxis {i}: {ax}".format(i=i, ax=ax)
for block in self.blocks:
output += "\n{block}".format(block=pprint_thing(block))
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError(
"Number of manager items must equal union of "
"block items\n# manager items: {0}, # "
"tot_items: {1}".format(len(self.items), tot_items)
)
def apply(
self,
f,
axes=None,
filter=None,
do_integrity_check=False,
consolidate=True,
**kwargs
):
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs["filter"] = filter_locs
if consolidate:
self._consolidate_inplace()
if f == "where":
align_copy = True
if kwargs.get("align", True):
align_keys = ["other", "cond"]
else:
align_keys = ["cond"]
elif f == "putmask":
align_copy = False
if kwargs.get("align", True):
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
elif f == "fillna":
# fillna internally does putmask, maybe it's better to do this
align_copy = False
align_keys = ["value"]
else:
align_keys = []
aligned_args = {
k: kwargs[k]
for k in align_keys
if not isinstance(kwargs[k], ABCExtensionArray)
and hasattr(kwargs[k], "values")
}
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = getattr(obj, "_info_axis_number", 0)
kwargs[k] = obj.reindex(b_items, axis=axis, copy=align_copy)
applied = getattr(b, f)(**kwargs)
result_blocks = _extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(
result_blocks, axes or self.axes, do_integrity_check=do_integrity_check
)
bm._consolidate_inplace()
return bm
def quantile(
self,
axis=0,
consolidate=True,
transposed=False,
interpolation="linear",
qs=None,
numeric_only=None,
):
assert self.ndim >= 2
if consolidate:
self._consolidate_inplace()
def get_axe(block, qs, axes):
from pandas import Float64Index
if is_list_like(qs):
ax = Float64Index(qs)
else:
ax = axes[0]
return ax
axes, blocks = [], []
for b in self.blocks:
block = b.quantile(axis=axis, qs=qs, interpolation=interpolation)
axe = get_axe(b, qs, axes=self.axes)
axes.append(axe)
blocks.append(block)
ndim = {b.ndim for b in blocks}
assert 0 not in ndim, ndim
if 2 in ndim:
new_axes = list(self.axes)
if len(blocks) > 1:
new_axes[1] = axes[0]
for b, sb in zip(blocks, self.blocks):
b.mgr_locs = sb.mgr_locs
else:
new_axes[axis] = Index(np.concatenate([ax.values for ax in axes]))
if transposed:
new_axes = new_axes[::-1]
blocks = [
b.make_block(b.values.T, placement=np.arange(b.shape[1]))
for b in blocks
]
return self.__class__(blocks, new_axes)
values = concat_compat([b.values for b in blocks])
if len(self.blocks) > 1:
indexer = np.empty(len(self.axes[0]), dtype=np.intp)
i = 0
for b in self.blocks:
for j in b.mgr_locs:
indexer[j] = i
i = i + 1
values = values.take(indexer)
return SingleBlockManager(
[make_block(values, ndim=1, placement=np.arange(len(values)))], axes[0]
)
def isna(self, func, **kwargs):
return self.apply("apply", func=func, **kwargs)
def where(self, **kwargs):
return self.apply("where", **kwargs)
def setitem(self, **kwargs):
return self.apply("setitem", **kwargs)
def putmask(self, **kwargs):
return self.apply("putmask", **kwargs)
def diff(self, **kwargs):
return self.apply("diff", **kwargs)
def interpolate(self, **kwargs):
return self.apply("interpolate", **kwargs)
def shift(self, **kwargs):
return self.apply("shift", **kwargs)
def fillna(self, **kwargs):
return self.apply("fillna", **kwargs)
def downcast(self, **kwargs):
return self.apply("downcast", **kwargs)
def astype(self, dtype, **kwargs):
return self.apply("astype", dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply("convert", **kwargs)
def replace(self, value, **kwargs):
assert np.ndim(value) == 0, value
return self.apply("replace", value=value, **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
inplace = validate_bool_kwarg(inplace, "inplace")
values = self.as_array()
def comp(s, regex=False):
if isna(s):
return isna(values)
if isinstance(s, (Timedelta, Timestamp)) and getattr(s, "tz", None) is None:
return _compare_or_regex_search(
maybe_convert_objects(values), s.asm8, regex
)
return _compare_or_regex_search(values, s, regex)
masks = [comp(s, regex) for i, s in enumerate(src_list)]
result_blocks = []
src_len = len(src_list) - 1
for blk in self.blocks:
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
m = masks[i][b.mgr_locs.indexer]
convert = i == src_len
result = b._replace_coerce(
mask=m,
to_replace=s,
value=d,
inplace=inplace,
convert=convert,
regex=regex,
)
if m.any():
new_rb = _extend_blocks(result, new_rb)
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def is_consolidated(self):
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
self._consolidate_inplace()
return all(block.is_numeric for block in self.blocks)
@property
def is_datelike_mixed_type(self):
self._consolidate_inplace()
return any(block.is_datelike for block in self.blocks)
@property
def any_extension_types(self):
return any(block.is_extension for block in self.blocks)
@property
def is_view(self):
if len(self.blocks) == 1:
return self.blocks[0].is_view
return False
def get_bool_data(self, copy=False):
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
if len(blocks) == 0:
return self.make_empty()
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = algos.take_1d(
inv_indexer, b.mgr_locs.as_array, axis=0, allow_fill=False
)
new_blocks.append(b)
axes = list(self.axes)
axes[0] = self.items.take(indexer)
return self.__class__(new_blocks, axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False)
bm._consolidate_inplace()
return bm
    def __contains__(self, item):
        """True if *item* is a label in the items (axis 0) index."""
        return item in self.items
    @property
    def nblocks(self):
        """Number of blocks currently held by the manager."""
        return len(self.blocks)
def copy(self, deep=True):
if deep:
if deep == "all":
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply("copy", axes=new_axes, deep=deep, do_integrity_check=False)
    def as_array(self, transpose=False, items=None):
        """Convert the manager to a single 2D ndarray.

        Parameters
        ----------
        transpose : bool, default False
            Transpose the returned array.
        items : list-like or None
            Restrict/reorder to these items before converting.
        """
        if len(self.blocks) == 0:
            arr = np.empty(self.shape, dtype=float)
            return arr.transpose() if transpose else arr
        if items is not None:
            mgr = self.reindex_axis(items, axis=0)
        else:
            mgr = self
        # NOTE(review): the checks below test self._is_single_block but read
        # mgr.blocks — when items reindexing changed the block count these can
        # disagree; confirm intended.
        if self._is_single_block and mgr.blocks[0].is_datetimetz:
            # single tz-aware block: densify via object to keep tz info
            arr = mgr.blocks[0].get_values(dtype=object)
        elif self._is_single_block or not self.is_mixed_type:
            arr = np.asarray(mgr.blocks[0].get_values())
        else:
            arr = mgr._interleave()
        return arr.transpose() if transpose else arr
    def _interleave(self):
        """Return a 2D ndarray with all the blocks' values interleaved.

        The result dtype is the common dtype of all blocks; extension dtypes
        fall back to object (sparse to its subtype).
        """
        dtype = _interleaved_dtype(self.blocks)
        if is_sparse(dtype):
            dtype = dtype.subtype
        elif is_extension_array_dtype(dtype):
            dtype = "object"
        result = np.empty(self.shape, dtype=dtype)
        # track which rows were filled so gaps can be detected
        itemmask = np.zeros(self.shape[0])
        for blk in self.blocks:
            rl = blk.mgr_locs
            result[rl.indexer] = blk.get_values(dtype)
            itemmask[rl.indexer] = 1
        if not itemmask.all():
            raise AssertionError("Some items were not contained in blocks")
        return result
def to_dict(self, copy=True):
self._consolidate_inplace()
bd = {}
for b in self.blocks:
bd.setdefault(str(b.dtype), []).append(b)
return {dtype: self.combine(blocks, copy=copy) for dtype, blocks in bd.items()}
    def fast_xs(self, loc):
        """Return a cross-section (one column of the 2D layout) at position *loc*.

        Fast paths: a single block is indexed directly; non-unique items fall
        back to full interleaving. Otherwise values are gathered item by item
        into an array of the common dtype.
        """
        if len(self.blocks) == 1:
            return self.blocks[0].iget((slice(None), loc))
        items = self.items
        if not items.is_unique:
            # non-unique: materialize everything and slice
            result = self._interleave()
            if self.ndim == 2:
                result = result.T
            return result[loc]
        dtype = _interleaved_dtype(self.blocks)
        n = len(items)
        if is_extension_array_dtype(dtype):
            # gather into object first; re-wrap as the EA type below
            result = np.empty(n, dtype=object)
        else:
            result = np.empty(n, dtype=dtype)
        for blk in self.blocks:
            # Such assignment may incorrectly coerce NaT to None
            # result[blk.mgr_locs] = blk._slice((slice(None), loc))
            for i, rl in enumerate(blk.mgr_locs):
                result[rl] = blk.iget((i, loc))
        if is_extension_array_dtype(dtype):
            result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)
        return result
def consolidate(self):
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
    def get(self, item):
        """Return the values for *item* (a label on the items axis).

        Unique items return a single column via iget; non-unique items return
        a reindexed manager covering every matching position. A null label is
        only allowed when the items are unique and exactly one null exists.
        """
        if self.items.is_unique:
            if not isna(item):
                loc = self.items.get_loc(item)
            else:
                indexer = np.arange(len(self.items))[isna(self.items)]
                # allow a single nan location indexer
                if not is_scalar(indexer):
                    if len(indexer) == 1:
                        loc = indexer.item()
                    else:
                        raise ValueError("cannot label index with a null key")
                # NOTE(review): if indexer were scalar, `loc` would be unbound
                # below — confirm that branch is unreachable.
            return self.iget(loc)
        else:
            if isna(item):
                raise TypeError("cannot label index with a null key")
            indexer = self.items.get_indexer_for([item])
            return self.reindex_indexer(
                new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True
            )
    def iget(self, i):
        """Return a SingleBlockManager for the item at integer position *i*."""
        block = self.blocks[self._blknos[i]]
        values = block.iget(self._blklocs[i])
        # shortcut for select a single-dim from a 2-dim BM
        return SingleBlockManager(
            [
                block.make_block_same_class(
                    values, placement=slice(0, len(values)), ndim=1
                )
            ],
            self.axes[1],
        )
    def delete(self, item):
        """Delete the item(s) labelled *item* from the manager, in place.

        Block placements after the deleted positions are shifted down and
        fully-emptied blocks are dropped.
        """
        indexer = self.items.get_loc(item)
        is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
        is_deleted[indexer] = True
        # cumulative shift each surviving position moves down by
        ref_loc_offset = -is_deleted.cumsum()
        is_blk_deleted = [False] * len(self.blocks)
        if isinstance(indexer, int):
            affected_start = indexer
        else:
            affected_start = is_deleted.nonzero()[0][0]
        for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
            blk = self.blocks[blkno]
            bml = blk.mgr_locs
            blk_del = is_deleted[bml.indexer].nonzero()[0]
            if len(blk_del) == len(bml):
                # every item of this block is deleted -> drop the whole block
                is_blk_deleted[blkno] = True
                continue
            elif len(blk_del) != 0:
                blk.delete(blk_del)
                bml = blk.mgr_locs
            blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
        # FIXME: use Index.delete as soon as it uses fastpath=True
        self.axes[0] = self.items[~is_deleted]
        self.blocks = tuple(
            b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]
        )
        self._shape = None
        self._rebuild_blknos_and_blklocs()
    def set(self, item, value):
        """Set the values for *item*, inserting it at the end when missing.

        Blocks that can store the new values are updated in place; items that
        no longer fit their block ("unfit") are pulled out into new blocks.
        """
        # FIXME: refactor, clearly separate broadcasting & zip-like assignment
        # can prob also fix the various if tests for sparse/categorical
        # TODO(EA): Remove an is_extension_ when all extension types satisfy
        # the interface
        value_is_extension_type = is_extension_type(value) or is_extension_array_dtype(
            value
        )
        # categorical/sparse/datetimetz
        if value_is_extension_type:
            def value_getitem(placement):
                return value
        else:
            if value.ndim == self.ndim - 1:
                # broadcast a row across all target items
                value = _safe_reshape(value, (1,) + value.shape)
                def value_getitem(placement):
                    return value
            else:
                def value_getitem(placement):
                    return value[placement.indexer]
            if value.shape[1:] != self.shape[1:]:
                raise AssertionError(
                    "Shape of new values must be compatible with manager shape"
                )
        try:
            loc = self.items.get_loc(item)
        except KeyError:
            # This item wasn't present, just insert at end
            self.insert(len(self.items), item, value)
            return
        if isinstance(loc, int):
            loc = [loc]
        blknos = self._blknos[loc]
        blklocs = self._blklocs[loc].copy()
        unfit_mgr_locs = []
        unfit_val_locs = []
        removed_blknos = []
        for blkno, val_locs in libinternals.get_blkno_placements(
            blknos, self.nblocks, group=True
        ):
            blk = self.blocks[blkno]
            blk_locs = blklocs[val_locs.indexer]
            if blk.should_store(value):
                # same-typed values: write into the existing block
                blk.set(blk_locs, value_getitem(val_locs))
            else:
                unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
                unfit_val_locs.append(val_locs)
                if len(val_locs) == len(blk.mgr_locs):
                    # the whole block is being replaced
                    removed_blknos.append(blkno)
                else:
                    # shrink the block and renumber its remaining locations
                    self._blklocs[blk.mgr_locs.indexer] = -1
                    blk.delete(blk_locs)
                    self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
        if len(removed_blknos):
            # compact block numbers so they stay 0..nblocks-1
            is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
            is_deleted[removed_blknos] = True
            new_blknos = np.empty(self.nblocks, dtype=np.int64)
            new_blknos.fill(-1)
            new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
            self._blknos = algos.take_1d(
                new_blknos, self._blknos, axis=0, allow_fill=False
            )
            self.blocks = tuple(
                blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
            )
        if unfit_val_locs:
            unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
            unfit_count = len(unfit_mgr_locs)
            new_blocks = []
            if value_is_extension_type:
                # extension arrays cannot be consolidated: one block per item
                new_blocks.extend(
                    make_block(
                        values=value.copy(),
                        ndim=self.ndim,
                        placement=slice(mgr_loc, mgr_loc + 1),
                    )
                    for mgr_loc in unfit_mgr_locs
                )
                self._blknos[unfit_mgr_locs] = np.arange(unfit_count) + len(self.blocks)
                self._blklocs[unfit_mgr_locs] = 0
            else:
                # a single new block collecting all unfit values
                unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
                new_blocks.append(
                    make_block(
                        values=value_getitem(unfit_val_items),
                        ndim=self.ndim,
                        placement=unfit_mgr_locs,
                    )
                )
                self._blknos[unfit_mgr_locs] = len(self.blocks)
                self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
            self.blocks += tuple(new_blocks)
            self._known_consolidated = False
    def insert(self, loc: int, item, value, allow_duplicates: bool = False):
        """Insert *item* with *value* at integer position *loc*, in place.

        Parameters
        ----------
        loc : int
            Insertion position on the items axis.
        item : hashable
            New label.
        value : array-like
            Values for the new item.
        allow_duplicates : bool, default False
            If False, raise ValueError when *item* already exists.
        """
        if not allow_duplicates and item in self.items:
            # Should this be a different kind of error??
            raise ValueError("cannot insert {}, already exists".format(item))
        if not isinstance(loc, int):
            raise TypeError("loc must be int")
        # insert to the axis; this could possibly raise a TypeError
        new_axis = self.items.insert(loc, item)
        block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
        # shift the placements of every block at/after the insertion point
        for blkno, count in _fast_count_smallints(self._blknos[loc:]):
            blk = self.blocks[blkno]
            if count == len(blk.mgr_locs):
                blk.mgr_locs = blk.mgr_locs.add(1)
            else:
                new_mgr_locs = blk.mgr_locs.as_array.copy()
                new_mgr_locs[new_mgr_locs >= loc] += 1
                blk.mgr_locs = new_mgr_locs
        if loc == self._blklocs.shape[0]:
            # np.append is a lot faster, let's use it if we can.
            self._blklocs = np.append(self._blklocs, 0)
            self._blknos = np.append(self._blknos, len(self.blocks))
        else:
            self._blklocs = np.insert(self._blklocs, loc, 0)
            self._blknos = np.insert(self._blknos, loc, len(self.blocks))
        self.axes[0] = new_axis
        self.blocks += (block,)
        self._shape = None
        self._known_consolidated = False
        # keep the block count bounded to avoid pathological fragmentation
        if len(self.blocks) > 100:
            self._consolidate_inplace()
def reindex_axis(
self, new_index, axis, method=None, limit=None, fill_value=None, copy=True
):
new_index = ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(
new_index, method=method, limit=limit
)
return self.reindex_indexer(
new_index, indexer, axis=axis, fill_value=fill_value, copy=copy
)
    def reindex_indexer(
        self, new_axis, indexer, axis, fill_value=None, allow_dups=False, copy=True
    ):
        """Take values along *axis* per *indexer* and relabel with *new_axis*.

        Parameters
        ----------
        new_axis : Index
            New labels for the axis.
        indexer : ndarray of int64 or None
            Positions to take; -1 means fill. None means relabel only.
        axis : int
        fill_value : object, default None (np.nan-like per block)
        allow_dups : bool, default False
            Skip the can-reindex (duplicate) check when True.
        copy : bool, default True
        """
        if indexer is None:
            # pure relabel: no data movement needed
            if new_axis is self.axes[axis] and not copy:
                return self
            result = self.copy(deep=copy)
            result.axes = list(self.axes)
            result.axes[axis] = new_axis
            return result
        self._consolidate_inplace()
        if not allow_dups:
            self.axes[axis]._can_reindex(indexer)
        if axis >= self.ndim:
            raise IndexError("Requested axis not found in manager")
        if axis == 0:
            new_blocks = self._slice_take_blocks_ax0(indexer, fill_tuple=(fill_value,))
        else:
            new_blocks = [
                blk.take_nd(
                    indexer,
                    axis=axis,
                    fill_tuple=(
                        fill_value if fill_value is not None else blk.fill_value,
                    ),
                )
                for blk in self.blocks
            ]
        new_axes = list(self.axes)
        new_axes[axis] = new_axis
        return self.__class__(new_blocks, new_axes)
    def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
        """Return new blocks for a slice/take along axis 0 (the items axis).

        Parameters
        ----------
        slice_or_indexer : slice, boolean ndarray or int64 ndarray
        fill_tuple : tuple or None
            When not None, -1 entries in the indexer are filled (with
            fill_tuple[0], or a dtype-promoted NA when that is None).
        """
        allow_fill = fill_tuple is not None
        sl_type, slobj, sllen = _preprocess_slice_or_indexer(
            slice_or_indexer, self.shape[0], allow_fill=allow_fill
        )
        if self._is_single_block:
            blk = self.blocks[0]
            if sl_type in ("slice", "mask"):
                return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
            elif not allow_fill or self.ndim == 1:
                if allow_fill and fill_tuple[0] is None:
                    # promote the dtype so it can hold the NA fill value
                    _, fill_value = maybe_promote(blk.dtype)
                    fill_tuple = (fill_value,)
                return [
                    blk.take_nd(
                        slobj,
                        axis=0,
                        new_mgr_locs=slice(0, sllen),
                        fill_tuple=fill_tuple,
                    )
                ]
        if sl_type in ("slice", "mask"):
            blknos = self._blknos[slobj]
            blklocs = self._blklocs[slobj]
        else:
            blknos = algos.take_1d(
                self._blknos, slobj, fill_value=-1, allow_fill=allow_fill
            )
            blklocs = algos.take_1d(
                self._blklocs, slobj, fill_value=-1, allow_fill=allow_fill
            )
        # When filling blknos, make sure blknos is updated before appending to
        # blocks list, that way new blkno is exactly len(blocks).
        #
        # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
        # pytables serialization will break otherwise.
        blocks = []
        for blkno, mgr_locs in libinternals.get_blkno_placements(
            blknos, self.nblocks, group=True
        ):
            if blkno == -1:
                # If we've got here, fill_tuple was not None.
                fill_value = fill_tuple[0]
                blocks.append(
                    self._make_na_block(placement=mgr_locs, fill_value=fill_value)
                )
            else:
                blk = self.blocks[blkno]
                if not blk._can_consolidate:
                    # non-consolidatable blocks hold one item each: deep-copy
                    # per target location
                    for mgr_loc in mgr_locs:
                        newblk = blk.copy(deep=True)
                        newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
                        blocks.append(newblk)
                else:
                    blocks.append(
                        blk.take_nd(
                            blklocs[mgr_locs.indexer],
                            axis=0,
                            new_mgr_locs=mgr_locs,
                            fill_tuple=None,
                        )
                    )
        return blocks
def _make_na_block(self, placement, fill_value=None):
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
    def take(self, indexer, axis=1, verify=True, convert=True):
        """Take items along *axis* per *indexer* (a slice or integer array).

        Parameters
        ----------
        indexer : slice or array-like of int
        axis : int, default 1
        verify : bool, default True
            Check that indices are in range after conversion.
        convert : bool, default True
            Wrap negative indices via maybe_convert_indices.
        """
        self._consolidate_inplace()
        indexer = (
            np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64")
            if isinstance(indexer, slice)
            else np.asanyarray(indexer, dtype="int64")
        )
        n = self.shape[axis]
        if convert:
            indexer = maybe_convert_indices(indexer, n)
        if verify:
            # NOTE(review): raises a bare Exception; callers may rely on
            # catching Exception, so the type is left unchanged.
            if ((indexer == -1) | (indexer >= n)).any():
                raise Exception("Indices must be nonzero and less than the axis length")
        new_labels = self.axes[axis].take(indexer)
        return self.reindex_indexer(
            new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
        )
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.tolist())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(
block.equals(oblock) for block, oblock in zip(self_blocks, other_blocks)
)
    def unstack(self, unstacker_func, fill_value):
        """Return a BlockManager with the blocks unstacked.

        Parameters
        ----------
        unstacker_func : callable
            Partially-applied unstacker (e.g. _Unstacker) taking the values
            and value_columns.
        fill_value : object
            Fill value used for introduced missing entries.
        """
        n_rows = self.shape[-1]
        # a dummy unstacker just to compute the resulting row/column indexes
        dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items)
        new_columns = dummy.get_new_columns()
        new_index = dummy.get_new_index()
        new_blocks = []
        columns_mask = []
        for blk in self.blocks:
            blocks, mask = blk._unstack(
                partial(unstacker_func, value_columns=self.items[blk.mgr_locs.indexer]),
                new_columns,
                n_rows,
                fill_value,
            )
            new_blocks.extend(blocks)
            columns_mask.extend(mask)
        new_columns = new_columns[columns_mask]
        bm = BlockManager(new_blocks, [new_columns, new_index])
        return bm
class SingleBlockManager(BlockManager):
    """Manage a single block (1-dimensional), the storage behind a Series."""
    ndim = 1
    # a single block is consolidated by definition
    _is_consolidated = True
    _known_consolidated = True
    __slots__ = ()
    def __init__(
        self,
        block: Block,
        axis: Union[Index, List[Index]],
        do_integrity_check: bool = False,
        fastpath: bool = False,
    ):
        # accept a one-element list of axes for BlockManager compatibility
        if isinstance(axis, list):
            if len(axis) != 1:
                raise ValueError(
                    "cannot create SingleBlockManager with more than 1 axis"
                )
            axis = axis[0]
        if fastpath:
            # trusted inputs: skip ensure_index / consolidation
            self.axes = [axis]
            if isinstance(block, list):
                if len(block) == 0:
                    block = [np.array([])]
                elif len(block) != 1:
                    raise ValueError(
                        "Cannot create SingleBlockManager with more than 1 block"
                    )
                block = block[0]
        else:
            self.axes = [ensure_index(axis)]
            if isinstance(block, list):
                if len(block) > 1:
                    # coerce all candidate blocks to a common dtype, then merge
                    dtype = _interleaved_dtype(block)
                    block = [b.astype(dtype) for b in block]
                    block = _consolidate(block)
                if len(block) != 1:
                    raise ValueError(
                        "Cannot create SingleBlockManager with more than 1 block"
                    )
                block = block[0]
        if not isinstance(block, Block):
            block = make_block(block, placement=slice(0, len(axis)), ndim=1)
        self.blocks = tuple([block])
    def _post_setstate(self):
        # nothing to rebuild for a single block after unpickling
        pass
    @property
    def _block(self):
        """The single underlying Block."""
        return self.blocks[0]
    @property
    def _values(self):
        """The raw values of the single block."""
        return self._block.values
    @property
    def _blknos(self):
        # block-number bookkeeping is meaningless for a single block
        return None
    @property
    def _blklocs(self):
        return None
    def get_slice(self, slobj, axis=0):
        """Return a new SingleBlockManager sliced by *slobj* along axis 0."""
        if axis >= self.ndim:
            raise IndexError("Requested axis not found in manager")
        return self.__class__(
            self._block._slice(slobj), self.index[slobj], fastpath=True
        )
    @property
    def index(self):
        """The (only) axis."""
        return self.axes[0]
    def convert(self, **kwargs):
        """Attempt soft conversion of object values (delegates to the block)."""
        return self.apply("convert", **kwargs)
    @property
    def dtype(self):
        return self._block.dtype
    @property
    def array_dtype(self):
        return self._block.array_dtype
    @property
    def ftype(self):
        return self._block.ftype
    def get_dtype_counts(self):
        return {self.dtype.name: 1}
    def get_ftype_counts(self):
        return {self.ftype: 1}
    def get_dtypes(self):
        return np.array([self._block.dtype])
    def get_ftypes(self):
        return np.array([self._block.ftype])
    def external_values(self):
        """Values exposed to users (may be an ExtensionArray)."""
        return self._block.external_values()
    def internal_values(self):
        """Values as stored internally."""
        return self._block.internal_values()
    def get_values(self):
        return np.array(self._block.to_dense(), copy=False)
    @property
    def _can_hold_na(self):
        return self._block._can_hold_na
    def is_consolidated(self):
        # always consolidated: there is only one block
        return True
    def _consolidate_check(self):
        pass
    def _consolidate_inplace(self):
        pass
    def delete(self, item):
        """Delete the label *item* from the block and the axis, in place."""
        loc = self.items.get_loc(item)
        self._block.delete(loc)
        self.axes[0] = self.axes[0].delete(loc)
    def fast_xs(self, loc):
        """Return the scalar at position *loc*."""
        return self._block.values[loc]
    def concat(self, to_concat, new_axis):
        """Concatenate a list of SingleBlockManagers into one with *new_axis*.

        When all non-empty inputs share a dtype the block-level fast path is
        used; otherwise values are concatenated via concat_compat (which may
        upcast).
        """
        non_empties = [x for x in to_concat if len(x) > 0]
        if len(non_empties) > 0:
            blocks = [obj.blocks[0] for obj in non_empties]
            if len({b.dtype for b in blocks}) == 1:
                new_block = blocks[0].concat_same_type(blocks)
            else:
                values = [x.values for x in blocks]
                values = concat_compat(values)
                new_block = make_block(values, placement=slice(0, len(values), 1))
        else:
            values = [x._block.values for x in to_concat]
            values = concat_compat(values)
            new_block = make_block(values, placement=slice(0, len(values), 1))
        mgr = SingleBlockManager(new_block, new_axis)
        return mgr
def create_block_manager_from_blocks(blocks, axes):
    """Build a consolidated BlockManager from blocks (or one raw ndarray).

    A single non-Block element is treated as the full 2D values array and
    wrapped in one block spanning all items. ValueError during construction
    is rethrown as a friendlier shape-mismatch error.
    """
    try:
        if len(blocks) == 1 and not isinstance(blocks[0], Block):
            if not len(blocks[0]):
                blocks = []
            else:
                # is basically "all items", but if there're many, don't bother
                # converting, it's an error anyway.
                blocks = [
                    make_block(values=blocks[0], placement=slice(0, len(axes[0])))
                ]
        mgr = BlockManager(blocks, axes)
        mgr._consolidate_inplace()
        return mgr
    except ValueError as e:
        blocks = [getattr(b, "values", b) for b in blocks]
        tot_items = sum(b.shape[0] for b in blocks)
        construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
    """Build a consolidated BlockManager from per-item 1D arrays.

    ValueError during block formation or construction is rethrown as a
    friendlier shape-mismatch error via construction_error.
    """
    try:
        mgr = BlockManager(form_blocks(arrays, names, axes), axes)
        mgr._consolidate_inplace()
        return mgr
    except ValueError as e:
        construction_error(len(arrays), arrays[0].shape, axes, e)
def construction_error(tot_items, block_shape, axes, e=None):
    """Raise a helpful error for a failed manager construction.

    Parameters
    ----------
    tot_items : int
        Number of items (columns) in the passed data.
    block_shape : sequence of int
        Shape of the block values beyond the items dimension.
    axes : sequence of index-like
        The axes the caller supplied.
    e : Exception or None
        Original exception; re-raised when the shapes actually agree (the
        real problem is elsewhere).
    """
    passed = tuple(map(int, [tot_items] + list(block_shape)))
    # report 1D/2D shapes in (rows, columns) order, hence the reversal
    if len(passed) <= 2:
        passed = passed[::-1]
    implied = tuple(len(ax) for ax in axes)
    if len(implied) <= 2:
        implied = implied[::-1]
    if passed == implied and e is not None:
        raise e
    if block_shape[0] == 0:
        raise ValueError("Empty data passed with indices specified.")
    raise ValueError(
        "Shape of passed values is {0}, indices imply {1}".format(passed, implied)
    )
def form_blocks(arrays, names, axes):
    """Group per-item arrays into homogeneous blocks.

    Arrays are bucketed by the Block subclass that should hold them, then
    stacked per bucket. Labels in axes[0] with no matching array are filled
    with an object-dtype NaN block.
    """
    items_dict = defaultdict(list)
    extra_locs = []
    names_idx = ensure_index(names)
    if names_idx.equals(axes[0]):
        names_indexer = np.arange(len(names_idx))
    else:
        assert names_idx.intersection(axes[0]).is_unique
        names_indexer = names_idx.get_indexer_for(axes[0])
    for i, name_idx in enumerate(names_indexer):
        if name_idx == -1:
            # label present in axes[0] but no array supplied -> NA filler
            extra_locs.append(i)
            continue
        k = names[name_idx]
        v = arrays[name_idx]
        block_type = get_block_type(v)
        items_dict[block_type.__name__].append((i, k, v))
    blocks = []
    if len(items_dict["FloatBlock"]):
        float_blocks = _multi_blockify(items_dict["FloatBlock"])
        blocks.extend(float_blocks)
    if len(items_dict["ComplexBlock"]):
        complex_blocks = _multi_blockify(items_dict["ComplexBlock"])
        blocks.extend(complex_blocks)
    if len(items_dict["TimeDeltaBlock"]):
        timedelta_blocks = _multi_blockify(items_dict["TimeDeltaBlock"])
        blocks.extend(timedelta_blocks)
    if len(items_dict["IntBlock"]):
        int_blocks = _multi_blockify(items_dict["IntBlock"])
        blocks.extend(int_blocks)
    if len(items_dict["DatetimeBlock"]):
        datetime_blocks = _simple_blockify(items_dict["DatetimeBlock"], _NS_DTYPE)
        blocks.extend(datetime_blocks)
    # the block types below cannot be consolidated: one block per array
    if len(items_dict["DatetimeTZBlock"]):
        dttz_blocks = [
            make_block(array, klass=DatetimeTZBlock, placement=[i])
            for i, _, array in items_dict["DatetimeTZBlock"]
        ]
        blocks.extend(dttz_blocks)
    if len(items_dict["BoolBlock"]):
        bool_blocks = _simple_blockify(items_dict["BoolBlock"], np.bool_)
        blocks.extend(bool_blocks)
    if len(items_dict["ObjectBlock"]) > 0:
        object_blocks = _simple_blockify(items_dict["ObjectBlock"], np.object_)
        blocks.extend(object_blocks)
    if len(items_dict["CategoricalBlock"]) > 0:
        cat_blocks = [
            make_block(array, klass=CategoricalBlock, placement=[i])
            for i, _, array in items_dict["CategoricalBlock"]
        ]
        blocks.extend(cat_blocks)
    if len(items_dict["ExtensionBlock"]):
        external_blocks = [
            make_block(array, klass=ExtensionBlock, placement=[i])
            for i, _, array in items_dict["ExtensionBlock"]
        ]
        blocks.extend(external_blocks)
    if len(items_dict["ObjectValuesExtensionBlock"]):
        external_blocks = [
            make_block(array, klass=ObjectValuesExtensionBlock, placement=[i])
            for i, _, array in items_dict["ObjectValuesExtensionBlock"]
        ]
        blocks.extend(external_blocks)
    if len(extra_locs):
        # fill missing labels with an object block of NaNs
        shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
        block_values = np.empty(shape, dtype=object)
        block_values.fill(np.nan)
        na_block = make_block(block_values, placement=extra_locs)
        blocks.append(na_block)
    return blocks
def _simple_blockify(tuples, dtype):
    """Stack all (placement, name, array) tuples into ONE block of *dtype*.

    Returns a one-element list for interface symmetry with _multi_blockify.
    """
    values, placement = _stack_arrays(tuples, dtype)
    # stacking can coerce; force the requested dtype if it drifted
    if dtype is not None and values.dtype != dtype:
        values = values.astype(dtype)
    return [make_block(values, placement=placement)]
def _multi_blockify(tuples, dtype=None):
    """Build one block per distinct array dtype found in *tuples*."""
    new_blocks = []
    for group_dtype, tup_block in itertools.groupby(tuples, lambda x: x[2].dtype):
        values, placement = _stack_arrays(list(tup_block), group_dtype)
        new_blocks.append(make_block(values, placement=placement))
    return new_blocks
def _stack_arrays(tuples, dtype):
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return (len(x),)
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(
blocks: List[Block]
) -> Optional[Union[np.dtype, ExtensionDtype]]:
if not len(blocks):
return None
return find_common_type([b.dtype for b in blocks])
def _consolidate(blocks):
    """Merge blocks that share the same (_can_consolidate, dtype) key."""
    key = lambda blk: blk._consolidate_key
    merged = []
    for (can_consolidate, dtype), group in itertools.groupby(sorted(blocks, key=key), key):
        combined = _merge_blocks(
            list(group), dtype=dtype, _can_consolidate=can_consolidate
        )
        merged = _extend_blocks(combined, merged)
    return merged
def _compare_or_regex_search(a, b, regex=False):
    """Compare *a* against *b* by equality, or by regex search when regex=True.

    Raises TypeError when an array operand yields a scalar result (i.e. the
    elementwise comparison did not broadcast), mirroring numpy semantics.
    """
    if regex:
        op = np.vectorize(
            lambda x: bool(re.search(b, x)) if isinstance(x, str) else False
        )
    else:
        op = lambda x: operator.eq(x, b)
    is_a_array = isinstance(a, np.ndarray)
    is_b_array = isinstance(b, np.ndarray)
    # datetime-vs-numeric and numeric-vs-string comparisons are never equal
    if is_datetimelike_v_numeric(a, b) or is_numeric_v_string_like(a, b):
        result = False
    else:
        result = op(a)
    if is_scalar(result) and (is_a_array or is_b_array):
        type_names = [type(a).__name__, type(b).__name__]
        if is_a_array:
            type_names[0] = "ndarray(dtype={dtype})".format(dtype=a.dtype)
        if is_b_array:
            type_names[1] = "ndarray(dtype={dtype})".format(dtype=b.dtype)
        raise TypeError(
            "Cannot compare types {a!r} and {b!r}".format(
                a=type_names[0], b=type_names[1]
            )
        )
    return result
def _transform_index(index, func, level=None):
if isinstance(index, MultiIndex):
if level is not None:
items = [
tuple(func(y) if i == level else y for i, y in enumerate(x))
for x in index
]
else:
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name, tupleize_cols=False)
def _fast_count_smallints(arr):
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
    """Classify an axis-0 indexer.

    Returns a (kind, indexer, length) triple where kind is "slice", "mask"
    or "fancy".
    """
    if isinstance(slice_or_indexer, slice):
        return (
            "slice",
            slice_or_indexer,
            libinternals.slice_len(slice_or_indexer, length),
        )
    if isinstance(slice_or_indexer, np.ndarray) and slice_or_indexer.dtype == np.bool_:
        return "mask", slice_or_indexer, slice_or_indexer.sum()
    indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
    if not allow_fill:
        # wrap negative indices / bounds-check when -1 is not a fill marker
        indexer = maybe_convert_indices(indexer, length)
    return "fancy", indexer, len(indexer)
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
    """Concatenate block managers into one.

    Parameters
    ----------
    mgrs_indexers : list of (BlockManager, {axis: indexer}) tuples
    axes : list of Index
        Axes of the resulting manager.
    concat_axis : int
    copy : bool
        Copy block values even when reindexing is a no-op.
    """
    concat_plans = [
        get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
    ]
    concat_plan = combine_concat_plans(concat_plans, concat_axis)
    blocks = []
    for placement, join_units in concat_plan:
        if len(join_units) == 1 and not join_units[0].indexers:
            # single unit with no reindexing: reuse (or copy) the block as-is
            b = join_units[0].block
            values = b.values
            if copy:
                values = values.copy()
            elif not copy:
                values = values.view()
            b = b.make_block_same_class(values, placement=placement)
        elif is_uniform_join_units(join_units):
            # all units share a type: block-level fast concat
            b = join_units[0].block.concat_same_type(
                [ju.block for ju in join_units], placement=placement
            )
        else:
            b = make_block(
                concatenate_join_units(join_units, concat_axis, copy=copy),
                placement=placement,
            )
        blocks.append(b)
    return BlockManager(blocks, axes)
| true | true |
1c3154da6b77f5e2da043f184d552568ae99cd97 | 388 | py | Python | tests/swig/python/family/driver.py | bragaigor/souffle | c39cc5cdb73d23d9af0d48976077c4c0a7943cf2 | [
"UPL-1.0"
] | 570 | 2016-03-15T15:17:18.000Z | 2022-03-27T09:47:45.000Z | tests/swig/python/family/driver.py | bragaigor/souffle | c39cc5cdb73d23d9af0d48976077c4c0a7943cf2 | [
"UPL-1.0"
] | 1,744 | 2016-03-16T19:19:19.000Z | 2022-03-31T21:28:28.000Z | tests/swig/python/family/driver.py | bragaigor/souffle | c39cc5cdb73d23d9af0d48976077c4c0a7943cf2 | [
"UPL-1.0"
] | 183 | 2016-03-12T17:51:20.000Z | 2022-03-24T12:03:47.000Z | """
Souffle - A Datalog Compiler
Copyright (c) 2019, The Souffle Developers. All rights reserved
Licensed under the Universal Permissive License v 1.0 as shown at:
- https://opensource.org/licenses/UPL
- <souffle root>/licenses/SOUFFLE-UPL.txt
"""
import SwigInterface
import sys
# Load the compiled "family" Souffle program, feed it the facts directory
# given on the command line, evaluate, and print all output relations to ".".
p = SwigInterface.newInstance('family')
p.loadAll(sys.argv[1])
p.run()
p.printAll('.')
# hand ownership of the wrapped C++ object to Python so `del` frees it
p.thisown = 1
del p
| 22.823529 | 66 | 0.747423 |
# NOTE(review): verbatim duplicate of the driver script above (dataset
# residue); kept unchanged.
import SwigInterface
import sys
p = SwigInterface.newInstance('family')
p.loadAll(sys.argv[1])
p.run()
p.printAll('.')
p.thisown = 1
del p
| true | true |
1c3155289db24c45371df69cb5782cb0502ebb2a | 2,038 | py | Python | forum2/urls.py | boxed/forum | abb3699d310bf3a404f031a3cb0e4bdbf403da5a | [
"BSD-3-Clause"
] | 2 | 2019-06-28T16:30:44.000Z | 2020-12-28T01:46:52.000Z | forum2/urls.py | boxed/forum | abb3699d310bf3a404f031a3cb0e4bdbf403da5a | [
"BSD-3-Clause"
] | 14 | 2019-02-26T17:25:54.000Z | 2019-04-03T18:11:24.000Z | forum2/urls.py | boxed/forum | abb3699d310bf3a404f031a3cb0e4bdbf403da5a | [
"BSD-3-Clause"
] | 1 | 2019-06-14T14:21:47.000Z | 2019-06-14T14:21:47.000Z | """forum2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from iommi.admin import Admin
import forum.views as views
import forum2.views
import forum2.views as f2views
import unread.views
import unread.views as unread_views
import issues
class MyAdmin(Admin):
    """iommi admin subclass that only switches the rendering style."""
    class Meta:
        iommi_style = 'bootstrap'
# URL routing table. Django uses the first matching pattern, so duplicate
# registrations are dead entries.
urlpatterns = [
    path('', forum2.views.index),
    path('login/', forum2.views.login),
    path('welcome/', forum2.views.welcome),
    path('logout/', forum2.views.logout),
    path('test_n_plus_1/', forum2.views.test_n_plus_1),
    path('subscriptions/', views.subscriptions),
    path('rooms/', views.rooms),
    path('rooms/<int:room_pk>/', views.view_room),
    path('rooms/<int:room_pk>/write/', views.write),
    path('rooms/<int:room_pk>/message/<int:message_pk>/edit/', views.write),
    path('rooms/<int:room_pk>/message/<int:message_pk>/delete/', views.delete),
    path('subscribe/', unread_views.change_subscription),
    path('wiki/', include('wiki.urls')),
    # 'issues/' was registered twice; the later duplicate was unreachable and
    # has been removed
    path('issues/', include('issues.urls')),
    path('', include('authentication.urls')),
    path('api/0/unread_simple/', unread.views.api_unread_simple),
    path('api/0/unread/', unread.views.api_unread),
    path('error_test/', f2views.error_test),
    path('blank/', f2views.blank),
    path('archive/', include('archive.urls')),
    path('iommi-admin/', include(MyAdmin.urls())),
]
| 32.870968 | 79 | 0.68842 | from django.urls import path, include
from iommi.admin import Admin
import forum.views as views
import forum2.views
import forum2.views as f2views
import unread.views
import unread.views as unread_views
import issues
# NOTE(review): verbatim duplicate of the urls.py content above (dataset
# residue); kept unchanged.
class MyAdmin(Admin):
    class Meta:
        iommi_style = 'bootstrap'
# NOTE(review): 'issues/' is registered twice below; the second entry is
# unreachable (Django uses the first match).
urlpatterns = [
    path('', forum2.views.index),
    path('login/', forum2.views.login),
    path('welcome/', forum2.views.welcome),
    path('logout/', forum2.views.logout),
    path('test_n_plus_1/', forum2.views.test_n_plus_1),
    path('subscriptions/', views.subscriptions),
    path('rooms/', views.rooms),
    path('rooms/<int:room_pk>/', views.view_room),
    path('rooms/<int:room_pk>/write/', views.write),
    path('rooms/<int:room_pk>/message/<int:message_pk>/edit/', views.write),
    path('rooms/<int:room_pk>/message/<int:message_pk>/delete/', views.delete),
    path('subscribe/', unread_views.change_subscription),
    path('wiki/', include('wiki.urls')),
    path('issues/', include('issues.urls')),
    path('', include('authentication.urls')),
    path('api/0/unread_simple/', unread.views.api_unread_simple),
    path('api/0/unread/', unread.views.api_unread),
    path('error_test/', f2views.error_test),
    path('blank/', f2views.blank),
    path('issues/', include('issues.urls')),
    path('archive/', include('archive.urls')),
    path('iommi-admin/', include(MyAdmin.urls())),
]
1c31562ad79eb4b6493ac8daf61affd73e900676 | 2,989 | py | Python | ast.py | xXGeistmeisterXx/ast-tools | b55598cb2eb67627036e4e619a9cb26b9f50093f | [
"MIT"
] | null | null | null | ast.py | xXGeistmeisterXx/ast-tools | b55598cb2eb67627036e4e619a9cb26b9f50093f | [
"MIT"
] | null | null | null | ast.py | xXGeistmeisterXx/ast-tools | b55598cb2eb67627036e4e619a9cb26b9f50093f | [
"MIT"
] | null | null | null | import json
import re
default = [0.0, 0.0, 0.0]
rounding = 5
def setValue(dict, obj, pose, pose_name):
    """Copy one pose entry from parsed armor-stand NBT into *obj*.

    Each component arrives as a string with a trailing unit suffix (e.g.
    "12.3f"); the suffix is stripped and the value parsed as float. Missing
    poses fall back to a fresh copy of the module-level `default` vector.
    """
    if pose in dict:
        # strip the trailing "f" suffix from each of the three components;
        # s[:-1] is equivalent to the original s[:len(s)-1], including for ""
        obj[pose_name] = [float(component[:-1]) for component in dict[pose]]
    else:
        obj[pose_name] = default.copy()
def get_armorstand(s_string):
    """Parse a Minecraft armor-stand summon command into a plain dict.

    Expects a command like `/summon armor_stand <x> <y> <z> {NBT...}`
    (coordinates at word positions 2-4, NBT from position 5 on) — assumes
    that exact word layout; TODO confirm against the command source.
    """
    s_list = s_string.split(" ")
    s_json = " ".join(s_list[5:])
    # quote every bare word/number so the NBT snippet becomes valid JSON
    regex = r'(([a-z]|[A-Z]|[0-9]+|\.|-|_)+)'
    matches = re.findall(regex, s_json)
    final_list = []
    for match in matches:
        # skip tokens already preceded by a quote
        if(not '"' in s_json[s_json.find(match[0]) - 1]):
            index = s_json.find(match[0]) + len(match[0]) + 2
            s_json = s_json.replace(match[0], '"' + match[0] + '"', 1)
            # commit everything up to (and including) the quoted token so the
            # same text is not re-matched on the next iteration
            final_list.append(s_json[:index])
            s_json = s_json[index:]
    final = "".join(final_list) + s_json
    s_dict = json.loads(final)
    obj = {}
    obj["name"] = s_dict["CustomName"]
    obj["location"] = [float(s_list[2]), float(s_list[3]), float(s_list[4])]
    # rotation strings carry a trailing unit suffix, stripped before parsing
    obj["rotation"] = float(s_dict["Rotation"][0][:len(s_dict["Rotation"][0])-1])
    setValue(s_dict["Pose"], obj, "Head", "head")
    setValue(s_dict["Pose"], obj, "Body", "body")
    setValue(s_dict["Pose"], obj, "RightLeg", "rleg")
    setValue(s_dict["Pose"], obj, "LeftLeg", "lleg")
    setValue(s_dict["Pose"], obj, "RightArm", "rarm")
    setValue(s_dict["Pose"], obj, "LeftArm", "larm")
    return obj
def compare_stands(stand1, stand2):
    """Print one `/asa animate` command per property that differs.

    Deltas are rounded to `rounding` decimals; the rotation delta is negated
    and normalized into (-180, 180].
    """
    for key in stand1:
        if(stand1[key] != stand2[key] and key != "name"):
            command = ""
            if(key == "location"):
                delta = [round(stand2["location"][0] - stand1["location"][0], rounding), round(stand2["location"][1] - stand1["location"][1], rounding), round(stand2["location"][2] - stand1["location"][2], rounding)]
                command = f'/asa animate {stand1["name"]} {stand1["location"][0]} {stand1["location"][1]} {stand1["location"][2]} 10 {key} {delta[0]} {delta[1]} {delta[2]} 10'
            elif(key == "rotation"):
                # rotation is inverted, presumably to match the animation
                # plugin's sign convention — TODO confirm
                delta = -1.00000 * (stand2["rotation"] - stand1["rotation"])
                if(delta > 180):
                    delta = delta - 360.00000
                if(delta < -180):
                    delta = delta + 360.00000
                delta = round(delta, rounding)
                command = f'/asa animate {stand1["name"]} {stand1["location"][0]} {stand1["location"][1]} {stand1["location"][2]} 10 {key} {delta} 10'
            else:
                # pose vectors (head/body/arms/legs): componentwise delta
                delta = [round(stand2[key][0] - stand1[key][0], rounding), round(stand2[key][1] - stand1[key][1], rounding), round(stand2[key][2] - stand1[key][2], rounding)]
                command = f'/asa animate {stand1["name"]} {stand1["location"][0]} {stand1["location"][1]} {stand1["location"][2]} 10 {key} {delta[0]} {delta[1]} {delta[2]} 10'
            print(command)
# Interactive driver: read a start and finish summon command, print the
# animation commands between them, then keep chaining further keyframes.
s_start = input("start summon: ")
start = get_armorstand(s_start)
s_finish = input("finish summon: ")
finish = get_armorstand(s_finish)
print()
compare_stands(start, finish)
while(True):
    print()
    s_next = input("next summon: ")
    next = get_armorstand(s_next)
    print()
    # diff against the previous keyframe, then advance
    compare_stands(finish, next)
    finish = next.copy()
| 37.835443 | 205 | 0.611576 | import json
import re
# Fallback pose vector used when a limb entry is missing from the parsed NBT.
default = [0.0, 0.0, 0.0]
# Number of decimal places kept when emitting animation deltas.
rounding = 5
def setValue(dict, obj, pose, pose_name):
    """Copy one pose entry from the parsed NBT mapping into *obj*.

    Each component is a string such as ``"12.5f"``; the trailing type
    suffix is stripped before the float conversion.  A missing pose falls
    back to a copy of the module-level ``default`` vector.
    """
    if pose not in dict:
        obj[pose_name] = default.copy()
        return
    obj[pose_name] = [float(dict[pose][i][:-1]) for i in range(3)]
def get_armorstand(s_string):
    """Parse a Minecraft ``/summon`` armor-stand command into a plain dict
    with ``name``, ``location``, ``rotation`` and the six pose vectors.

    The NBT payload after the coordinates is not valid JSON (keys and many
    values are unquoted), so every bare token is wrapped in double quotes
    before the result is handed to ``json.loads``.
    """
    s_list = s_string.split(" ")
    # Everything after the first five tokens ("/summon <entity> <x> <y> <z>")
    # is treated as the NBT blob — TODO confirm against the producing command.
    s_json = " ".join(s_list[5:])
    regex = r'(([a-z]|[A-Z]|[0-9]+|\.|-|_)+)'
    matches = re.findall(regex, s_json)
    final_list = []
    for match in matches:
        # Only quote tokens that are not already preceded by a quote.  After
        # rewriting a token, move the processed prefix into final_list and
        # continue scanning the remainder, so earlier text is never touched
        # again by later (possibly identical) matches.
        if(not '"' in s_json[s_json.find(match[0]) - 1]):
            index = s_json.find(match[0]) + len(match[0]) + 2
            s_json = s_json.replace(match[0], '"' + match[0] + '"', 1)
            final_list.append(s_json[:index])
            s_json = s_json[index:]
    final = "".join(final_list) + s_json
    s_dict = json.loads(final)
    obj = {}
    obj["name"] = s_dict["CustomName"]
    # Positional tokens 2-4 of the summon command are the x/y/z coordinates.
    obj["location"] = [float(s_list[2]), float(s_list[3]), float(s_list[4])]
    # NBT floats carry a trailing "f" suffix which is sliced off first.
    obj["rotation"] = float(s_dict["Rotation"][0][:len(s_dict["Rotation"][0])-1])
    setValue(s_dict["Pose"], obj, "Head", "head")
    setValue(s_dict["Pose"], obj, "Body", "body")
    setValue(s_dict["Pose"], obj, "RightLeg", "rleg")
    setValue(s_dict["Pose"], obj, "LeftLeg", "lleg")
    setValue(s_dict["Pose"], obj, "RightArm", "rarm")
    setValue(s_dict["Pose"], obj, "LeftArm", "larm")
    return obj
def compare_stands(stand1, stand2):
    """Print one ``/asa animate`` command per difference between two
    armor-stand snapshots (the ``name`` key is only an identifier and is
    never compared).

    ``rotation`` is a scalar; ``location`` and all pose keys are
    3-component vectors and share the same command format, so they are
    handled by a single branch (the original duplicated that code).
    """
    for key in stand1:
        # Skip the identity field and values that did not change.
        if key == "name" or stand1[key] == stand2[key]:
            continue
        # All commands share the same prefix: target name, current
        # location, duration, and the property being animated.
        base = f'/asa animate {stand1["name"]} {stand1["location"][0]} {stand1["location"][1]} {stand1["location"][2]} 10 {key}'
        if key == "rotation":
            # Sign-flipped delta, normalised into (-180, 180] so the stand
            # always turns the short way around.
            delta = -1.00000 * (stand2["rotation"] - stand1["rotation"])
            if delta > 180:
                delta = delta - 360.00000
            if delta < -180:
                delta = delta + 360.00000
            delta = round(delta, rounding)
            command = f'{base} {delta} 10'
        else:
            # Vector-valued keys: per-component rounded delta.
            delta = [round(stand2[key][i] - stand1[key][i], rounding) for i in range(3)]
            command = f'{base} {delta[0]} {delta[1]} {delta[2]} 10'
        print(command)
# Interactive driver: read armor-stand summon commands from the user and
# print the /asa animate deltas between each consecutive pair of snapshots.
previous = get_armorstand(input("start summon: "))
current = get_armorstand(input("finish summon: "))
print()
compare_stands(previous, current)
# Keep diffing every newly entered summon against the previous one until
# the user interrupts the program.
while True:
    print()
    upcoming = get_armorstand(input("next summon: "))
    print()
    compare_stands(current, upcoming)
    current = upcoming.copy()
| true | true |
1c31564e9f41bef1f7beef9e9f4cb58fe1f9d57c | 23,221 | py | Python | ploy_ezjail/__init__.py | ployground/ploy_ezjail | d55eb49a9a9163117c3b44b5308ca3424f8e2def | [
"BSD-3-Clause"
] | 2 | 2016-04-22T09:12:00.000Z | 2021-07-27T08:58:06.000Z | ploy_ezjail/__init__.py | ployground/ploy_ezjail | d55eb49a9a9163117c3b44b5308ca3424f8e2def | [
"BSD-3-Clause"
] | 7 | 2015-01-30T23:43:01.000Z | 2017-03-31T19:53:28.000Z | ploy_ezjail/__init__.py | ployground/ploy_ezjail | d55eb49a9a9163117c3b44b5308ca3424f8e2def | [
"BSD-3-Clause"
] | 4 | 2015-10-16T10:58:50.000Z | 2018-02-22T18:05:11.000Z | from fnmatch import fnmatch
from lazy import lazy
from ploy.common import BaseMaster, Executor, StartupScriptMixin
from ploy.common import parse_ssh_keygen
from ploy.config import BaseMassager, value_asbool
from ploy.plain import Instance as PlainInstance
from ploy.proxy import ProxyInstance
import logging
import re
import socket
import sys
import time
log = logging.getLogger('ploy_ezjail')
class EzjailError(Exception):
    """Raised for any failure while invoking or parsing ``ezjail-admin``."""
    pass
rc_startup = """#!/bin/sh
#
# BEFORE: DAEMON
# PROVIDE: ploy_startup_script
#
# ploy startup script
. /etc/rc.subr
name=ploy_startup_script
start_cmd=startup
startup() {
# Remove traces of ourself
# N.B.: Do NOT rm $0, it points to /etc/rc
##########################
rm -f "/etc/rc.d/ploy_startup_script"
test -e /etc/startup_script && /etc/startup_script || true
test -e /etc/startup_script && chmod 0600 /etc/startup_script
}
run_rc_command "$1"
"""
class Instance(PlainInstance, StartupScriptMixin):
    """A single ezjail jail, managed through its host's :class:`Master`.

    Fixes over the previous revision: the mount-source mkdir error path
    used ``"..." % src`` with two placeholders and one argument (a
    guaranteed TypeError), and ``terminate()`` ended the progress-dot line
    with a bare ``print`` expression, a no-op on Python 3.
    """
    sectiongroupname = 'ez-instance'
    # Jail names may only contain letters, digits and underscores.
    _id_regexp = re.compile('^[a-zA-Z0-9_]+$')

    @property
    def _name(self):
        # The jail name may be overridden independently of the instance id.
        return self.config.get('ezjail-name', self.id)

    def validate_id(self, sid):
        """Exit with an error unless *sid* is a valid ezjail jail name."""
        if self._id_regexp.match(sid) is None:
            log.error("Invalid instance name '%s'. An ezjail instance name may only contain letters, numbers and underscores." % sid)
            sys.exit(1)
        return sid

    def get_ip(self):
        """Return the first address of the configured ``ip`` option.

        Multiple addresses are comma separated; each may carry an
        ``interface|address`` prefix which is stripped.
        """
        first_ip = self.config['ip']
        if ',' in first_ip:
            first_ip = first_ip.split(',')[0]
        if '|' in first_ip:
            first_ip = first_ip.split('|')[1]
        return first_ip

    def get_host(self):
        """Hostname to connect to; falls back to the jail's first IP."""
        return self.config.get('host', self.get_ip())

    def get_fingerprint(self):
        """Return the jail's RSA host key fingerprint (jail must be running)."""
        status = self._status()
        if status == 'unavailable':
            log.info("Instance '%s' unavailable", self.id)
            sys.exit(1)
        if status != 'running':
            log.info("Instance state: %s", status)
            sys.exit(1)
        rc, out, err = self.master.ezjail_admin('console', name=self._name, cmd='ssh-keygen -lf /etc/ssh/ssh_host_rsa_key.pub')
        info = out.split()
        return info[1]

    def get_fingerprints(self):
        """Return fingerprint/keylen/keytype dicts for all host keys."""
        status = self._status()
        if status == 'unavailable':
            log.info("Instance '%s' unavailable", self.id)
            sys.exit(1)
        if status != 'running':
            log.info("Instance state: %s", status)
            sys.exit(1)
        result = []
        rc, out, err = self.master.ezjail_admin(
            'console', name=self._name,
            cmd='ls /etc/ssh/')
        if rc != 0:
            return result
        pub_key_names = list(
            x for x in out.splitlines()
            if fnmatch(x, 'ssh_host*_key.pub'))
        for pub_key_name in pub_key_names:
            rc, out, err = self.master.ezjail_admin(
                'console', name=self._name,
                cmd='ssh-keygen -lf /etc/ssh/%s' % pub_key_name)
            if rc != 0:
                continue
            (key,) = parse_ssh_keygen(out)
            info = dict(
                fingerprint=key.fingerprint,
                keylen=key.keylen,
                keytype=key.keytype)
            result.append(info)
        return result

    def get_massagers(self):
        return get_instance_massagers()

    def init_ssh_key(self, user=None):
        """Set up ssh access to the jail, proxying through the jail host."""
        status = self._status()
        if status == 'unavailable':
            log.error("Instance '%s' unavailable", self.id)
            raise self.paramiko.SSHException()
        if status != 'running':
            log.error("Instance state: %s", status)
            raise self.paramiko.SSHException()
        if 'proxyhost' not in self.config:
            self.config['proxyhost'] = self.master.id
        if 'proxycommand' not in self.config:
            mi = self.master.instance
            self.config['proxycommand'] = self.proxycommand_with_instance(mi)
        return PlainInstance.init_ssh_key(self, user=user)

    def _status(self, jails=None):
        """Map ezjail's two-letter status to 'unavailable'/'stopped'/'running'.

        Raises EzjailError for status codes outside the known set, or for
        a jail that is mounted but not running.
        """
        if jails is None:
            jails = self.master.ezjail_admin('list')
        if self._name not in jails:
            return 'unavailable'
        jail = jails[self._name]
        status = jail['status']
        if len(status) != 2 or status[0] not in 'DIEBZ' or status[1] not in 'RAS':
            raise EzjailError("Invalid jail status '%s' for '%s'" % (status, self._name))
        if status[1] == 'R':
            return 'running'
        elif status[1] == 'S':
            return 'stopped'
        raise EzjailError("Don't know how to handle mounted but not running jail '%s'" % self._name)

    def status(self):
        """Log a human readable status summary for this jail."""
        try:
            jails = self.master.ezjail_admin('list')
        except EzjailError as e:
            log.error("Can't get status of jails: %s", e)
            return
        status = self._status(jails)
        if status == 'unavailable':
            log.info("Instance '%s' unavailable", self.id)
            return
        if status != 'running':
            log.info("Instance state: %s", status)
            return
        log.info("Instance running.")
        log.info("Instances jail id: %s" % jails[self._name]['jid'])
        if self._name != self.id:
            log.info("Instances jail name: %s" % self._name)
        log.info("Instances jail ip: %s" % jails[self._name]['ip'])

    def start(self, overrides=None):
        """Create the jail if necessary, configure rc ordering and nullfs
        mounts, then start it.

        Returns True (without starting) when the jail is already running.
        """
        jails = self.master.ezjail_admin('list')
        status = self._status(jails)
        startup_script = None
        if status == 'unavailable':
            startup_script = self.startup_script(overrides=overrides)
            log.info("Creating instance '%s'", self.id)
            if 'ip' not in self.config:
                log.error("No IP address set for instance '%s'", self.id)
                sys.exit(1)
            try:
                self.master.ezjail_admin(
                    'create',
                    name=self._name,
                    ip=self.config['ip'],
                    flavour=self.config.get('flavour'))
            except EzjailError as e:
                for line in e.args[0].splitlines():
                    log.error(line)
                sys.exit(1)
            jails = self.master.ezjail_admin('list')
            jail = jails.get(self._name)
            # Install the user's startup script plus the rc.d one-shot
            # wrapper (rc_startup) that runs it on first boot.
            startup_dest = '%s/etc/startup_script' % jail['root']
            rc, out, err = self.master._exec(
                'sh', '-c', 'cat - > "%s"' % startup_dest,
                stdin=startup_script)
            if rc != 0:
                log.error("Startup script creation failed.")
                log.error(err)
                sys.exit(1)
            rc, out, err = self.master._exec("chmod", "0700", startup_dest)
            if rc != 0:
                log.error("Startup script chmod failed.")
                log.error(err)
                sys.exit(1)
            rc_startup_dest = '%s/etc/rc.d/ploy_startup_script' % jail['root']
            rc, out, err = self.master._exec(
                'sh', '-c', 'cat - > "%s"' % rc_startup_dest,
                stdin=rc_startup)
            if rc != 0:
                log.error("Startup rc script creation failed.")
                log.error(err)
                sys.exit(1)
            rc, out, err = self.master._exec("chmod", "0700", rc_startup_dest)
            if rc != 0:
                log.error("Startup rc script chmod failed.")
                log.error(err)
                sys.exit(1)
            status = self._status(jails)
        if status != 'stopped':
            log.info("Instance state: %s", status)
            log.info("Instance already started")
            return True
        # Patch the generated ezjail config so rc(8) start ordering on the
        # host can be tuned via the rc_provide/rc_require options.
        rc_provide = self.config.get('rc_provide', '')
        self.master._exec(
            "sed",
            "-i",
            "",
            "-e",
            "s/\# PROVIDE:.*$/\# PROVIDE: standard_ezjail %s %s/" % (self._name, rc_provide),
            "/usr/local/etc/ezjail/%s" % self._name)
        rc_require = self.config.get('rc_require')
        if rc_require is not None:
            self.master._exec(
                "sed",
                "-i",
                "",
                "-e",
                "s/\# REQUIRE:.*$/\# REQUIRE: %s/" % rc_require,
                "/usr/local/etc/ezjail/%s" % self._name)
        mounts = []
        for mount in self.config.get('mounts', []):
            src = mount['src'].format(
                zfs=self.master.zfs,
                name=self._name)
            dst = mount['dst'].format(
                name=self._name)
            create_mount = mount.get('create', False)
            mounts.append(dict(src=src, dst=dst, ro=mount.get('ro', False)))
            if create_mount:
                rc, out, err = self.master._exec("mkdir", "-p", src)
                if rc != 0:
                    # Pass both values as lazy logging args; the previous
                    # '%'-interpolation with a single argument raised
                    # TypeError on this error path.
                    log.error("Couldn't create source directory '%s' for mountpoint '%s'.", src, mount['src'])
                    log.error(err)
                    sys.exit(1)
        if mounts:
            jail = jails.get(self._name)
            jail_fstab = '/etc/fstab.%s' % self._name
            jail_root = jail['root'].rstrip('/')
            log.info("Setting up mount points")
            # Keep only the first fstab line (the jail root mount) and
            # rewrite the rest from the configured mounts.
            rc, out, err = self.master._exec("head", "-n", "1", jail_fstab)
            fstab = out.splitlines()
            fstab = fstab[:1]
            fstab.append('# mount points from ploy')
            for mount in mounts:
                self.master._exec(
                    "mkdir", "-p", "%s%s" % (jail_root, mount['dst']))
                if mount['ro']:
                    mode = 'ro'
                else:
                    mode = 'rw'
                fstab.append('%s %s%s nullfs %s 0 0' % (mount['src'], jail_root, mount['dst'], mode))
            fstab.append('')
            rc, out, err = self.master._exec(
                'sh', '-c', 'cat - > "%s"' % jail_fstab,
                stdin='\n'.join(fstab))
        if startup_script:
            log.info("Starting instance '%s' with startup script, this can take a while.", self.id)
        else:
            log.info("Starting instance '%s'", self.id)
        try:
            self.master.ezjail_admin(
                'start',
                name=self._name)
        except EzjailError as e:
            for line in e.args[0].splitlines():
                log.error(line)
            sys.exit(1)

    def stop(self, overrides=None):
        """Stop the jail if it is running."""
        status = self._status()
        if status == 'unavailable':
            log.info("Instance '%s' unavailable", self.id)
            return
        if status != 'running':
            log.info("Instance state: %s", status)
            log.info("Instance not stopped")
            return
        log.info("Stopping instance '%s'", self.id)
        self.master.ezjail_admin('stop', name=self._name)
        log.info("Instance stopped")

    def terminate(self):
        """Stop (waiting if necessary) and delete the jail."""
        jails = self.master.ezjail_admin('list')
        status = self._status(jails)
        if self.config.get('no-terminate', False):
            log.error("Instance '%s' is configured not to be terminated.", self.id)
            return
        if status == 'unavailable':
            log.info("Instance '%s' unavailable", self.id)
            return
        if status == 'running':
            log.info("Stopping instance '%s'", self.id)
            self.master.ezjail_admin('stop', name=self._name)
        if status != 'stopped':
            log.info('Waiting for jail to stop')
            while status != 'stopped':
                jails = self.master.ezjail_admin('list')
                status = self._status(jails)
                sys.stdout.write('.')
                sys.stdout.flush()
                time.sleep(1)
            # Terminate the progress-dot line; was a bare ``print``
            # expression (a no-op on Python 3).
            print()
        log.info("Terminating instance '%s'", self.id)
        self.master.ezjail_admin('delete', name=self._name)
        log.info("Instance terminated")
class ZFS_FS(object):
    """One ZFS filesystem described by a section of the ``ez-zfs`` config.

    On construction the mountpoint is resolved via ``zfs get``; if the
    dataset does not exist and the section sets ``create``, it is created
    first (any ``set-*`` options become ``-o`` properties).  If no
    mountpoint can be resolved the program aborts.

    Bug fix: the initial ``zfs get`` previously unpacked into
    ``rout``/``rerr`` while the success path read ``out``, so an already
    existing dataset raised NameError.
    """
    def __init__(self, zfs, name, config):
        self._name = name
        self.zfs = zfs
        self.config = config
        mp_args = (
            "zfs", "get", "-Hp", "-o", "property,value",
            "mountpoint", self['path'])
        # Unpack into out/err so the success branch below sees the output
        # of this query when the create branch is skipped.
        rc, out, err = self.zfs.master._exec(*mp_args)
        if rc != 0 and self.config.get('create', False):
            args = ['zfs', 'create']
            for k, v in self.config.items():
                if not k.startswith('set-'):
                    continue
                # NOTE(review): the single quotes become part of the argv
                # item here — confirm against how Executor joins commands.
                args.append("-o '%s=%s'" % (k[4:], v))
            args.append(self['path'])
            rc, out, err = self.zfs.master._exec(*args)
            if rc != 0:
                log.error(
                    "Couldn't create zfs filesystem '%s' at '%s'." % (
                        self._name, self['path']))
                log.error(err)
                sys.exit(1)
            # Re-query the mountpoint of the freshly created dataset.
            rc, out, err = self.zfs.master._exec(*mp_args)
        if rc == 0:
            info = out.strip().split('\t')
            assert info[0] == 'mountpoint'
            self.mountpoint = info[1]
            return
        log.error(
            "Trying to use non existing zfs filesystem '%s' at '%s'." % (
                self._name, self['path']))
        sys.exit(1)

    def __getitem__(self, key):
        """Config access; ``path`` values get ``{zfs}`` interpolated."""
        value = self.config[key]
        if key == 'path':
            return value.format(zfs=self.zfs)
        return value

    def __str__(self):
        # Interpolating a ZFS_FS into a format string yields its mountpoint.
        return self.mountpoint
class ZFS(object):
    """Lazy registry of :class:`ZFS_FS` objects, keyed by their section
    name in the master's ``ez-zfs`` configuration."""
    def __init__(self, master):
        self.master = master
        self.config = self.master.main_config.get('ez-zfs', {})
        self._cache = {}

    def __getitem__(self, key):
        # Construct each filesystem wrapper at most once; construction is
        # expensive (it shells out to ``zfs get``), so never build eagerly.
        cached = self._cache.get(key)
        if cached is None:
            cached = ZFS_FS(self, key, self.config[key])
            self._cache[key] = cached
        return cached
class EzjailProxyInstance(ProxyInstance):
    """Proxy instance representing the jail host itself.

    ``status()`` first delegates to the proxied host instance and, if the
    host is reachable/running, additionally logs one status line per
    configured jail and warns about jails present on the host but unknown
    to the configuration.
    """
    def status(self):
        result = None
        hasstatus = hasattr(self._proxied_instance, 'status')
        if hasstatus:
            result = self._proxied_instance.status()
        if not hasstatus or self._status() == 'running':
            try:
                jails = self.master.ezjail_admin('list')
            except EzjailError as e:
                log.error("Can't get status of jails: %s", e)
                return result
            # Track jail names we can't match to a configured instance.
            unknown = set(jails)
            for sid in sorted(self.master.instances):
                if sid == self.id:
                    continue
                instance = self.master.instances[sid]
                unknown.discard(instance._name)
                status = instance._status(jails)
                sip = instance.config.get('ip', '')
                jip = jails.get(instance._name, {}).get('ip', 'unknown ip')
                if status == 'running' and jip != sip:
                    # Highlight mismatches between actual and configured IP.
                    sip = "%s != configured %s" % (jip, sip)
                log.info("%-20s %-15s %15s" % (sid, status, sip))
            for sid in sorted(unknown):
                jip = jails[sid].get('ip', 'unknown ip')
                log.warn("Unknown jail found: %-20s %15s" % (sid, jip))
        return result
class Master(BaseMaster):
    """Master for one ``ez-master`` section: a FreeBSD host whose jails
    are managed by invoking ``ezjail-admin`` (optionally via sudo)
    through an :class:`Executor`."""
    sectiongroupname = 'ez-instance'
    instance_class = Instance
    # Executor for running commands on the jail host; set up in __init__.
    _exec = None
    def __init__(self, *args, **kwargs):
        BaseMaster.__init__(self, *args, **kwargs)
        self.debug = self.master_config.get('debug-commands', False)
        if 'instance' not in self.master_config:
            instance = PlainInstance(self, self.id, self.master_config)
        else:
            instance = self.master_config['instance']
        if instance:
            # The host itself is exposed as a proxy instance so users can
            # address it and get an aggregated jail status overview.
            self.instance = EzjailProxyInstance(self, self.id, self.master_config, instance)
            self.instance.sectiongroupname = 'ez-master'
            self.instances[self.id] = self.instance
        else:
            self.instance = None
        prefix_args = ()
        if self.master_config.get('sudo'):
            # Prefix every remote command with sudo when configured.
            prefix_args = ('sudo',)
        if self._exec is None:
            self._exec = Executor(
                instance=self.instance, prefix_args=prefix_args)
    @lazy
    def zfs(self):
        """Lazy registry of ZFS filesystems from the ez-zfs config."""
        return ZFS(self)
    @lazy
    def ezjail_admin_binary(self):
        """Path of the ezjail-admin tool on the host (configurable)."""
        binary = self.master_config.get('ezjail-admin', '/usr/local/bin/ezjail-admin')
        return binary
    def _ezjail_admin(self, *args):
        """Run ezjail-admin with *args*; wrap connect errors in EzjailError."""
        try:
            return self._exec(self.ezjail_admin_binary, *args)
        except socket.error as e:
            raise EzjailError("Couldn't connect to instance [%s]:\n%s" % (self.instance.config_id, e))
    @lazy
    def ezjail_admin_list_headers(self):
        """Sanity-check the column headers of ``ezjail-admin list`` output
        and return the normalized names used as dict keys elsewhere."""
        rc, out, err = self._ezjail_admin('list')
        if rc:
            msg = out.strip() + '\n' + err.strip()
            raise EzjailError(msg.strip())
        lines = out.splitlines()
        if len(lines) < 2:
            raise EzjailError("ezjail-admin list output too short:\n%s" % out.strip())
        headers = []
        current = ""
        # The second output line underlines each header with dashes; use
        # those runs to find the column boundaries in the first line.
        for i, c in enumerate(lines[1]):
            if c != '-' or i >= len(lines[0]):
                headers.append(current.strip())
                if i >= len(lines[0]):
                    break
                current = ""
            else:
                current = current + lines[0][i]
        if headers != ['STA', 'JID', 'IP', 'Hostname', 'Root Directory']:
            raise EzjailError("ezjail-admin list output has unknown headers:\n%s" % headers)
        return ('status', 'jid', 'ip', 'name', 'root')
    def ezjail_admin(self, command, **kwargs):
        """Dispatch one ezjail-admin subcommand ('console', 'create',
        'delete', 'list', 'start', 'stop'), raising EzjailError on a
        non-zero exit status; 'list' returns a dict keyed by jail name."""
        # make sure there is no whitespace in the arguments
        for k, v in kwargs.items():
            if v is None:
                continue
            if command == 'console' and k == 'cmd':
                continue
            if len(v.split()) != 1:
                log.error("The value '%s' of kwarg '%s' contains whitespace", v, k)
                sys.exit(1)
        if command == 'console':
            return self._ezjail_admin(
                'console',
                '-e',
                kwargs['cmd'],
                kwargs['name'])
        elif command == 'create':
            args = [
                'create',
                '-c', 'zfs']
            flavour = kwargs.get('flavour')
            if flavour is not None:
                args.extend(['-f', flavour])
            args.extend([
                kwargs['name'],
                kwargs['ip']])
            rc, out, err = self._ezjail_admin(*args)
            if rc:
                msg = out.strip() + '\n' + err.strip()
                raise EzjailError(msg.strip())
        elif command == 'delete':
            rc, out, err = self._ezjail_admin(
                'delete',
                '-fw',
                kwargs['name'])
            if rc:
                msg = out.strip() + '\n' + err.strip()
                raise EzjailError(msg.strip())
        elif command == 'list':
            rc, out, err = self._ezjail_admin('list')
            if rc:
                msg = out.strip() + '\n' + err.strip()
                raise EzjailError(msg.strip())
            lines = out.splitlines()
            if len(lines) < 2:
                raise EzjailError("ezjail-admin list output too short:\n%s" % out.strip())
            headers = self.ezjail_admin_list_headers
            jails = {}
            prev_entry = None
            for line in lines[2:]:
                line = line.strip()
                if not line:
                    continue
                # Continuation lines (additional IPs of the previous jail)
                # start with 'N/A' or a digit instead of a status code.
                if line.startswith('N/A') or line[0].isdigit():
                    # What if prev_entry is still None?
                    # the code fail here and someone who finds that failure
                    # will provide us with a patch!
                    jails[prev_entry]['ip'] = [jails[prev_entry]['ip'], line.split()[1]]
                else:
                    entry = dict(zip(headers, line.split()))
                    prev_entry = entry.pop('name')
                    jails[prev_entry] = entry
            return jails
        elif command == 'start':
            rc, out, err = self._ezjail_admin(
                'start',
                kwargs['name'])
            if rc:
                msg = out.strip() + '\n' + err.strip()
                raise EzjailError(msg.strip())
        elif command == 'stop':
            rc, out, err = self._ezjail_admin(
                'stop',
                kwargs['name'])
            if rc:
                msg = out.strip() + '\n' + err.strip()
                raise EzjailError(msg.strip())
        else:
            raise ValueError("Unknown command '%s'" % command)
class MountsMassager(BaseMassager):
    """Massager for the ``mounts`` option of ez-instance sections.

    Each non-empty line describes one mount as whitespace separated
    ``key=value`` pairs; returns a tuple of option dicts.  The ``create``
    and ``ro`` keys are coerced to booleans.

    Fixes over the previous revision: the boolean-parse error message used
    the already-overwritten ``None`` instead of the offending text, and
    values containing '=' crashed the unpacking of ``split('=')``.
    """

    # Keys whose values must parse as booleans.
    _bool_keys = ('create', 'ro')

    def __call__(self, config, sectionname):
        raw = BaseMassager.__call__(self, config, sectionname)
        mounts = []
        for line in raw.splitlines():
            pairs = line.split()
            if not pairs:
                continue
            options = {}
            for pair in pairs:
                if '=' not in pair:
                    raise ValueError("Mount option '%s' contains no equal sign." % pair)
                # Split on the first '=' only so values may contain '='.
                (key, _, value) = pair.partition('=')
                (key, value) = (key.strip(), value.strip())
                if key in self._bool_keys:
                    parsed = value_asbool(value)
                    if parsed is None:
                        # Report the original text, not the failed parse result.
                        raise ValueError("Unknown value %s for option %s in %s of %s:%s." % (value, key, self.key, self.sectiongroupname, sectionname))
                    value = parsed
                options[key] = value
            mounts.append(options)
        return tuple(mounts)
def get_common_massagers():
    """Return (massager class, option key) pairs shared with the plain plugin."""
    from ploy.plain import get_massagers as plain_massagers
    pairs = []
    for massager in plain_massagers():
        pairs.append((massager.__class__, massager.key))
    return pairs
def get_instance_massagers(sectiongroupname='instance'):
    """Return all massagers for a single instance section group."""
    from ploy.config import BooleanMassager
    from ploy.config import StartupScriptMassager
    # Start with the massagers shared with the plain plugin, bound to the
    # requested section group.
    massagers = [klass(sectiongroupname, name)
                 for klass, name in get_common_massagers()]
    massagers.append(MountsMassager(sectiongroupname, 'mounts'))
    massagers.append(BooleanMassager(sectiongroupname, 'no-terminate'))
    massagers.append(StartupScriptMassager(sectiongroupname, 'startup_script'))
    return massagers
def get_massagers():
    """Return every massager this plugin contributes, across all of its
    section groups (ez-instance, ez-master, ez-zfs)."""
    from ploy.config import BooleanMassager
    massagers = list(get_instance_massagers('ez-instance'))
    for klass, name in get_common_massagers():
        massagers.append(klass('ez-master', name))
    massagers.append(BooleanMassager('ez-master', 'sudo'))
    massagers.append(BooleanMassager('ez-master', 'debug-commands'))
    massagers.append(BooleanMassager('ez-zfs', 'create'))
    return massagers
def get_masters(ploy):
    """Yield a :class:`Master` for every ``ez-master`` config section."""
    for master_id, master_config in ploy.config.get('ez-master', {}).items():
        yield Master(ploy, master_id, master_config)
# Entry point dictionary picked up by ploy's plugin machinery.
plugin = dict(
    get_massagers=get_massagers,
    get_masters=get_masters)
| 36.282813 | 151 | 0.526248 | from fnmatch import fnmatch
from lazy import lazy
from ploy.common import BaseMaster, Executor, StartupScriptMixin
from ploy.common import parse_ssh_keygen
from ploy.config import BaseMassager, value_asbool
from ploy.plain import Instance as PlainInstance
from ploy.proxy import ProxyInstance
import logging
import re
import socket
import sys
import time
log = logging.getLogger('ploy_ezjail')
class EzjailError(Exception):
pass
rc_startup = """#!/bin/sh
#
# BEFORE: DAEMON
# PROVIDE: ploy_startup_script
#
# ploy startup script
. /etc/rc.subr
name=ploy_startup_script
start_cmd=startup
startup() {
# Remove traces of ourself
# N.B.: Do NOT rm $0, it points to /etc/rc
##########################
rm -f "/etc/rc.d/ploy_startup_script"
test -e /etc/startup_script && /etc/startup_script || true
test -e /etc/startup_script && chmod 0600 /etc/startup_script
}
run_rc_command "$1"
"""
class Instance(PlainInstance, StartupScriptMixin):
sectiongroupname = 'ez-instance'
_id_regexp = re.compile('^[a-zA-Z0-9_]+$')
@property
def _name(self):
return self.config.get('ezjail-name', self.id)
def validate_id(self, sid):
if self._id_regexp.match(sid) is None:
log.error("Invalid instance name '%s'. An ezjail instance name may only contain letters, numbers and underscores." % sid)
sys.exit(1)
return sid
def get_ip(self):
first_ip = self.config['ip']
if ',' in first_ip:
first_ip = first_ip.split(',')[0]
if '|' in first_ip:
first_ip = first_ip.split('|')[1]
return first_ip
def get_host(self):
return self.config.get('host', self.get_ip())
def get_fingerprint(self):
status = self._status()
if status == 'unavailable':
log.info("Instance '%s' unavailable", self.id)
sys.exit(1)
if status != 'running':
log.info("Instance state: %s", status)
sys.exit(1)
rc, out, err = self.master.ezjail_admin('console', name=self._name, cmd='ssh-keygen -lf /etc/ssh/ssh_host_rsa_key.pub')
info = out.split()
return info[1]
def get_fingerprints(self):
status = self._status()
if status == 'unavailable':
log.info("Instance '%s' unavailable", self.id)
sys.exit(1)
if status != 'running':
log.info("Instance state: %s", status)
sys.exit(1)
result = []
rc, out, err = self.master.ezjail_admin(
'console', name=self._name,
cmd='ls /etc/ssh/')
if rc != 0:
return result
pub_key_names = list(
x for x in out.splitlines()
if fnmatch(x, 'ssh_host*_key.pub'))
for pub_key_name in pub_key_names:
rc, out, err = self.master.ezjail_admin(
'console', name=self._name,
cmd='ssh-keygen -lf /etc/ssh/%s' % pub_key_name)
if rc != 0:
continue
(key,) = parse_ssh_keygen(out)
info = dict(
fingerprint=key.fingerprint,
keylen=key.keylen,
keytype=key.keytype)
result.append(info)
return result
def get_massagers(self):
return get_instance_massagers()
def init_ssh_key(self, user=None):
status = self._status()
if status == 'unavailable':
log.error("Instance '%s' unavailable", self.id)
raise self.paramiko.SSHException()
if status != 'running':
log.error("Instance state: %s", status)
raise self.paramiko.SSHException()
if 'proxyhost' not in self.config:
self.config['proxyhost'] = self.master.id
if 'proxycommand' not in self.config:
mi = self.master.instance
self.config['proxycommand'] = self.proxycommand_with_instance(mi)
return PlainInstance.init_ssh_key(self, user=user)
def _status(self, jails=None):
if jails is None:
jails = self.master.ezjail_admin('list')
if self._name not in jails:
return 'unavailable'
jail = jails[self._name]
status = jail['status']
if len(status) != 2 or status[0] not in 'DIEBZ' or status[1] not in 'RAS':
raise EzjailError("Invalid jail status '%s' for '%s'" % (status, self._name))
if status[1] == 'R':
return 'running'
elif status[1] == 'S':
return 'stopped'
raise EzjailError("Don't know how to handle mounted but not running jail '%s'" % self._name)
def status(self):
try:
jails = self.master.ezjail_admin('list')
except EzjailError as e:
log.error("Can't get status of jails: %s", e)
return
status = self._status(jails)
if status == 'unavailable':
log.info("Instance '%s' unavailable", self.id)
return
if status != 'running':
log.info("Instance state: %s", status)
return
log.info("Instance running.")
log.info("Instances jail id: %s" % jails[self._name]['jid'])
if self._name != self.id:
log.info("Instances jail name: %s" % self._name)
log.info("Instances jail ip: %s" % jails[self._name]['ip'])
def start(self, overrides=None):
jails = self.master.ezjail_admin('list')
status = self._status(jails)
startup_script = None
if status == 'unavailable':
startup_script = self.startup_script(overrides=overrides)
log.info("Creating instance '%s'", self.id)
if 'ip' not in self.config:
log.error("No IP address set for instance '%s'", self.id)
sys.exit(1)
try:
self.master.ezjail_admin(
'create',
name=self._name,
ip=self.config['ip'],
flavour=self.config.get('flavour'))
except EzjailError as e:
for line in e.args[0].splitlines():
log.error(line)
sys.exit(1)
jails = self.master.ezjail_admin('list')
jail = jails.get(self._name)
startup_dest = '%s/etc/startup_script' % jail['root']
rc, out, err = self.master._exec(
'sh', '-c', 'cat - > "%s"' % startup_dest,
stdin=startup_script)
if rc != 0:
log.error("Startup script creation failed.")
log.error(err)
sys.exit(1)
rc, out, err = self.master._exec("chmod", "0700", startup_dest)
if rc != 0:
log.error("Startup script chmod failed.")
log.error(err)
sys.exit(1)
rc_startup_dest = '%s/etc/rc.d/ploy_startup_script' % jail['root']
rc, out, err = self.master._exec(
'sh', '-c', 'cat - > "%s"' % rc_startup_dest,
stdin=rc_startup)
if rc != 0:
log.error("Startup rc script creation failed.")
log.error(err)
sys.exit(1)
rc, out, err = self.master._exec("chmod", "0700", rc_startup_dest)
if rc != 0:
log.error("Startup rc script chmod failed.")
log.error(err)
sys.exit(1)
status = self._status(jails)
if status != 'stopped':
log.info("Instance state: %s", status)
log.info("Instance already started")
return True
rc_provide = self.config.get('rc_provide', '')
self.master._exec(
"sed",
"-i",
"",
"-e",
"s/\# PROVIDE:.*$/\# PROVIDE: standard_ezjail %s %s/" % (self._name, rc_provide),
"/usr/local/etc/ezjail/%s" % self._name)
rc_require = self.config.get('rc_require')
if rc_require is not None:
self.master._exec(
"sed",
"-i",
"",
"-e",
"s/\# REQUIRE:.*$/\# REQUIRE: %s/" % rc_require,
"/usr/local/etc/ezjail/%s" % self._name)
mounts = []
for mount in self.config.get('mounts', []):
src = mount['src'].format(
zfs=self.master.zfs,
name=self._name)
dst = mount['dst'].format(
name=self._name)
create_mount = mount.get('create', False)
mounts.append(dict(src=src, dst=dst, ro=mount.get('ro', False)))
if create_mount:
rc, out, err = self.master._exec("mkdir", "-p", src)
if rc != 0:
log.error("Couldn't create source directory '%s' for mountpoint '%s'." % src, mount['src'])
log.error(err)
sys.exit(1)
if mounts:
jail = jails.get(self._name)
jail_fstab = '/etc/fstab.%s' % self._name
jail_root = jail['root'].rstrip('/')
log.info("Setting up mount points")
rc, out, err = self.master._exec("head", "-n", "1", jail_fstab)
fstab = out.splitlines()
fstab = fstab[:1]
fstab.append('
for mount in mounts:
self.master._exec(
"mkdir", "-p", "%s%s" % (jail_root, mount['dst']))
if mount['ro']:
mode = 'ro'
else:
mode = 'rw'
fstab.append('%s %s%s nullfs %s 0 0' % (mount['src'], jail_root, mount['dst'], mode))
fstab.append('')
rc, out, err = self.master._exec(
'sh', '-c', 'cat - > "%s"' % jail_fstab,
stdin='\n'.join(fstab))
if startup_script:
log.info("Starting instance '%s' with startup script, this can take a while.", self.id)
else:
log.info("Starting instance '%s'", self.id)
try:
self.master.ezjail_admin(
'start',
name=self._name)
except EzjailError as e:
for line in e.args[0].splitlines():
log.error(line)
sys.exit(1)
def stop(self, overrides=None):
status = self._status()
if status == 'unavailable':
log.info("Instance '%s' unavailable", self.id)
return
if status != 'running':
log.info("Instance state: %s", status)
log.info("Instance not stopped")
return
log.info("Stopping instance '%s'", self.id)
self.master.ezjail_admin('stop', name=self._name)
log.info("Instance stopped")
def terminate(self):
jails = self.master.ezjail_admin('list')
status = self._status(jails)
if self.config.get('no-terminate', False):
log.error("Instance '%s' is configured not to be terminated.", self.id)
return
if status == 'unavailable':
log.info("Instance '%s' unavailable", self.id)
return
if status == 'running':
log.info("Stopping instance '%s'", self.id)
self.master.ezjail_admin('stop', name=self._name)
if status != 'stopped':
log.info('Waiting for jail to stop')
while status != 'stopped':
jails = self.master.ezjail_admin('list')
status = self._status(jails)
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(1)
print
log.info("Terminating instance '%s'", self.id)
self.master.ezjail_admin('delete', name=self._name)
log.info("Instance terminated")
class ZFS_FS(object):
def __init__(self, zfs, name, config):
self._name = name
self.zfs = zfs
self.config = config
mp_args = (
"zfs", "get", "-Hp", "-o", "property,value",
"mountpoint", self['path'])
rc, rout, rerr = self.zfs.master._exec(*mp_args)
if rc != 0 and self.config.get('create', False):
args = ['zfs', 'create']
for k, v in self.config.items():
if not k.startswith('set-'):
continue
args.append("-o '%s=%s'" % (k[4:], v))
args.append(self['path'])
rc, out, err = self.zfs.master._exec(*args)
if rc != 0:
log.error(
"Couldn't create zfs filesystem '%s' at '%s'." % (
self._name, self['path']))
log.error(err)
sys.exit(1)
rc, out, err = self.zfs.master._exec(*mp_args)
if rc == 0:
info = out.strip().split('\t')
assert info[0] == 'mountpoint'
self.mountpoint = info[1]
return
log.error(
"Trying to use non existing zfs filesystem '%s' at '%s'." % (
self._name, self['path']))
sys.exit(1)
def __getitem__(self, key):
value = self.config[key]
if key == 'path':
return value.format(zfs=self.zfs)
return value
def __str__(self):
return self.mountpoint
class ZFS(object):
def __init__(self, master):
self.master = master
self.config = self.master.main_config.get('ez-zfs', {})
self._cache = {}
def __getitem__(self, key):
if key not in self._cache:
self._cache[key] = ZFS_FS(self, key, self.config[key])
return self._cache[key]
class EzjailProxyInstance(ProxyInstance):
def status(self):
result = None
hasstatus = hasattr(self._proxied_instance, 'status')
if hasstatus:
result = self._proxied_instance.status()
if not hasstatus or self._status() == 'running':
try:
jails = self.master.ezjail_admin('list')
except EzjailError as e:
log.error("Can't get status of jails: %s", e)
return result
unknown = set(jails)
for sid in sorted(self.master.instances):
if sid == self.id:
continue
instance = self.master.instances[sid]
unknown.discard(instance._name)
status = instance._status(jails)
sip = instance.config.get('ip', '')
jip = jails.get(instance._name, {}).get('ip', 'unknown ip')
if status == 'running' and jip != sip:
sip = "%s != configured %s" % (jip, sip)
log.info("%-20s %-15s %15s" % (sid, status, sip))
for sid in sorted(unknown):
jip = jails[sid].get('ip', 'unknown ip')
log.warn("Unknown jail found: %-20s %15s" % (sid, jip))
return result
class Master(BaseMaster):
    """Ploy master that manages FreeBSD jails through ``ezjail-admin``."""
    sectiongroupname = 'ez-instance'
    instance_class = Instance
    # Command executor; created lazily in __init__ unless preset (e.g. tests).
    _exec = None

    def __init__(self, *args, **kwargs):
        BaseMaster.__init__(self, *args, **kwargs)
        self.debug = self.master_config.get('debug-commands', False)
        if 'instance' not in self.master_config:
            instance = PlainInstance(self, self.id, self.master_config)
        else:
            instance = self.master_config['instance']
        if instance:
            self.instance = EzjailProxyInstance(self, self.id, self.master_config, instance)
            self.instance.sectiongroupname = 'ez-master'
            self.instances[self.id] = self.instance
        else:
            self.instance = None
        prefix_args = ()
        if self.master_config.get('sudo'):
            # Run all commands on the master through sudo.
            prefix_args = ('sudo',)
        if self._exec is None:
            self._exec = Executor(
                instance=self.instance, prefix_args=prefix_args)

    @lazy
    def zfs(self):
        # ZFS filesystem registry for this master (cached by @lazy).
        return ZFS(self)

    @lazy
    def ezjail_admin_binary(self):
        # Path of the ezjail-admin executable (cached by @lazy).
        binary = self.master_config.get('ezjail-admin', '/usr/local/bin/ezjail-admin')
        return binary

    def _ezjail_admin(self, *args):
        """Run ezjail-admin with *args*; return (rc, out, err)."""
        try:
            return self._exec(self.ezjail_admin_binary, *args)
        except socket.error as e:
            raise EzjailError("Couldn't connect to instance [%s]:\n%s" % (self.instance.config_id, e))

    @lazy
    def ezjail_admin_list_headers(self):
        """Validate the column layout of ``ezjail-admin list`` output.

        The header row (line 0) is sliced according to the runs of dashes
        in the separator row (line 1).  Raises EzjailError when the output
        does not match the expected five-column layout; otherwise returns
        the canonical internal header names.
        """
        rc, out, err = self._ezjail_admin('list')
        if rc:
            msg = out.strip() + '\n' + err.strip()
            raise EzjailError(msg.strip())
        lines = out.splitlines()
        if len(lines) < 2:
            raise EzjailError("ezjail-admin list output too short:\n%s" % out.strip())
        headers = []
        current = ""
        for i, c in enumerate(lines[1]):
            if c != '-' or i >= len(lines[0]):
                # End of a dash run (or past the header line): close the
                # header collected so far.
                headers.append(current.strip())
                if i >= len(lines[0]):
                    break
                current = ""
            else:
                current = current + lines[0][i]
        if headers != ['STA', 'JID', 'IP', 'Hostname', 'Root Directory']:
            raise EzjailError("ezjail-admin list output has unknown headers:\n%s" % headers)
        return ('status', 'jid', 'ip', 'name', 'root')

    def ezjail_admin(self, command, **kwargs):
        """Dispatch an ezjail-admin *command*.

        Supported commands: 'console', 'create', 'delete', 'list',
        'start', 'stop'.  Raises EzjailError when the underlying tool
        fails and ValueError for an unknown command.
        """
        # ezjail-admin cannot cope with whitespace inside argument values;
        # the console command string is the one legitimate exception.
        for k, v in kwargs.items():
            if v is None:
                continue
            if command == 'console' and k == 'cmd':
                continue
            if len(v.split()) != 1:
                log.error("The value '%s' of kwarg '%s' contains whitespace", v, k)
                sys.exit(1)
        if command == 'console':
            return self._ezjail_admin(
                'console',
                '-e',
                kwargs['cmd'],
                kwargs['name'])
        elif command == 'create':
            args = [
                'create',
                '-c', 'zfs']
            flavour = kwargs.get('flavour')
            if flavour is not None:
                args.extend(['-f', flavour])
            args.extend([
                kwargs['name'],
                kwargs['ip']])
            rc, out, err = self._ezjail_admin(*args)
            if rc:
                msg = out.strip() + '\n' + err.strip()
                raise EzjailError(msg.strip())
        elif command == 'delete':
            rc, out, err = self._ezjail_admin(
                'delete',
                '-fw',
                kwargs['name'])
            if rc:
                msg = out.strip() + '\n' + err.strip()
                raise EzjailError(msg.strip())
        elif command == 'list':
            rc, out, err = self._ezjail_admin('list')
            if rc:
                msg = out.strip() + '\n' + err.strip()
                raise EzjailError(msg.strip())
            lines = out.splitlines()
            if len(lines) < 2:
                raise EzjailError("ezjail-admin list output too short:\n%s" % out.strip())
            headers = self.ezjail_admin_list_headers
            jails = {}
            prev_entry = None
            for line in lines[2:]:
                line = line.strip()
                if not line:
                    continue
                if line.startswith('N/A') or line[0].isdigit():
                    # Continuation line: an additional IP for the jail from
                    # the previous line.
                    jails[prev_entry]['ip'] = [jails[prev_entry]['ip'], line.split()[1]]
                else:
                    entry = dict(zip(headers, line.split()))
                    prev_entry = entry.pop('name')
                    jails[prev_entry] = entry
            return jails
        elif command == 'start':
            rc, out, err = self._ezjail_admin(
                'start',
                kwargs['name'])
            if rc:
                msg = out.strip() + '\n' + err.strip()
                raise EzjailError(msg.strip())
        elif command == 'stop':
            rc, out, err = self._ezjail_admin(
                'stop',
                kwargs['name'])
            if rc:
                msg = out.strip() + '\n' + err.strip()
                raise EzjailError(msg.strip())
        else:
            raise ValueError("Unknown command '%s'" % command)
class MountsMassager(BaseMassager):
    """Massager turning a multi-line ``mounts`` option into a tuple of dicts.

    Each non-empty line consists of whitespace separated ``key=value``
    pairs; the ``create`` and ``ro`` values are converted to booleans.
    """

    def __call__(self, config, sectionname):
        raw = BaseMassager.__call__(self, config, sectionname)
        mounts = []
        for line in raw.splitlines():
            mount_options = line.split()
            if not mount_options:
                continue
            options = {}
            for mount_option in mount_options:
                if '=' not in mount_option:
                    raise ValueError("Mount option '%s' contains no equal sign." % mount_option)
                # Split only on the first '=' so option values may themselves
                # contain '=' (the old unconditional split crashed on those).
                key, value = mount_option.split('=', 1)
                key = key.strip()
                value = value.strip()
                if key in ('create', 'ro'):
                    converted = value_asbool(value)
                    if converted is None:
                        # Report the original value; previously the message
                        # always printed the failed conversion result (None).
                        raise ValueError("Unknown value %s for option %s in %s of %s:%s." % (value, key, self.key, self.sectiongroupname, sectionname))
                    value = converted
                options[key] = value
            mounts.append(options)
        return tuple(mounts)
def get_common_massagers():
    """Return (massager class, key) pairs shared with the plain plugin."""
    from ploy.plain import get_massagers as plain_massagers
    pairs = []
    for massager in plain_massagers():
        pairs.append((massager.__class__, massager.key))
    return pairs
def get_instance_massagers(sectiongroupname='instance'):
    """Build the list of massagers applied to jail instance sections."""
    from ploy.config import BooleanMassager
    from ploy.config import StartupScriptMassager
    massagers = [
        klass(sectiongroupname, name)
        for klass, name in get_common_massagers()]
    massagers.append(MountsMassager(sectiongroupname, 'mounts'))
    massagers.append(BooleanMassager(sectiongroupname, 'no-terminate'))
    massagers.append(StartupScriptMassager(sectiongroupname, 'startup_script'))
    return massagers
def get_massagers():
    """Assemble all massagers used by the ezjail plugin.

    Covers the 'ez-instance', 'ez-master' and 'ez-zfs' section groups,
    in that order.
    """
    from ploy.config import BooleanMassager
    massagers = list(get_instance_massagers('ez-instance'))
    massagers.extend(
        klass('ez-master', name)
        for klass, name in get_common_massagers())
    massagers.append(BooleanMassager('ez-master', 'sudo'))
    massagers.append(BooleanMassager('ez-master', 'debug-commands'))
    massagers.append(BooleanMassager('ez-zfs', 'create'))
    return massagers
def get_masters(ploy):
    """Yield one Master for every ``ez-master`` section in the config."""
    for master_id, master_config in ploy.config.get('ez-master', {}).items():
        yield Master(ploy, master_id, master_config)
# Entry point dictionary picked up by ploy's plugin loading machinery.
plugin = dict(
    get_massagers=get_massagers,
    get_masters=get_masters)
| true | true |
1c3156b5704c9bf5279b31e089a46032268f65f0 | 703 | py | Python | label_import/label.py | jscheytt/endo-loc | e425716e434087157ada3e35c309281dda3eca62 | [
"Apache-2.0"
] | null | null | null | label_import/label.py | jscheytt/endo-loc | e425716e434087157ada3e35c309281dda3eca62 | [
"Apache-2.0"
] | null | null | null | label_import/label.py | jscheytt/endo-loc | e425716e434087157ada3e35c309281dda3eca62 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
class ILabelValue(Enum):
    """Possible location states for an ILabel."""
    IN = 0
    OUT = 1
    MOVING_IN = 2
    MOVING_OUT = 3
    IN_BETWEEN = 4
    EXIT = 5
    # Sentinel for advertisement segments.
    ADS = -1
class ILabel:
    """Label denoting the location of the endoscope within a time segment."""

    def __init__(self, start, end, value):
        """Build an ILabel.

        :param start: Timestamp obj for start of segment containing label
        :param end: Timestamp obj for end of segment containing label
        :param value: ILabelValue obj
        """
        self.start = start
        self.end = end
        self.value = value

    def __len__(self):
        # Combined length of both timestamps plus the numeric label value.
        total = len(self.start) + len(self.end)
        return total + self.value.value
| 21.96875 | 73 | 0.601707 | from enum import Enum
class ILabelValue(Enum):
IN, OUT, MOVING_IN, MOVING_OUT, IN_BETWEEN, EXIT = range(6)
ADS = -1
class ILabel:
def __init__(self, start, end, value):
self.start = start
self.end = end
self.value = value
def __len__(self):
return len(self.start) + len(self.end) + self.value.value
| true | true |
1c3157ffaa98687e0d1d3da9cbb7888ecc676368 | 2,145 | py | Python | JarekG/2_python_controlflow/file_read_d.py | Khayn/2021-12-elearning-pythonana | a54e407adc8fb8c3a5fd2522735ae09cdef6540a | [
"MIT"
] | null | null | null | JarekG/2_python_controlflow/file_read_d.py | Khayn/2021-12-elearning-pythonana | a54e407adc8fb8c3a5fd2522735ae09cdef6540a | [
"MIT"
] | null | null | null | JarekG/2_python_controlflow/file_read_d.py | Khayn/2021-12-elearning-pythonana | a54e407adc8fb8c3a5fd2522735ae09cdef6540a | [
"MIT"
] | null | null | null | """
* Assignment: File Read CleanFile
* Required: no
* Complexity: medium
* Lines of code: 10 lines
* Time: 8 min
English:
1. Read `FILE` and for each line:
a. Remove leading and trailing whitespaces
b. Skip line if it is empty
c. Split line by whitespace
d. Separate IP address and hosts names
e. Append IP address and hosts names to `result`
2. Merge hostnames for the same IP
3. Run doctests - all must succeed
Polish:
1. Wczytaj `FILE` i dla każdej linii:
a. Usuń białe znaki na początku i końcu linii
b. Pomiń linię, jeżeli jest pusta
c. Podziel linię po białych znakach
d. Odseparuj adres IP i nazwy hostów
e. Dodaj adres IP i nazwy hostów do `result`
2. Scal nazwy hostów dla tego samego IP
3. Uruchom doctesty - wszystkie muszą się powieść
Hints:
* `str.isspace()`
* `str.split()`
* `str.strip()`
* `with`
* `open()`
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> from os import remove; remove(FILE)
>>> assert result is not Ellipsis, \
'Assign result to variable: `result`'
>>> assert type(result) is dict, \
'Variable `result` has invalid type, should be dict'
>>> assert all(type(x) is str for x in result.keys()), \
'All keys in `result` should be str'
>>> assert all(type(x) is list for x in result.values()), \
'All values in `result` should be list'
>>> result # doctest: +NORMALIZE_WHITESPACE
{'127.0.0.1': ['localhost'],
'10.13.37.1': ['nasa.gov', 'esa.int', 'roscosmos.ru'],
'255.255.255.255': ['broadcasthost'],
'::1': ['localhost']}
"""
FILE = '_temporary.txt'
DATA = """127.0.0.1 localhost
10.13.37.1 nasa.gov esa.int roscosmos.ru
255.255.255.255 broadcasthost
::1 localhost
"""

with open(FILE, mode='w') as file:
    file.write(DATA)

# dict[str,list[str]]: example {'10.13.37.1': ['nasa.gov', 'esa.int', 'roscosmos.ru'], ...}
result = {}
with open(FILE, mode='r') as f:
    for line in f:
        # Strip surrounding whitespace and skip blank lines (exercise steps
        # 1a/1b); the old code crashed unpacking an empty split result.
        line = line.strip()
        if not line:
            continue
        # First field is the IP address, remaining fields are host names.
        ip, *hosts = line.split()
        # Merge host names for IPs appearing on multiple lines (step 2).
        result.setdefault(ip, []).extend(hosts)
| 28.6 | 91 | 0.61352 |
FILE = '_temporary.txt'
DATA = """127.0.0.1 localhost
10.13.37.1 nasa.gov esa.int roscosmos.ru
255.255.255.255 broadcasthost
::1 localhost
"""
with open(FILE, mode='w') as file:
file.write(DATA)
result = {}
with open(FILE, mode='r') as f:
lines = f.readlines()
for line in lines:
ip, *hosts = line.strip().split()
result.setdefault(ip, []).extend(hosts)
| true | true |
1c3158a78f65cde7e262ec2d3dac6a9d0a56e74d | 35,011 | py | Python | rr_graph/channel.py | mithro/symbiflow-rr-graph | 33c7d952b72d03bd40265df13900526846abc7d5 | [
"0BSD"
] | 2 | 2020-11-25T03:00:33.000Z | 2021-12-21T07:29:44.000Z | rr_graph/channel.py | mithro/symbiflow-rr-graph | 33c7d952b72d03bd40265df13900526846abc7d5 | [
"0BSD"
] | 10 | 2020-07-24T17:09:35.000Z | 2022-02-07T17:55:49.000Z | rr_graph/channel.py | mithro/symbiflow-rr-graph | 33c7d952b72d03bd40265df13900526846abc7d5 | [
"0BSD"
] | 4 | 2020-11-06T09:07:14.000Z | 2021-09-17T16:38:19.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
"""
This file for packing tracks into channels.
It does *not* manage channel XML nodes.
Note channels go between switchboxes. Switchboxes cannot be at the last grid
coordinate.
Therefore, you need a grid size of at least 3 rows or cols to allow any
channels to exist. With a 3 width configuration you would get a channel of
length 0, connecting the switchbox at 0 to the switchbox at 1.
With above in mind, objects here entirely omit the last row/col and placing a
channel in the first is illegal.
Specifically:
* For CHANX: X=0 is invalid, X=grid.width-1 is invalid
* For CHANY: Y=0 is invalid, Y=grid.height-1 is invalid
"""
import pprint
import enum
import io
from collections import namedtuple
import lxml.etree as ET
from . import Position
from . import Size
from . import node_pos, single_element
from .utils.asserts import assert_eq
from .utils.asserts import assert_len_eq
from .utils.asserts import assert_type
from .utils.asserts import assert_type_or_none
# FIXME: doctests and static_property are not playing nice together.
# from . import static_property
static_property = property
class ChannelNotStraight(TypeError):
    """Raised when a track's start and end share neither a row nor a column."""
    pass
# Immutable base storage for Track; `name` and `type_hint` are added as
# plain (mutable) attributes in Track.__new__.
_Track = namedtuple(
    "Track", ("start", "end", "direction", "segment_id", "idx")
)
class Track(_Track):
"""
Represents a single CHANX or CHANY (track) within a channel.
IE The tracks of a x_list or y_list entry in <channels> element.
start: start Pos
end: end Pos
idx: XML index integer
"""
class Type(enum.Enum):
"""
subset of NodeType in graph2
"""
# Horizontal routing
X = 'CHANX'
# Vertical routing
Y = 'CHANY'
def __repr__(self):
return 'Track.Type.' + self.name
class Direction(enum.Enum):
INC = 'INC_DIR'
DEC = 'DEC_DIR'
BI = 'BI_DIR'
def __repr__(self):
return 'Track.Direction.' + self.name
def __new__(
cls,
start,
end,
direction=Direction.INC,
segment_id=0,
idx=None,
name=None,
type_hint=None,
):
"""Make most but not all attributes immutable"""
if not isinstance(start, Position):
start = Position(*start)
assert_type(start, Position)
if not isinstance(end, Position):
end = Position(*end)
assert_type(end, Position)
if start.x != end.x and start.y != end.y:
raise ChannelNotStraight(
"Track not straight! {}->{}".format(start, end)
)
assert_type(direction, cls.Direction)
assert_type(segment_id, int)
assert_type_or_none(idx, int)
assert_type_or_none(name, str)
assert_type_or_none(type_hint, cls.Type)
obj = _Track.__new__(cls, start, end, direction, segment_id, idx)
obj.name = name
obj.type_hint = type_hint
return obj
@static_property
def type(self):
"""Type of the channel.
Returns: Track.Type
>>> Track((1, 0), (10, 0)).type
Track.Type.X
>>> Track((0, 1), (0, 10)).type
Track.Type.Y
>>> Track((1, 1), (1, 1)).type
Traceback (most recent call last):
...
ValueError: Ambiguous type
>>> Track((1, 1), (1, 1), type_hint=Track.Type.X).type
Track.Type.X
>>> Track((1, 1), (1, 1), type_hint=Track.Type.Y).type
Track.Type.Y
"""
if self.type_hint:
return self.type_hint
guess = self.type_guess
if guess is None:
raise ValueError("Ambiguous type")
return guess
@static_property
def type_guess(self):
"""Type of the channel.
Returns: Track.Type
>>> Track((1, 0), (10, 0)).type_guess
Track.Type.X
>>> Track((0, 1), (0, 10)).type_guess
Track.Type.Y
>>> str(Track((1, 1), (1, 1)).type_guess)
'None'
"""
if self.start.x == self.end.x and self.start.y == self.end.y:
return None
elif self.start.x == self.end.x:
return Track.Type.Y
elif self.start.y == self.end.y:
return Track.Type.X
else:
assert False, self
def positions(self):
"""Generate all positions this track occupies"""
startx, endx = sorted([self.start.x, self.end.x])
starty, endy = sorted([self.start.y, self.end.y])
for x in range(startx, endx + 1):
for y in range(starty, endy + 1):
yield Position(x, y)
@static_property
def start0(self):
"""The non-constant start coordinate.
>>> Track((1, 0), (10, 0)).start0
1
>>> Track((0, 1), (0, 10)).start0
1
>>> Track((1, 1), (1, 1)).start0
Traceback (most recent call last):
...
ValueError: Ambiguous type
>>> Track((1, 1), (1, 1), type_hint=Track.Type.X).start0
1
>>> Track((10, 0), (1, 0)).start0
10
>>> Track((0, 10), (0, 1)).start0
10
"""
if self.type == Track.Type.X:
return self.start.x
elif self.type == Track.Type.Y:
return self.start.y
else:
assert False
@static_property
def end0(self):
"""The non-constant end coordinate.
>>> Track((1, 0), (10, 0)).end0
10
>>> Track((0, 1), (0, 10)).end0
10
>>> Track((1, 1), (1, 1)).end0
Traceback (most recent call last):
...
ValueError: Ambiguous type
>>> Track((1, 1), (1, 1), type_hint=Track.Type.X).end0
1
>>> Track((10, 0), (1, 0)).end0
1
>>> Track((0, 10), (0, 1)).end0
1
"""
if self.type == Track.Type.X:
return self.end.x
elif self.type == Track.Type.Y:
return self.end.y
else:
assert False, self.type
@static_property
def common(self):
"""The common coordinate value.
>>> Track((0, 0), (10, 0)).common
0
>>> Track((0, 0), (0, 10)).common
0
>>> Track((1, 1), (1, 1)).common
Traceback (most recent call last):
...
ValueError: Ambiguous type
>>> Track((1, 1), (1, 1), type_hint=Track.Type.X).common
1
>>> Track((10, 0), (0, 0)).common
0
>>> Track((0, 10), (0, 0)).common
0
>>> Track((4, 10), (4, 0)).common
4
"""
if self.type == Track.Type.X:
assert_eq(self.start.y, self.end.y)
return self.start.y
elif self.type == Track.Type.Y:
assert_eq(self.start.x, self.end.x)
return self.start.x
else:
assert False
@static_property
def length(self):
"""Length of the track.
>>> Track((0, 0), (10, 0)).length
10
>>> Track((0, 0), (0, 10)).length
10
>>> Track((1, 1), (1, 1)).length
0
>>> Track((10, 0), (0, 0)).length
10
>>> Track((0, 10), (0, 0)).length
10
"""
try:
return abs(self.end0 - self.start0)
except ValueError:
return 0
def new_idx(self, idx):
"""Create a new channel with the same start/end but new index value.
>>> s = (1, 4)
>>> e = (1, 8)
>>> c1 = Track(s, e, idx=0)
>>> c2 = c1.new_idx(2)
>>> assert_eq(c1.start, c2.start)
>>> assert_eq(c1.end, c2.end)
>>> c1.idx
0
>>> c2.idx
2
"""
return self.__class__(
self.start,
self.end,
self.direction,
self.segment_id,
idx,
name=self.name,
type_hint=self.type_hint,
)
def __repr__(self):
"""
>>> repr(Track((0, 0), (10, 0)))
'T((0,0), (10,0))'
>>> repr(Track((0, 0), (0, 10)))
'T((0,0), (0,10))'
>>> repr(Track((1, 2), (3, 2), idx=5))
'T((1,2), (3,2), 5)'
>>> repr(Track((1, 2), (3, 2), name="ABC"))
'T(ABC)'
>>> repr(Track((1, 2), (3, 2), idx=5, name="ABC"))
'T(ABC,5)'
"""
if self.name:
idx_str = ""
if self.idx is not None:
idx_str = ",{}".format(self.idx)
return "T({}{})".format(self.name, idx_str)
idx_str = ""
if self.idx is not None:
idx_str = ", {}".format(self.idx)
return "T(({},{}), ({},{}){})".format(
self.start.x, self.start.y, self.end.x, self.end.y, idx_str
)
def __str__(self):
"""
>>> str(Track((0, 0), (10, 0)))
'CHANX 0,0->10,0'
>>> str(Track((0, 0), (0, 10)))
'CHANY 0,0->0,10'
>>> str(Track((1, 2), (3, 2), idx=5))
'CHANX 1,2->3,2 @5'
>>> str(Track((1, 2), (3, 2), name="ABC"))
'ABC'
>>> str(Track((1, 2), (3, 2), idx=5, name="ABC"))
'ABC@5'
"""
idx_str = ""
if self.idx is not None:
idx_str = " @{}".format(self.idx)
if self.name:
return "{}{}".format(self.name, idx_str[1:])
return "{} {},{}->{},{}{}".format(
self.type.value, self.start.x, self.start.y, self.end.x,
self.end.y, idx_str
)
# Short alias.
T = Track
class ChannelGrid(dict):
"""
Functionality:
* Manages single type of track (either `CHANX` or `CHANY`).
* Manages channel width along grid.
* Allocates tracks within channels.
The `ChannelGrid` is indexed by `Position` and returns a sequence width all
the `Track`s at that position.
"""
def __init__(self, size, chan_type):
"""
size: Size representing tile grid width/height
chan_type: of Channels.Type
"""
self.chan_type = chan_type
self.size = Size(*size)
self.clear()
@property
def width(self):
"""Grid width
>>> g = ChannelGrid((6, 7), Track.Type.Y)
>>> g.width
6
"""
return self.size.width
@property
def height(self):
"""Grid height
>>> g = ChannelGrid((6, 7), Track.Type.Y)
>>> g.height
7
"""
return self.size.height
def column(self, x):
"""Get a y coordinate indexed list giving tracks at that x + y position"""
column = []
for y in range(0, self.height):
column.append(self[Position(x, y)])
return column
def row(self, y):
"""Get an x coordinate indexed list giving tracks at that x + y position"""
row = []
for x in range(0, self.width):
row.append(self[Position(x, y)])
return row
"""
dim_*: CHANX/CHANY abstraction functions
These can be used to write code that is not aware of specifics related to CHANX vs CHANY
"""
def dim_rc(self):
"""Get dimension a, the number of row/col positions"""
return {
Track.Type.X: self.height,
Track.Type.Y: self.width,
}[self.chan_type]
def dim_chanl(self):
"""Get dimension b, the number of valid track positions within a specific channel"""
return {
Track.Type.X: self.width,
Track.Type.Y: self.height,
}[self.chan_type]
def foreach_position(self):
"""Generate all valid placement positions (exclude border)"""
xmin, ymin = {
Track.Type.X: (1, 0),
Track.Type.Y: (0, 1),
}[self.chan_type]
for row in range(ymin, self.height):
for col in range(xmin, self.width):
yield Position(col, row)
def foreach_track(self):
"""Generate all current legal channel positions (exclude border)"""
for pos in self.foreach_position():
for ti, t in enumerate(self[pos]):
yield (pos, ti, t)
def slicen(self):
"""Get grid width or height corresponding to chanx/chany type"""
return {
Track.Type.X: self.height,
Track.Type.Y: self.width,
}[self.chan_type]
def slice(self, i):
"""Get row or col corresponding to chanx/chany type"""
return {
Track.Type.X: self.row,
Track.Type.Y: self.column,
}[self.chan_type](
i
)
def track_slice(self, t):
"""Get the row or column the track runs along"""
return {
Track.Type.X: self.row,
Track.Type.Y: self.column,
}[t.type](
t.common
)
def tracks(self):
"""Get all channels in a set"""
ret = set()
for _pos, _ti, t in self.foreach_track():
ret.add(t)
return ret
def validate_pos(self, pos, msg=''):
"""
A channel must go between switchboxes (where channels can cross)
Channels are upper right of tile
Therefore, the first position in a channel cannot have a track because
there is no proceeding switchbox"""
if msg:
msg = msg + ': '
# Gross error out of grid
if pos.x < 0 or pos.y < 0 or pos.x >= self.width or pos.y >= self.height:
raise ValueError(
"%sGrid %s, point %s out of grid size coordinate" %
(msg, self.size, pos)
)
if self.chan_type == Track.Type.X and pos.x == 0:
raise ValueError("%sInvalid CHANX x=0 point %s" % (msg, pos))
if self.chan_type == Track.Type.Y and pos.y == 0:
raise ValueError("%sInvalid CHANY y=0 point %s" % (msg, pos))
def create_track(self, t, idx=None):
"""
Channel allocator
Finds an optimal place to put the channel, increasing the channel width if necessary
If idx is given, it must go there
Throw exception if location is already occupied
>>> g = ChannelGrid((11, 11), Track.Type.X)
>>> # Adding the first channel
>>> g.create_track(Track((1, 6), (4, 6), name="A"))
T(A,0)
>>> g[(1,6)]
[T(A,0)]
>>> g[(2,6)]
[T(A,0)]
>>> g[(4,6)]
[T(A,0)]
>>> g[(5,6)]
[None]
>>> # Adding second non-overlapping second channel
>>> g.create_track(Track((5, 6), (7, 6), name="B"))
T(B,0)
>>> g[(4,6)]
[T(A,0)]
>>> g[(5,6)]
[T(B,0)]
>>> g[(7,6)]
[T(B,0)]
>>> g[(8,6)]
[None]
>>> # Adding third channel which overlaps with second channel
>>> g.create_track(Track((5, 6), (7, 6), name="T"))
T(T,1)
>>> g[(4,6)]
[T(A,0), None]
>>> g[(5,6)]
[T(B,0), T(T,1)]
>>> g[(7,6)]
[T(B,0), T(T,1)]
>>> # Adding a channel which overlaps, but is a row over
>>> g.create_track(Track((5, 7), (7, 7), name="D"))
T(D,0)
>>> g[(5,6)]
[T(B,0), T(T,1)]
>>> g[(5,7)]
[T(D,0)]
>>> # Adding fourth channel which overlaps both the first
>>> # and second+third channel
>>> g.create_track(Track((3, 6), (6, 6), name="E"))
T(E,2)
>>> g[(2,6)]
[T(A,0), None, None]
>>> g[(3,6)]
[T(A,0), None, T(E,2)]
>>> g[(6,6)]
[T(B,0), T(T,1), T(E,2)]
>>> g[(7,6)]
[T(B,0), T(T,1), None]
>>> # This channel fits in the hole left by the last one.
>>> g.create_track(Track((1, 6), (3, 6), name="F"))
T(F,1)
>>> g[(1,6)]
[T(A,0), T(F,1), None]
>>> g[(2,6)]
[T(A,0), T(F,1), None]
>>> g[(3,6)]
[T(A,0), T(F,1), T(E,2)]
>>> g[(4,6)]
[T(A,0), None, T(E,2)]
>>> # Add another channel which causes a hole
>>> g.create_track(Track((1, 6), (7, 6), name="G"))
T(G,3)
>>> g[(1,6)]
[T(A,0), T(F,1), None, T(G,3)]
>>> g[(2,6)]
[T(A,0), T(F,1), None, T(G,3)]
>>> g[(3,6)]
[T(A,0), T(F,1), T(E,2), T(G,3)]
>>> g[(4,6)]
[T(A,0), None, T(E,2), T(G,3)]
>>> g[(5,6)]
[T(B,0), T(T,1), T(E,2), T(G,3)]
>>> g[(6,6)]
[T(B,0), T(T,1), T(E,2), T(G,3)]
>>> g[(7,6)]
[T(B,0), T(T,1), None, T(G,3)]
>>> g[(8,6)]
[None, None, None, None]
"""
assert t.idx is None
force_idx = idx
self.validate_pos(t.start, 'start')
self.validate_pos(t.end, 'end')
if t.type_guess != self.chan_type:
if t.length != 0:
raise TypeError(
"Can only add channels of type {} which {} ({}) is not.".
format(self.chan_type, t, t.type)
)
else:
t.type_hint = self.chan_type
c = self.track_slice(t)
assert_len_eq(c)
# Find start and end
s, e = min(t.start0, t.end0), max(t.start0, t.end0)
assert e >= s, (e, '>=', s)
assert s < len(c), (s, '<', len(c), c)
assert e < len(c), (e + 1, '<', len(c), c)
# Find a idx that this channel fits.
# Normally start at first channel (0) unless forcing to a specific channel
max_idx = force_idx if force_idx is not None else 0
while True:
# Check each position if the track can fit
# Expanding channel width as required index grows
for p in c[s:e + 1]:
while len(p) < max_idx + 1:
p.append(None)
# Can't place here?
if p[max_idx] is not None:
# Grow track width
if force_idx is not None:
raise IndexError(
"Can't fit channel at index %d" % force_idx
)
max_idx += 1
break
# Was able to place into all locations
else:
break
# Make sure everything has the same length.
for p in c:
while len(p) < max_idx + 1:
p.append(None)
assert_len_eq(c)
t = t.new_idx(max_idx)
assert t.idx == max_idx
for p in c[s:e + 1]:
p[t.idx] = t
return t
def pretty_print(self):
"""
If type == Track.Type.X
A--AC-C
B-----B
D--DE-E
F-----F
If type == Track.Type.Y
AB DF
|| ||
|| ||
A| D|
C| E|
|| ||
CB EF
"""
def get_str(t):
if not t:
s = ""
elif t.name:
s = t.name
else:
s = str(t)
return s
# Work out how many characters the largest label takes up.
s_maxlen = 1
for row in range(0, self.height):
for col in range(0, self.width):
for t in self[(col, row)]:
s_maxlen = max(s_maxlen, len(get_str(t)))
assert s_maxlen > 0, s_maxlen
s_maxlen += 3
if self.chan_type == Track.Type.Y:
beg_fmt = "{:^%i}" % s_maxlen
end_fmt = beg_fmt
mid_fmt = beg_fmt.format("||")
elif self.chan_type == Track.Type.X:
beg_fmt = "{:>%i}>" % (s_maxlen - 1)
end_fmt = "->{:<%i}" % (s_maxlen - 2)
mid_fmt = "-" * s_maxlen
else:
assert False
non_fmt = " " * s_maxlen
"""
rows[row][col][c]
row: global row location
col: column of output
c: character showing occupation along a track
Channel width may vary across tiles, but all columns within that region
should have the same length
"""
rows = []
for y in range(0, self.height):
cols = []
for x in range(0, self.width):
# Header
hdri = {Track.Type.X: x, Track.Type.Y: y}[self.chan_type]
channels = [("|{: ^%i}" % (s_maxlen - 1)).format(hdri)]
for t in self[(x, y)]:
if not t:
fmt = non_fmt
elif t.start == t.end:
s = get_str(t)
channels.append(
"{} ".format(
"".join(
[
beg_fmt.format(s),
mid_fmt.format(s),
end_fmt.format(s),
]
)[:s_maxlen - 1]
)
)
continue
elif t.start == (x, y):
fmt = beg_fmt
elif t.end == (x, y):
fmt = end_fmt
else:
fmt = mid_fmt
channels.append(fmt.format(get_str(t)))
cols.append(channels)
rows.append(cols)
# Dump the track state as a string
f = io.StringIO()
def p(*args, **kw):
print(*args, file=f, **kw)
if self.chan_type == Track.Type.X:
for r in range(0, len(rows)):
assert_len_eq(rows[r])
# tracks + 1 for header
track_rows = len(rows[r][0])
for tracki in range(0, track_rows):
for c in range(0, len(rows[r])):
p(rows[r][c][tracki], end="")
# Close header
if tracki == 0:
p("|", end="")
p()
p("\n")
elif self.chan_type == Track.Type.Y:
for r in range(0, len(rows)):
# tracks + 1 for header
for c in range(0, len(rows[r])):
track_cols = len(rows[r][c])
p("|*", end="")
for tracki in range(0, track_cols):
p(rows[r][c][tracki], end="")
# Close header
if tracki == 0:
p("|", end="")
p(" ", end="")
p("")
# p("|*|")
else:
assert False
return f.getvalue()
def clear(self):
"""Remove tracks from all currently occupied positions, making channel width 0"""
for x in range(0, self.width):
for y in range(0, self.height):
self[Position(x, y)] = []
def check(self):
"""Self integrity check"""
# Verify uniform track length
if self.chan_type == Track.Type.X:
for y in range(self.height):
assert_len_eq(self.row(y))
elif self.chan_type == Track.Type.Y:
for x in range(self.width):
assert_len_eq(self.column(x))
else:
assert False
def density(self):
"""Return (number occupied positions, total number positions)"""
occupied = 0
net = 0
for _pos, _ti, t in self.foreach_track():
net += 1
if t is not None:
occupied += 1
return occupied, net
def fill_empty(self, segment_id, name=None):
tracks = []
for pos, ti, t in self.foreach_track():
if t is None:
tracks.append(
self.create_track(
Track(
pos,
pos,
segment_id=segment_id,
name=name,
type_hint=self.chan_type,
direction=Track.Direction.BI
),
idx=ti
)
)
return tracks
def channel_widths(self):
"""Return (min channel width, max channel width, row/col widths)"""
cwmin = float('+inf')
cwmax = float('-inf')
xy_list = []
for i in range(self.slicen()):
# track width should be consistent along a slice
# just take the first element
loc = self.slice(i)[0]
cwmin = min(cwmin, len(loc))
cwmax = max(cwmax, len(loc))
xy_list.append(len(loc))
return (cwmin, cwmax, xy_list)
def assert_width(self, width):
"""Assert all channels have specified --route_chan_width"""
for pos in self.foreach_position():
tracks = self[pos]
assert len(
tracks
) == width, 'Bad width Position(x=%d, y=%d): expect %d, got %d' % (
pos.x, pos.y, width, len(tracks)
)
def assert_full(self):
"""Assert all allocated channels are fully occupied"""
self.check()
# occupied, net = self.density()
# print("Occupied %d / %d" % (occupied, net))
for pos, ti, t in self.foreach_track():
assert t is not None, 'Unoccupied Position(x=%d, y=%d) track=%d' % (
pos.x, pos.y, ti
)
class Channels:
"""Holds all channels for the whole grid (X + Y)"""
def __init__(self, size):
self.size = size
self.x = ChannelGrid(size, Track.Type.X)
self.y = ChannelGrid(size, Track.Type.Y)
def create_diag_track(self, start, end, segment_id, idx=None):
# Actually these can be tuple as well
# assert_type(start, Pos)
# assert_type(end, Pos)
# Create track(s)
try:
return (self.create_xy_track(start, end, segment_id, idx=idx), )
except ChannelNotStraight:
assert idx is None, idx
corner = (start.x, end.y)
ta = self.create_xy_track(start, corner, segment_id)[0]
tb = self.create_xy_track(corner, end, segment_id)[0]
return (ta, tb)
def create_xy_track(
self,
start,
end,
segment_id,
idx=None,
name=None,
typeh=None,
direction=None
):
"""
idx: None to automatically allocate
"""
# Actually these can be tuple as well
# assert_type(start, Pos)
# assert_type(end, Pos)
# Create track(s)
# Will throw exception if not straight
t = Track(
start,
end,
segment_id=segment_id,
name=name,
type_hint=typeh,
direction=direction
)
# Add the track to associated channel list
# Get the track now with the index assigned
t = {
Track.Type.X: self.x.create_track,
Track.Type.Y: self.y.create_track
}[t.type](
t, idx=idx
)
# print('create %s %s to %s idx %s' % (t.type, start, end, idx))
assert t.idx is not None
if typeh:
assert t.type == typeh, (t.type.value, typeh)
return t
def pad_channels(self, segment_id):
tracks = []
tracks.extend(self.x.fill_empty(segment_id))
tracks.extend(self.y.fill_empty(segment_id))
return tracks
def pretty_print(self):
s = ''
s += 'X\n'
s += self.x.pretty_print()
s += 'Y\n'
s += self.y.pretty_print()
return s
def clear(self):
"""Remove all channels"""
self.x.clear()
self.y.clear()
def from_xml_nodes(self, nodes_xml):
"""Add channels from <nodes> CHANX/CHANY"""
for node_xml in nodes_xml:
ntype = node_xml.get('type')
if ntype not in ('CHANX', 'CHANY'):
continue
ntype_e = Track.Type(ntype)
direction = Track.Direction(node_xml.get('direction'))
loc = single_element(node_xml, 'loc')
idx = int(loc.get('ptc'))
pos_low, pos_high = node_pos(node_xml)
# print('Importing %s @ %s:%s :: %d' % (ntype, pos_low, pos_high, idx))
segment_xml = single_element(node_xml, 'segment')
segment_id = int(segment_xml.get('segment_id'))
# idx will get assigned when adding to track
try:
self.create_xy_track(
pos_low,
pos_high,
segment_id,
idx=idx,
# XML has no name concept. Should it?
name=None,
typeh=ntype_e,
direction=direction
)
except Exception:
print("Bad XML: %s" % (ET.tostring(node_xml)))
raise
def to_xml_channels(self, channels_xml):
channels_xml.clear()
# channel entry
cw_xmin, cw_xmax, x_lists = self.x.channel_widths()
cw_ymin, cw_ymax, y_lists = self.y.channel_widths()
cw_max = max(cw_xmax, cw_ymax)
ET.SubElement(
channels_xml, 'channel', {
'chan_width_max': str(cw_max),
'x_min': str(cw_xmin),
'x_max': str(cw_xmax),
'y_min': str(cw_ymin),
'y_max': str(cw_ymax),
}
)
# x_list / y_list tries
for i, info in enumerate(x_lists):
ET.SubElement(
channels_xml, 'x_list', {
'index': str(i),
'info': str(info)
}
)
for i, info in enumerate(y_lists):
ET.SubElement(
channels_xml, 'y_list', {
'index': str(i),
'info': str(info)
}
)
    def to_xml(self, xml_graph):
        """Rewrite the <channels> element of the given rr_graph XML in place."""
        self.to_xml_channels(single_element(xml_graph, 'channels'))
def TX(start, end, idx=None, name=None, direction=None, segment_id=None):
    """Shorthand for building a horizontal (CHANX) Track.

    direction defaults to Track.Direction.INC and segment_id to 0.
    """
    return T(
        start,
        end,
        direction=Track.Direction.INC if direction is None else direction,
        segment_id=0 if segment_id is None else segment_id,
        idx=idx,
        name=name,
        type_hint=Track.Type.X,
    )
def TY(start, end, idx=None, name=None, direction=None, segment_id=None):
    """Shorthand for building a vertical (CHANY) Track.

    direction defaults to Track.Direction.INC and segment_id to 0.
    """
    return T(
        start,
        end,
        direction=Track.Direction.INC if direction is None else direction,
        segment_id=0 if segment_id is None else segment_id,
        idx=idx,
        name=name,
        type_hint=Track.Type.Y,
    )
def docprint(x):
    """Pretty-print the lines of *x* as a list, for stable doctest output."""
    lines = x.splitlines()
    pprint.pprint(lines)
def create_test_channel_grid():
    """Build the 6x3 CHANX fixture grid shared by the doctest functions.

    Row y=0 holds tracks AA-DD (the overlaps force three track slots);
    row y=1 holds aa-cc; row y=2 is left empty.
    """
    g = ChannelGrid((6, 3), Track.Type.X)
    g.create_track(TX((1, 0), (5, 0), name="AA"))
    g.create_track(TX((1, 0), (3, 0), name="BB"))
    g.create_track(TX((2, 0), (5, 0), name="CC"))
    g.create_track(TX((1, 0), (1, 0), name="DD"))
    g.create_track(TX((1, 1), (3, 1), name="aa"))
    g.create_track(TX((4, 1), (5, 1), name="bb"))
    g.create_track(TX((1, 1), (5, 1), name="cc"))
    # Verify every row of the grid ended up with a consistent track count.
    g.check()
    return g
def test_x_auto():
"""
>>> docprint(test_x_auto())
['| 0 | 1 | 2 | 3 | 4 | 5 |',
' AA>---------------->AA ',
' BB>------>BB ',
' DD CC>----------->CC ',
'',
'',
'| 0 | 1 | 2 | 3 | 4 | 5 |',
' aa>------>aa bb>->bb ',
' cc>---------------->cc ',
'',
'',
'| 0 | 1 | 2 | 3 | 4 | 5 |',
'',
'']
"""
g = create_test_channel_grid()
return g.pretty_print()
def test_pad():
"""
>>> docprint(test_pad())
['| 0 | 1 | 2 | 3 | 4 | 5 |',
' AA>---------------->AA ',
' BB>------>BB XX XX ',
' DD CC>----------->CC ',
'',
'',
'| 0 | 1 | 2 | 3 | 4 | 5 |',
' aa>------>aa bb>->bb ',
' cc>---------------->cc ',
'',
'',
'| 0 | 1 | 2 | 3 | 4 | 5 |',
'',
'']
"""
g = create_test_channel_grid()
g.fill_empty(0, name='XX')
g.check()
return g.pretty_print()
def test_x_manual():
"""
>>> pprint.pprint(test_x_manual().splitlines())
['| 0 | 1 | 2 | 3 | 4 | 5 |',
' AA>---------------->AA ',
' BB>------>BB ',
' DD CC>----------->CC ',
'',
'',
'| 0 | 1 | 2 | 3 | 4 | 5 |',
' aa>------>aa bb>->bb ',
' cc>---------------->cc ',
'',
'',
'| 0 | 1 | 2 | 3 | 4 | 5 |',
'',
'']
"""
g = ChannelGrid((6, 3), Track.Type.X)
g.create_track(TX((1, 0), (5, 0), name="AA"), idx=0)
g.create_track(TX((1, 0), (3, 0), name="BB"), idx=1)
g.create_track(TX((2, 0), (5, 0), name="CC"), idx=2)
g.create_track(TX((1, 0), (1, 0), name="DD"), idx=2)
g.create_track(TX((1, 1), (3, 1), name="aa"), idx=0)
g.create_track(TX((4, 1), (5, 1), name="bb"), idx=0)
g.create_track(TX((1, 1), (5, 1), name="cc"), idx=1)
try:
g.create_track(TX((1, 1), (5, 1), name="dd"), idx=1)
assert False, "Should have failed to place"
except IndexError:
pass
g.check()
return g.pretty_print()
def test_y_auto():
"""
>>> docprint(test_y_auto())
['|*| 0 | |*| 0 | |*| 0 | ',
'|*| 1 | AA BB DD |*| 1 | aa cc |*| 1 | ',
'|*| 2 | || || CC |*| 2 | || || |*| 2 | ',
'|*| 3 | || BB || |*| 3 | aa || |*| 3 | ',
'|*| 4 | || || |*| 4 | bb || |*| 4 | ',
'|*| 5 | AA CC |*| 5 | bb cc |*| 5 | ']
"""
g = ChannelGrid((3, 6), Track.Type.Y)
g.create_track(TY((0, 1), (0, 5), name="AA"))
g.create_track(TY((0, 1), (0, 3), name="BB"))
g.create_track(TY((0, 2), (0, 5), name="CC"))
g.create_track(TY((0, 1), (0, 1), name="DD"))
g.create_track(TY((1, 1), (1, 3), name="aa"))
g.create_track(TY((1, 4), (1, 5), name="bb"))
g.create_track(TY((1, 1), (1, 5), name="cc"))
g.check()
return g.pretty_print()
if __name__ == "__main__":
    import doctest

    print('doctest: begin')
    # testmod() returns TestResults(failed, attempted).
    results = doctest.testmod()
    assert results.attempted > 0
    assert results.failed == 0, "Doctests failed!"
    print('doctest: end')
| 29.273411 | 92 | 0.464797 |
import pprint
import enum
import io
from collections import namedtuple
import lxml.etree as ET
from . import Position
from . import Size
from . import node_pos, single_element
from .utils.asserts import assert_eq
from .utils.asserts import assert_len_eq
from .utils.asserts import assert_type
from .utils.asserts import assert_type_or_none
static_property = property
class ChannelNotStraight(TypeError):
    """Raised when a track's start and end differ in both x and y."""
    pass
_Track = namedtuple(
"Track", ("start", "end", "direction", "segment_id", "idx")
)
class Track(_Track):
class Type(enum.Enum):
X = 'CHANX'
Y = 'CHANY'
def __repr__(self):
return 'Track.Type.' + self.name
class Direction(enum.Enum):
INC = 'INC_DIR'
DEC = 'DEC_DIR'
BI = 'BI_DIR'
def __repr__(self):
return 'Track.Direction.' + self.name
def __new__(
cls,
start,
end,
direction=Direction.INC,
segment_id=0,
idx=None,
name=None,
type_hint=None,
):
if not isinstance(start, Position):
start = Position(*start)
assert_type(start, Position)
if not isinstance(end, Position):
end = Position(*end)
assert_type(end, Position)
if start.x != end.x and start.y != end.y:
raise ChannelNotStraight(
"Track not straight! {}->{}".format(start, end)
)
assert_type(direction, cls.Direction)
assert_type(segment_id, int)
assert_type_or_none(idx, int)
assert_type_or_none(name, str)
assert_type_or_none(type_hint, cls.Type)
obj = _Track.__new__(cls, start, end, direction, segment_id, idx)
obj.name = name
obj.type_hint = type_hint
return obj
@static_property
def type(self):
if self.type_hint:
return self.type_hint
guess = self.type_guess
if guess is None:
raise ValueError("Ambiguous type")
return guess
@static_property
def type_guess(self):
if self.start.x == self.end.x and self.start.y == self.end.y:
return None
elif self.start.x == self.end.x:
return Track.Type.Y
elif self.start.y == self.end.y:
return Track.Type.X
else:
assert False, self
def positions(self):
startx, endx = sorted([self.start.x, self.end.x])
starty, endy = sorted([self.start.y, self.end.y])
for x in range(startx, endx + 1):
for y in range(starty, endy + 1):
yield Position(x, y)
@static_property
def start0(self):
if self.type == Track.Type.X:
return self.start.x
elif self.type == Track.Type.Y:
return self.start.y
else:
assert False
@static_property
def end0(self):
if self.type == Track.Type.X:
return self.end.x
elif self.type == Track.Type.Y:
return self.end.y
else:
assert False, self.type
@static_property
def common(self):
if self.type == Track.Type.X:
assert_eq(self.start.y, self.end.y)
return self.start.y
elif self.type == Track.Type.Y:
assert_eq(self.start.x, self.end.x)
return self.start.x
else:
assert False
@static_property
def length(self):
try:
return abs(self.end0 - self.start0)
except ValueError:
return 0
def new_idx(self, idx):
return self.__class__(
self.start,
self.end,
self.direction,
self.segment_id,
idx,
name=self.name,
type_hint=self.type_hint,
)
def __repr__(self):
if self.name:
idx_str = ""
if self.idx is not None:
idx_str = ",{}".format(self.idx)
return "T({}{})".format(self.name, idx_str)
idx_str = ""
if self.idx is not None:
idx_str = ", {}".format(self.idx)
return "T(({},{}), ({},{}){})".format(
self.start.x, self.start.y, self.end.x, self.end.y, idx_str
)
def __str__(self):
idx_str = ""
if self.idx is not None:
idx_str = " @{}".format(self.idx)
if self.name:
return "{}{}".format(self.name, idx_str[1:])
return "{} {},{}->{},{}{}".format(
self.type.value, self.start.x, self.start.y, self.end.x,
self.end.y, idx_str
)
T = Track
class ChannelGrid(dict):
def __init__(self, size, chan_type):
self.chan_type = chan_type
self.size = Size(*size)
self.clear()
@property
def width(self):
return self.size.width
@property
def height(self):
return self.size.height
def column(self, x):
column = []
for y in range(0, self.height):
column.append(self[Position(x, y)])
return column
def row(self, y):
row = []
for x in range(0, self.width):
row.append(self[Position(x, y)])
return row
def dim_rc(self):
return {
Track.Type.X: self.height,
Track.Type.Y: self.width,
}[self.chan_type]
def dim_chanl(self):
return {
Track.Type.X: self.width,
Track.Type.Y: self.height,
}[self.chan_type]
def foreach_position(self):
xmin, ymin = {
Track.Type.X: (1, 0),
Track.Type.Y: (0, 1),
}[self.chan_type]
for row in range(ymin, self.height):
for col in range(xmin, self.width):
yield Position(col, row)
def foreach_track(self):
for pos in self.foreach_position():
for ti, t in enumerate(self[pos]):
yield (pos, ti, t)
def slicen(self):
return {
Track.Type.X: self.height,
Track.Type.Y: self.width,
}[self.chan_type]
def slice(self, i):
return {
Track.Type.X: self.row,
Track.Type.Y: self.column,
}[self.chan_type](
i
)
def track_slice(self, t):
return {
Track.Type.X: self.row,
Track.Type.Y: self.column,
}[t.type](
t.common
)
def tracks(self):
ret = set()
for _pos, _ti, t in self.foreach_track():
ret.add(t)
return ret
def validate_pos(self, pos, msg=''):
if msg:
msg = msg + ': '
if pos.x < 0 or pos.y < 0 or pos.x >= self.width or pos.y >= self.height:
raise ValueError(
"%sGrid %s, point %s out of grid size coordinate" %
(msg, self.size, pos)
)
if self.chan_type == Track.Type.X and pos.x == 0:
raise ValueError("%sInvalid CHANX x=0 point %s" % (msg, pos))
if self.chan_type == Track.Type.Y and pos.y == 0:
raise ValueError("%sInvalid CHANY y=0 point %s" % (msg, pos))
def create_track(self, t, idx=None):
assert t.idx is None
force_idx = idx
self.validate_pos(t.start, 'start')
self.validate_pos(t.end, 'end')
if t.type_guess != self.chan_type:
if t.length != 0:
raise TypeError(
"Can only add channels of type {} which {} ({}) is not.".
format(self.chan_type, t, t.type)
)
else:
t.type_hint = self.chan_type
c = self.track_slice(t)
assert_len_eq(c)
s, e = min(t.start0, t.end0), max(t.start0, t.end0)
assert e >= s, (e, '>=', s)
assert s < len(c), (s, '<', len(c), c)
assert e < len(c), (e + 1, '<', len(c), c)
max_idx = force_idx if force_idx is not None else 0
while True:
for p in c[s:e + 1]:
while len(p) < max_idx + 1:
p.append(None)
if p[max_idx] is not None:
# Grow track width
if force_idx is not None:
raise IndexError(
"Can't fit channel at index %d" % force_idx
)
max_idx += 1
break
else:
break
for p in c:
while len(p) < max_idx + 1:
p.append(None)
assert_len_eq(c)
t = t.new_idx(max_idx)
assert t.idx == max_idx
for p in c[s:e + 1]:
p[t.idx] = t
return t
def pretty_print(self):
def get_str(t):
if not t:
s = ""
elif t.name:
s = t.name
else:
s = str(t)
return s
s_maxlen = 1
for row in range(0, self.height):
for col in range(0, self.width):
for t in self[(col, row)]:
s_maxlen = max(s_maxlen, len(get_str(t)))
assert s_maxlen > 0, s_maxlen
s_maxlen += 3
if self.chan_type == Track.Type.Y:
beg_fmt = "{:^%i}" % s_maxlen
end_fmt = beg_fmt
mid_fmt = beg_fmt.format("||")
elif self.chan_type == Track.Type.X:
beg_fmt = "{:>%i}>" % (s_maxlen - 1)
end_fmt = "->{:<%i}" % (s_maxlen - 2)
mid_fmt = "-" * s_maxlen
else:
assert False
non_fmt = " " * s_maxlen
rows = []
for y in range(0, self.height):
cols = []
for x in range(0, self.width):
hdri = {Track.Type.X: x, Track.Type.Y: y}[self.chan_type]
channels = [("|{: ^%i}" % (s_maxlen - 1)).format(hdri)]
for t in self[(x, y)]:
if not t:
fmt = non_fmt
elif t.start == t.end:
s = get_str(t)
channels.append(
"{} ".format(
"".join(
[
beg_fmt.format(s),
mid_fmt.format(s),
end_fmt.format(s),
]
)[:s_maxlen - 1]
)
)
continue
elif t.start == (x, y):
fmt = beg_fmt
elif t.end == (x, y):
fmt = end_fmt
else:
fmt = mid_fmt
channels.append(fmt.format(get_str(t)))
cols.append(channels)
rows.append(cols)
f = io.StringIO()
def p(*args, **kw):
print(*args, file=f, **kw)
if self.chan_type == Track.Type.X:
for r in range(0, len(rows)):
assert_len_eq(rows[r])
track_rows = len(rows[r][0])
for tracki in range(0, track_rows):
for c in range(0, len(rows[r])):
p(rows[r][c][tracki], end="")
if tracki == 0:
p("|", end="")
p()
p("\n")
elif self.chan_type == Track.Type.Y:
for r in range(0, len(rows)):
for c in range(0, len(rows[r])):
track_cols = len(rows[r][c])
p("|*", end="")
for tracki in range(0, track_cols):
p(rows[r][c][tracki], end="")
if tracki == 0:
p("|", end="")
p(" ", end="")
p("")
else:
assert False
return f.getvalue()
def clear(self):
for x in range(0, self.width):
for y in range(0, self.height):
self[Position(x, y)] = []
def check(self):
if self.chan_type == Track.Type.X:
for y in range(self.height):
assert_len_eq(self.row(y))
elif self.chan_type == Track.Type.Y:
for x in range(self.width):
assert_len_eq(self.column(x))
else:
assert False
def density(self):
occupied = 0
net = 0
for _pos, _ti, t in self.foreach_track():
net += 1
if t is not None:
occupied += 1
return occupied, net
def fill_empty(self, segment_id, name=None):
tracks = []
for pos, ti, t in self.foreach_track():
if t is None:
tracks.append(
self.create_track(
Track(
pos,
pos,
segment_id=segment_id,
name=name,
type_hint=self.chan_type,
direction=Track.Direction.BI
),
idx=ti
)
)
return tracks
def channel_widths(self):
cwmin = float('+inf')
cwmax = float('-inf')
xy_list = []
for i in range(self.slicen()):
loc = self.slice(i)[0]
cwmin = min(cwmin, len(loc))
cwmax = max(cwmax, len(loc))
xy_list.append(len(loc))
return (cwmin, cwmax, xy_list)
def assert_width(self, width):
for pos in self.foreach_position():
tracks = self[pos]
assert len(
tracks
) == width, 'Bad width Position(x=%d, y=%d): expect %d, got %d' % (
pos.x, pos.y, width, len(tracks)
)
def assert_full(self):
self.check()
for pos, ti, t in self.foreach_track():
assert t is not None, 'Unoccupied Position(x=%d, y=%d) track=%d' % (
pos.x, pos.y, ti
)
class Channels:
def __init__(self, size):
self.size = size
self.x = ChannelGrid(size, Track.Type.X)
self.y = ChannelGrid(size, Track.Type.Y)
def create_diag_track(self, start, end, segment_id, idx=None):
try:
return (self.create_xy_track(start, end, segment_id, idx=idx), )
except ChannelNotStraight:
assert idx is None, idx
corner = (start.x, end.y)
ta = self.create_xy_track(start, corner, segment_id)[0]
tb = self.create_xy_track(corner, end, segment_id)[0]
return (ta, tb)
def create_xy_track(
self,
start,
end,
segment_id,
idx=None,
name=None,
typeh=None,
direction=None
):
t = Track(
start,
end,
segment_id=segment_id,
name=name,
type_hint=typeh,
direction=direction
)
t = {
Track.Type.X: self.x.create_track,
Track.Type.Y: self.y.create_track
}[t.type](
t, idx=idx
)
assert t.idx is not None
if typeh:
assert t.type == typeh, (t.type.value, typeh)
return t
def pad_channels(self, segment_id):
tracks = []
tracks.extend(self.x.fill_empty(segment_id))
tracks.extend(self.y.fill_empty(segment_id))
return tracks
def pretty_print(self):
s = ''
s += 'X\n'
s += self.x.pretty_print()
s += 'Y\n'
s += self.y.pretty_print()
return s
def clear(self):
self.x.clear()
self.y.clear()
def from_xml_nodes(self, nodes_xml):
for node_xml in nodes_xml:
ntype = node_xml.get('type')
if ntype not in ('CHANX', 'CHANY'):
continue
ntype_e = Track.Type(ntype)
direction = Track.Direction(node_xml.get('direction'))
loc = single_element(node_xml, 'loc')
idx = int(loc.get('ptc'))
pos_low, pos_high = node_pos(node_xml)
segment_xml = single_element(node_xml, 'segment')
segment_id = int(segment_xml.get('segment_id'))
try:
self.create_xy_track(
pos_low,
pos_high,
segment_id,
idx=idx,
name=None,
typeh=ntype_e,
direction=direction
)
except Exception:
print("Bad XML: %s" % (ET.tostring(node_xml)))
raise
def to_xml_channels(self, channels_xml):
channels_xml.clear()
cw_xmin, cw_xmax, x_lists = self.x.channel_widths()
cw_ymin, cw_ymax, y_lists = self.y.channel_widths()
cw_max = max(cw_xmax, cw_ymax)
ET.SubElement(
channels_xml, 'channel', {
'chan_width_max': str(cw_max),
'x_min': str(cw_xmin),
'x_max': str(cw_xmax),
'y_min': str(cw_ymin),
'y_max': str(cw_ymax),
}
)
for i, info in enumerate(x_lists):
ET.SubElement(
channels_xml, 'x_list', {
'index': str(i),
'info': str(info)
}
)
for i, info in enumerate(y_lists):
ET.SubElement(
channels_xml, 'y_list', {
'index': str(i),
'info': str(info)
}
)
def to_xml(self, xml_graph):
self.to_xml_channels(single_element(xml_graph, 'channels'))
def TX(start, end, idx=None, name=None, direction=None, segment_id=None):
if direction is None:
direction = Track.Direction.INC
if segment_id is None:
segment_id = 0
return T(
start,
end,
direction=direction,
segment_id=segment_id,
idx=idx,
name=name,
type_hint=Track.Type.X,
)
def TY(start, end, idx=None, name=None, direction=None, segment_id=None):
if direction is None:
direction = Track.Direction.INC
if segment_id is None:
segment_id = 0
return T(
start,
end,
direction=direction,
segment_id=segment_id,
idx=idx,
name=name,
type_hint=Track.Type.Y,
)
def docprint(x):
pprint.pprint(x.splitlines())
def create_test_channel_grid():
g = ChannelGrid((6, 3), Track.Type.X)
g.create_track(TX((1, 0), (5, 0), name="AA"))
g.create_track(TX((1, 0), (3, 0), name="BB"))
g.create_track(TX((2, 0), (5, 0), name="CC"))
g.create_track(TX((1, 0), (1, 0), name="DD"))
g.create_track(TX((1, 1), (3, 1), name="aa"))
g.create_track(TX((4, 1), (5, 1), name="bb"))
g.create_track(TX((1, 1), (5, 1), name="cc"))
g.check()
return g
def test_x_auto():
g = create_test_channel_grid()
return g.pretty_print()
def test_pad():
g = create_test_channel_grid()
g.fill_empty(0, name='XX')
g.check()
return g.pretty_print()
def test_x_manual():
g = ChannelGrid((6, 3), Track.Type.X)
g.create_track(TX((1, 0), (5, 0), name="AA"), idx=0)
g.create_track(TX((1, 0), (3, 0), name="BB"), idx=1)
g.create_track(TX((2, 0), (5, 0), name="CC"), idx=2)
g.create_track(TX((1, 0), (1, 0), name="DD"), idx=2)
g.create_track(TX((1, 1), (3, 1), name="aa"), idx=0)
g.create_track(TX((4, 1), (5, 1), name="bb"), idx=0)
g.create_track(TX((1, 1), (5, 1), name="cc"), idx=1)
try:
g.create_track(TX((1, 1), (5, 1), name="dd"), idx=1)
assert False, "Should have failed to place"
except IndexError:
pass
g.check()
return g.pretty_print()
def test_y_auto():
g = ChannelGrid((3, 6), Track.Type.Y)
g.create_track(TY((0, 1), (0, 5), name="AA"))
g.create_track(TY((0, 1), (0, 3), name="BB"))
g.create_track(TY((0, 2), (0, 5), name="CC"))
g.create_track(TY((0, 1), (0, 1), name="DD"))
g.create_track(TY((1, 1), (1, 3), name="aa"))
g.create_track(TY((1, 4), (1, 5), name="bb"))
g.create_track(TY((1, 1), (1, 5), name="cc"))
g.check()
return g.pretty_print()
if __name__ == "__main__":
import doctest
print('doctest: begin')
failure_count, test_count = doctest.testmod()
assert test_count > 0
assert failure_count == 0, "Doctests failed!"
print('doctest: end')
| true | true |
1c3158f023b98bdbd790861d7edf522138fed93f | 4,974 | py | Python | contrib/node/tests/python/pants_test/contrib/node/tasks/test_node_task.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | null | null | null | contrib/node/tests/python/pants_test/contrib/node/tasks/test_node_task.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | null | null | null | contrib/node/tests/python/pants_test/contrib/node/tasks/test_node_task.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import os
import string
from textwrap import dedent
from pants.build_graph.target import Target
from pants.testutil.task_test_base import TaskTestBase
from pants.util.contextutil import pushd, temporary_dir
from pants.contrib.node.targets.node_module import NodeModule
from pants.contrib.node.targets.node_remote_module import NodeRemoteModule
from pants.contrib.node.targets.node_test import NodeTest
from pants.contrib.node.tasks.node_task import NodeTask
class NodeTaskTest(TaskTestBase):
    """Exercises the helper predicates and runners NodeTask offers subclasses."""

    class TestNodeTask(NodeTask):
        def execute(self):
            # The task is never executed; the tests only use its helper methods.
            raise NotImplementedError()

    @classmethod
    def task_type(cls):
        return cls.TestNodeTask

    def _type_check(self, types, type_check_function):
        """Return {target_type: predicate(target)} for a fresh target of each type."""
        # Widen the unittest diff output so full dict mismatches are shown.
        # It's a little weird to include this side effect here, but otherwise it
        # would have to be duplicated or go in the setup (where it would affect
        # all tests).
        self.maxDiff = None
        target_names = [':' + letter for letter in list(string.ascii_lowercase)]
        pairs = zip(types, target_names)
        return {
            target_type: type_check_function(self.make_target(target_name, target_type))
            for target_type, target_name in pairs
        }

    def test_is_node_package(self):
        expected = {
            NodeRemoteModule: True,
            NodeModule: True,
            NodeTest: False,
            Target: False,
        }
        actual = self._type_check(expected.keys(), NodeTask.is_node_package)
        self.assertEqual(expected, actual)

    def test_is_node_module(self):
        expected = {
            NodeRemoteModule: False,
            NodeModule: True,
            NodeTest: False,
            Target: False,
        }
        actual = self._type_check(expected.keys(), NodeTask.is_node_module)
        self.assertEqual(expected, actual)

    def test_is_node_remote_module(self):
        expected = {
            NodeRemoteModule: True,
            NodeModule: False,
            NodeTest: False,
            Target: False,
        }
        actual = self._type_check(expected.keys(), NodeTask.is_node_remote_module)
        self.assertEqual(expected, actual)

    def test_is_node_test(self):
        expected = {
            NodeRemoteModule: False,
            NodeModule: False,
            NodeTest: True,
            Target: False,
        }
        actual = self._type_check(expected.keys(), NodeTask.is_node_test)
        self.assertEqual(expected, actual)

    def test_execute_node(self):
        """execute_node runs a node script that leaves a file behind as proof."""
        task = self.create_task(self.context())
        with temporary_dir() as chroot:
            script = os.path.join(chroot, 'test.js')
            proof = os.path.join(chroot, 'path')
            script_source = dedent("""
                var fs = require('fs');
                fs.writeFile("{proof}", "Hello World!", function(err) {{}});
                """).format(proof=proof)
            with open(script, 'w') as out:
                out.write(script_source)

            self.assertFalse(os.path.exists(proof))
            returncode, command = task.execute_node([script], workunit_name='test')

            self.assertEqual(0, returncode)
            self.assertTrue(os.path.exists(proof))
            with open(proof, 'r') as result:
                self.assertEqual('Hello World!', result.read().strip())

    def _do_test_run_script(self, package_manager_name):
        """Shared body for the npm/yarnpkg run_script tests."""
        task = self.create_task(self.context())
        with temporary_dir() as chroot:
            proof = os.path.join(chroot, 'proof')
            self.assertFalse(os.path.exists(proof))
            package = {
                'name': 'pantsbuild.pants.test',
                'version': '0.0.0',
                'scripts': {
                    'proof': 'echo "42" > {}'.format(proof)
                }
            }
            with open(os.path.join(chroot, 'package.json'), 'w') as out:
                json.dump(package, out)
            with pushd(chroot):
                manager = task.node_distribution.get_package_manager(
                    package_manager=package_manager_name)
                returncode, _ = task.run_script(
                    'proof', package_manager=manager, workunit_name='test')
            self.assertEqual(0, returncode)
            self.assertTrue(os.path.exists(proof))
            with open(proof) as result:
                self.assertEqual('42', result.read().strip())

    def test_execute_npm(self):
        self._do_test_run_script('npm')

    def test_execute_yarnpkg(self):
        self._do_test_run_script('yarnpkg')
| 33.608108 | 97 | 0.664254 |
import json
import os
import string
from textwrap import dedent
from pants.build_graph.target import Target
from pants.testutil.task_test_base import TaskTestBase
from pants.util.contextutil import pushd, temporary_dir
from pants.contrib.node.targets.node_module import NodeModule
from pants.contrib.node.targets.node_remote_module import NodeRemoteModule
from pants.contrib.node.targets.node_test import NodeTest
from pants.contrib.node.tasks.node_task import NodeTask
class NodeTaskTest(TaskTestBase):
class TestNodeTask(NodeTask):
def execute(self):
raise NotImplementedError()
@classmethod
def task_type(cls):
return cls.TestNodeTask
def test_is_node_package(self):
expected = {
NodeRemoteModule: True,
NodeModule: True,
NodeTest: False,
Target: False,
}
self.assertEqual(expected, self._type_check(expected.keys(), NodeTask.is_node_package))
def test_is_node_module(self):
expected = {
NodeRemoteModule: False,
NodeModule: True,
NodeTest: False,
Target: False,
}
self.assertEqual(expected, self._type_check(expected.keys(), NodeTask.is_node_module))
def test_is_node_remote_module(self):
expected = {
NodeRemoteModule: True,
NodeModule: False,
NodeTest: False,
Target: False,
}
self.assertEqual(expected, self._type_check(expected.keys(), NodeTask.is_node_remote_module))
def test_is_node_test(self):
expected = {
NodeRemoteModule: False,
NodeModule: False,
NodeTest: True,
Target: False,
}
self.assertEqual(expected, self._type_check(expected.keys(), NodeTask.is_node_test))
def _type_check(self, types, type_check_function):
# be duplicated or go in the setup (in which case it would affect all tests).
self.maxDiff = None
target_names = [':' + letter for letter in list(string.ascii_lowercase)]
types_with_target_names = zip(types, target_names)
type_check_results = {type: type_check_function(self.make_target(target_name, type))
for type, target_name in types_with_target_names}
return type_check_results
def test_execute_node(self):
task = self.create_task(self.context())
with temporary_dir() as chroot:
script = os.path.join(chroot, 'test.js')
proof = os.path.join(chroot, 'path')
with open(script, 'w') as fp:
fp.write(dedent("""
var fs = require('fs');
fs.writeFile("{proof}", "Hello World!", function(err) {{}});
""").format(proof=proof))
self.assertFalse(os.path.exists(proof))
returncode, command = task.execute_node([script], workunit_name='test')
self.assertEqual(0, returncode)
self.assertTrue(os.path.exists(proof))
with open(proof, 'r') as fp:
self.assertEqual('Hello World!', fp.read().strip())
def test_execute_npm(self):
task = self.create_task(self.context())
with temporary_dir() as chroot:
proof = os.path.join(chroot, 'proof')
self.assertFalse(os.path.exists(proof))
package = {
'name': 'pantsbuild.pants.test',
'version': '0.0.0',
'scripts': {
'proof': 'echo "42" > {}'.format(proof)
}
}
with open(os.path.join(chroot, 'package.json'), 'w') as fp:
json.dump(package, fp)
with pushd(chroot):
returncode, _ = task.run_script(
'proof',
package_manager=task.node_distribution.get_package_manager(package_manager='npm'),
workunit_name='test')
self.assertEqual(0, returncode)
self.assertTrue(os.path.exists(proof))
with open(proof, 'r') as fp:
self.assertEqual('42', fp.read().strip())
def test_execute_yarnpkg(self):
task = self.create_task(self.context())
with temporary_dir() as chroot:
proof = os.path.join(chroot, 'proof')
self.assertFalse(os.path.exists(proof))
package = {
'name': 'pantsbuild.pants.test',
'version': '0.0.0',
'scripts': {
'proof': 'echo "42" > {}'.format(proof)
}
}
with open(os.path.join(chroot, 'package.json'), 'w') as fp:
json.dump(package, fp)
with pushd(chroot):
returncode, _ = task.run_script(
'proof',
package_manager=task.node_distribution.get_package_manager(package_manager='yarnpkg'),
workunit_name='test')
self.assertEqual(0, returncode)
self.assertTrue(os.path.exists(proof))
with open(proof) as fp:
self.assertEqual('42', fp.read().strip())
| true | true |
1c31596f5d270636d8249982d87c2af04c309ec7 | 146 | py | Python | answers/x_3_3.py | ofl/kuku2 | 7247fb1862d917d23258ebe7a93dca5939433225 | [
"MIT"
] | null | null | null | answers/x_3_3.py | ofl/kuku2 | 7247fb1862d917d23258ebe7a93dca5939433225 | [
"MIT"
] | 1 | 2021-11-13T08:03:04.000Z | 2021-11-13T08:03:04.000Z | answers/x_3_3.py | ofl/kuku2 | 7247fb1862d917d23258ebe7a93dca5939433225 | [
"MIT"
] | null | null | null | # x_3_3
#
# statistics.meanで計算した平均の値の小数低下を切り捨ててください
import statistics
import math
data = [7, 4, 3, 9]
print(math.floor(statistics.mean(data)))
| 13.272727 | 41 | 0.746575 |
# Print the floor of the arithmetic mean of the sample data.
import statistics
import math
data = [7, 4, 3, 9]
print(math.floor(statistics.mean(data)))
| true | true |
1c315a78f08f3eb22cdbe8649826051b76d55dc4 | 886 | py | Python | mysite/polls/models.py | sssunda/django-test | c262dcbf4599dd222ceeb0256e1005a1802ea997 | [
"MIT"
] | 1 | 2019-06-06T07:56:38.000Z | 2019-06-06T07:56:38.000Z | mysite/polls/models.py | sssunda/django-test | c262dcbf4599dd222ceeb0256e1005a1802ea997 | [
"MIT"
] | null | null | null | mysite/polls/models.py | sssunda/django-test | c262dcbf4599dd222ceeb0256e1005a1802ea997 | [
"MIT"
] | null | null | null | import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
    """A poll question together with its publication timestamp."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    def __str__(self):
        return self.question_text
    def was_published_recently(self):
        """Return True if pub_date is within the last day and not in the future."""
        now = timezone.now()
        return now-datetime.timedelta(days=1)<= self.pub_date <=now
    # Admin configuration for the was_published_recently list column:
    # sort by pub_date, render as a boolean icon, and use a short header.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently? '
class Choice(models.Model):
    """One selectable answer for a Question, with a running vote tally."""
    # Deleting a Question cascades to its choices.
    question = models.ForeignKey(Question, on_delete = models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
    def __str__(self):
        return self.choice_text
| 28.580645 | 70 | 0.716704 | import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now-datetime.timedelta(days=1)<= self.pub_date <=now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently? '
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete = models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| true | true |
1c315b71a2c81781e8dda407026d012b61f1f9c8 | 4,020 | py | Python | python/tools/metrics.py | bearsroom/mxnet-augmented | af4843b249e312014d54ce38545fcb4fa546d7db | [
"Apache-2.0"
] | 1 | 2019-01-16T03:57:53.000Z | 2019-01-16T03:57:53.000Z | python/tools/metrics.py | bearsroom/mxnet-augmented | af4843b249e312014d54ce38545fcb4fa546d7db | [
"Apache-2.0"
] | null | null | null | python/tools/metrics.py | bearsroom/mxnet-augmented | af4843b249e312014d54ce38545fcb4fa546d7db | [
"Apache-2.0"
] | null | null | null |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
class Metrics:
    """Accumulates top-1 / top-k classification counts and derives metrics.

    Per-class counters: tp/fp/p for top-1 predictions, tp_topk/p_topk for
    top-k predictions.  Ground-truth entries of None are skipped everywhere,
    so partially labelled batches can be fed directly.
    """

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.reset()

    def reset(self):
        """Zero all counters and forget any recorded false-positive images."""
        self.tp = np.zeros((self.num_classes, ), dtype=np.float32)
        self.fp = np.zeros((self.num_classes, ), dtype=np.float32)
        self.p = np.zeros((self.num_classes, ), dtype=np.float32)
        self.tp_topk = np.zeros((self.num_classes, ), dtype=np.float32)
        self.p_topk = np.zeros((self.num_classes, ), dtype=np.float32)
        # fp_images[c] holds (im_name, gt_label, prob_pred, prob_gt) tuples
        # for images wrongly predicted as class c.
        self.fp_images = [[] for _ in range(self.num_classes)]

    def update_top1(self, pred_int_list, gt_int_list):
        """Accumulate top-1 counts from parallel prediction/ground-truth lists."""
        for y_pred, y_gt in zip(pred_int_list, gt_int_list):
            if y_gt is None:
                continue
            self.p[y_gt] += 1
            if y_pred == y_gt:
                self.tp[y_pred] += 1
            else:
                self.fp[y_pred] += 1

    def update_topk(self, pred_int_list, gt_int_list, top_k=5):
        """Accumulate top-k counts; each prediction entry must hold top_k labels."""
        for y_pred, y_gt in zip(pred_int_list, gt_int_list):
            if y_gt is None:
                continue
            assert len(y_pred) == top_k
            self.p_topk[y_gt] += 1
            if y_gt in y_pred:
                self.tp_topk[y_gt] += 1

    def get(self, metric='f1_score'):
        """Return per-class metric arrays.

        'f1_score'    -> (recall, precision, f1_score) from the top-1 counts.
        'topk_recall' -> recall array from the top-k counts.

        Raises:
            ValueError: for any other metric name (previously the method
                fell through and returned None silently).
        """
        if metric == 'f1_score':
            recall = np.zeros((self.num_classes), dtype=np.float32)
            precision = np.zeros((self.num_classes), dtype=np.float32)
            f1_score = np.zeros((self.num_classes), dtype=np.float32)
            for idx in range(self.num_classes):
                # Guard each ratio against a zero denominator.
                if self.tp[idx] + self.fp[idx] > 0:
                    precision[idx] = self.tp[idx] / float(self.tp[idx] + self.fp[idx])
                if self.p[idx] > 0:
                    recall[idx] = self.tp[idx] / float(self.p[idx])
                if precision[idx] + recall[idx] > 0:
                    f1_score[idx] = 2 * precision[idx] * recall[idx] / float(precision[idx] + recall[idx])
            return recall, precision, f1_score
        if metric == 'topk_recall':
            recall = np.zeros((self.num_classes, ), dtype=np.float32)
            for idx in range(self.num_classes):
                if self.p_topk[idx] > 0:
                    recall[idx] = self.tp_topk[idx] / float(self.p_topk[idx])
            return recall
        raise ValueError('Unknown metric: {}'.format(metric))

    def update_fp_images(self, pred_int_list, gt_int_list, probs, im_list):
        """Record misclassified images along with their predicted/gt probabilities."""
        for y_pred, y_gt, prob, im_name in zip(pred_int_list, gt_int_list, probs, im_list):
            if y_gt is None:
                continue
            if y_pred != y_gt:
                prob_pred = prob[y_pred]
                prob_gt = prob[y_gt]
                self.fp_images[y_pred].append((im_name, y_gt, prob_pred, prob_gt))

    def get_fp_images(self):
        """Return the per-class lists of recorded false-positive images."""
        return self.fp_images
class ConfusionMatrix:
    """Square confusion matrix over a fixed class list.
    Rows are true classes, columns are predicted classes.
    """
    def __init__(self, classes):
        # classes: ordered sequence of class names; position == class id.
        self.classes = classes
        self.num_classes = len(self.classes)
        self.reset()
    def reset(self):
        """Clear all accumulated counts."""
        self.matrix = np.zeros((self.num_classes, self.num_classes))
    def update(self, pred_int_list, gt_int_list):
        """Count (true, predicted) pairs; samples with ground truth None are skipped."""
        for y_pred, y_gt in zip(pred_int_list, gt_int_list):
            if y_gt is None:
                continue
            self.matrix[y_gt, y_pred] += 1
    def get(self):
        """Return the (raw or row-normalized) matrix."""
        return self.matrix
    def normalize(self):
        """Normalize each true-class row to sum to 1; all-zero rows stay zero.
        Bug fix: the previous code divided by the bare row-sum vector, which
        broadcast along the last axis and divided column j by row j's total
        instead of normalizing rows; the vector must be a column here.
        """
        p = np.sum(self.matrix, axis=1)
        p[np.where(p == 0)[0]] = 1  # avoid division by zero for unseen classes
        self.matrix = self.matrix / p[:, np.newaxis]
    def draw(self, output):
        """Render the matrix as a JPEG heat map at path ``output``."""
        plt.figure()
        plt.imshow(self.matrix, interpolation='nearest', cmap=plt.cm.Blues)
        plt.title('Confusion matrix - {} classes'.format(self.num_classes))
        plt.colorbar()
        tick_marks = np.arange(self.num_classes)
        plt.xticks(tick_marks, self.classes, rotation=90)
        plt.yticks(tick_marks, self.classes)
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predict label')
        # NOTE(review): savefig's ``quality`` kwarg was removed in
        # matplotlib >= 3.6 (use pil_kwargs={'quality': 80} there);
        # confirm the pinned matplotlib version before upgrading.
        plt.savefig(output, format='jpg', quality=80)
        plt.clf()
| 37.570093 | 105 | 0.575871 |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
class Metrics:
    """Accumulates per-class top-1 / top-k classification counters
    (float32 numpy arrays indexed by class id) plus the misclassified
    samples needed for error analysis.
    """
    def __init__(self, num_classes):
        # num_classes: total number of class ids (0 .. num_classes - 1).
        self.num_classes = num_classes
        self.reset()
    def reset(self):
        """Zero every counter and drop the collected false-positive images."""
        self.tp = np.zeros((self.num_classes, ), dtype=np.float32)
        self.fp = np.zeros((self.num_classes, ), dtype=np.float32)
        self.p = np.zeros((self.num_classes, ), dtype=np.float32)
        self.tp_topk = np.zeros((self.num_classes, ), dtype=np.float32)
        self.p_topk = np.zeros((self.num_classes, ), dtype=np.float32)
        self.fp_images = [[] for _ in range(self.num_classes)]
    def update_top1(self, pred_int_list, gt_int_list):
        """Accumulate top-1 counts; samples with ground truth None are skipped."""
        for y_pred, y_gt in zip(pred_int_list, gt_int_list):
            if y_gt is None:
                continue
            self.p[y_gt] += 1
            if y_pred == y_gt:
                self.tp[y_pred] += 1
            else:
                self.fp[y_pred] += 1
    def update_topk(self, pred_int_list, gt_int_list, top_k=5):
        """Accumulate top-k counts; each prediction is a sequence of top_k ids."""
        for y_pred, y_gt in zip(pred_int_list, gt_int_list):
            if y_gt is None:
                continue
            assert len(y_pred) == top_k
            self.p_topk[y_gt] += 1
            if y_gt in y_pred:
                self.tp_topk[y_gt] += 1
    def get(self, metric='f1_score'):
        """Return 'f1_score' -> (recall, precision, f1) arrays, or
        'topk_recall' -> recall array.
        NOTE(review): any other metric name silently falls through and
        returns None -- callers should not rely on that.
        """
        if metric == 'f1_score':
            recall = np.zeros((self.num_classes), dtype=np.float32)
            precision = np.zeros((self.num_classes), dtype=np.float32)
            f1_score = np.zeros((self.num_classes), dtype=np.float32)
            for idx in range(self.num_classes):
                # Guards keep classes without samples at 0.0 (no div-by-zero).
                if self.tp[idx] + self.fp[idx] > 0:
                    precision[idx] = self.tp[idx] / float(self.tp[idx] + self.fp[idx])
                if self.p[idx] > 0:
                    recall[idx] = self.tp[idx] / float(self.p[idx])
                if precision[idx] + recall[idx] > 0:
                    f1_score[idx] = 2 * precision[idx] * recall[idx] / float(precision[idx] + recall[idx])
            return recall, precision, f1_score
        if metric == 'topk_recall':
            recall = np.zeros((self.num_classes, ), dtype=np.float32)
            for idx in range(self.num_classes):
                if self.p_topk[idx] > 0:
                    recall[idx] = self.tp_topk[idx] / float(self.p_topk[idx])
            return recall
    def update_fp_images(self, pred_int_list, gt_int_list, probs, im_list):
        """Record misclassified samples as (image, true id, P(pred), P(true)),
        keyed by the predicted class.
        """
        for y_pred, y_gt, prob, im_name in zip(pred_int_list, gt_int_list, probs, im_list):
            if y_gt is None:
                continue
            if y_pred != y_gt:
                prob_pred = prob[y_pred]
                prob_gt = prob[y_gt]
                self.fp_images[y_pred].append((im_name, y_gt, prob_pred, prob_gt))
    def get_fp_images(self):
        """Return the per-predicted-class lists of misclassified samples."""
        return self.fp_images
class ConfusionMatrix:
    """Confusion matrix over a fixed class list (rows = true, cols = predicted)."""
    def __init__(self, classes):
        # classes: ordered sequence of class names; index in it is the class id.
        self.classes = classes
        self.num_classes = len(self.classes)
        self.reset()
    def reset(self):
        """Clear all counts."""
        self.matrix = np.zeros((self.num_classes, self.num_classes))
    def update(self, pred_int_list, gt_int_list):
        """Count (true, predicted) pairs; ground-truth None is skipped."""
        for y_pred, y_gt in zip(pred_int_list, gt_int_list):
            if y_gt is None:
                continue
            self.matrix[y_gt, y_pred] += 1
    def get(self):
        """Return the (raw or row-normalized) matrix."""
        return self.matrix
    def normalize(self):
        """Normalize every true-class row to sum to 1 (empty rows stay zero).
        Bug fix: ``matrix / p`` broadcast the row-sum vector over the last
        axis, dividing column j by row j's total; it must be reshaped to a
        column vector for a proper row-wise normalization.
        """
        p = np.sum(self.matrix, axis=1)
        p[np.where(p == 0)[0]] = 1  # keep all-zero rows from dividing by 0
        self.matrix = self.matrix / p[:, np.newaxis]
    def draw(self, output):
        """Save the matrix as a JPEG heat map at path ``output``."""
        plt.figure()
        plt.imshow(self.matrix, interpolation='nearest', cmap=plt.cm.Blues)
        plt.title('Confusion matrix - {} classes'.format(self.num_classes))
        plt.colorbar()
        tick_marks = np.arange(self.num_classes)
        plt.xticks(tick_marks, self.classes, rotation=90)
        plt.yticks(tick_marks, self.classes)
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predict label')
        # NOTE(review): ``quality`` was removed from savefig in
        # matplotlib >= 3.6 (use pil_kwargs there); confirm pinned version.
        plt.savefig(output, format='jpg', quality=80)
        plt.clf()
| true | true |
1c315d2b105e29664405e2585fb0f7c0c8048c8b | 894 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/mps/models/ThumbnailTaskSource.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | null | null | null | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/mps/models/ThumbnailTaskSource.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | null | null | null | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/mps/models/ThumbnailTaskSource.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | null | null | null | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class ThumbnailTaskSource(object):
    """Object-storage location of the input video for a thumbnail task."""
    def __init__(self, bucket, key, ):
        """
        :param bucket: bucket that holds the input video
        :param key: object key of the input video inside the bucket
        """
        self.key = key
        self.bucket = bucket
| 29.8 | 75 | 0.705817 |
class ThumbnailTaskSource(object):
    """Object-storage source (bucket + key) of the video to thumbnail."""
    def __init__(self, bucket, key, ):
        """
        :param bucket: bucket that holds the input video
        :param key: object key of the input video
        """
        self.bucket = bucket
        self.key = key
| true | true |
1c315df0d25a6b51d1daf2448ba6c2e59d1bf433 | 18,957 | py | Python | src/schnetpack/representation/schnet.py | CLEANit/schnetpack | 4760ff452e10e5f8b75d19c3f2db595145bcae0b | [
"MIT"
] | null | null | null | src/schnetpack/representation/schnet.py | CLEANit/schnetpack | 4760ff452e10e5f8b75d19c3f2db595145bcae0b | [
"MIT"
] | null | null | null | src/schnetpack/representation/schnet.py | CLEANit/schnetpack | 4760ff452e10e5f8b75d19c3f2db595145bcae0b | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from schnetpack.nn.base import Dense, ScaleShift
from schnetpack import Properties
from schnetpack.nn.cfconv import CFConv, VoxelCFConv
from schnetpack.nn.cutoff import CosineCutoff
from schnetpack.nn.acsf import GaussianSmearing
from schnetpack.nn.neighbors import AtomDistances
from schnetpack.nn.activations import shifted_softplus
from schnetpack.nn.blocks import MLP
class SchNetInteraction(nn.Module):
    r"""One SchNet interaction block: a continuous-filter convolution over
    each atom's neighborhood followed by an atom-wise Dense transform.
    Args:
        n_atom_basis (int): width of the atom-wise feature vectors.
        n_spatial_basis (int): input size of the filter-generating network
            (number of expanded-distance features).
        n_filters (int): number of continuous-filter convolution filters.
        cutoff (float): cutoff radius.
        cutoff_network (nn.Module, optional): class of the cutoff layer.
        normalize_filter (bool, optional): if True, divide the aggregated
            filter by the number of neighbors.
    """
    def __init__(
        self,
        n_atom_basis,
        n_spatial_basis,
        n_filters,
        cutoff,
        cutoff_network=CosineCutoff,
        normalize_filter=False,
    ):
        super(SchNetInteraction, self).__init__()
        # Two dense layers map the expanded distances to convolution filters.
        self.filter_network = nn.Sequential(
            Dense(n_spatial_basis, n_filters, activation=shifted_softplus),
            Dense(n_filters, n_filters),
        )
        self.cutoff_network = cutoff_network(cutoff)
        # Continuous-filter convolution aggregating over each neighborhood.
        self.cfconv = CFConv(
            n_atom_basis,
            n_filters,
            n_atom_basis,
            self.filter_network,
            cutoff_network=self.cutoff_network,
            activation=shifted_softplus,
            normalize_filter=normalize_filter,
        )
        # Atom-wise linear output transform.
        self.dense = Dense(n_atom_basis, n_atom_basis, bias=True, activation=None)
    def forward(self, x, r_ij, neighbors, neighbor_mask, f_ij=None):
        """Return the interaction refinement for atom features ``x``.
        Args mirror CFConv: x is (N_b, N_a, n_atom_basis); r_ij and
        neighbors are (N_b, N_a, N_nbh); neighbor_mask masks padded
        neighbors; f_ij optionally holds pre-expanded distances.
        """
        return self.dense(self.cfconv(x, r_ij, neighbors, neighbor_mask, f_ij))
class VoxelSchNetInteraction(nn.Module):
    r"""Voxel variant of the SchNet interaction block.
    Structurally identical to :class:`SchNetInteraction` except that the
    neighborhood aggregation uses :class:`VoxelCFConv`.
    Args:
        n_atom_basis (int): width of the atom-wise feature vectors.
        n_spatial_basis (int): input size of the filter-generating network.
        n_filters (int): number of continuous-filter convolution filters.
        cutoff (float): cutoff radius.
        cutoff_network (nn.Module, optional): class of the cutoff layer.
        normalize_filter (bool, optional): if True, divide the aggregated
            filter by the number of neighbors.
    """
    def __init__(
        self,
        n_atom_basis,
        n_spatial_basis,
        n_filters,
        cutoff,
        cutoff_network=CosineCutoff,
        normalize_filter=False,
    ):
        super(VoxelSchNetInteraction, self).__init__()
        # Filter-generating network fed with expanded interatomic distances.
        self.filter_network = nn.Sequential(
            Dense(n_spatial_basis, n_filters, activation=shifted_softplus),
            Dense(n_filters, n_filters),
        )
        self.cutoff_network = cutoff_network(cutoff)
        # Voxel-based continuous-filter convolution.
        self.cfconv = VoxelCFConv(
            n_atom_basis,
            n_filters,
            n_atom_basis,
            self.filter_network,
            cutoff_network=self.cutoff_network,
            activation=shifted_softplus,
            normalize_filter=normalize_filter,
        )
        # Atom-wise linear output transform.
        self.dense = Dense(n_atom_basis, n_atom_basis, bias=True, activation=None)
    def forward(self, x, r_ij, neighbors, neighbor_mask, f_ij=None):
        """Return the interaction refinement for atom features ``x``."""
        return self.dense(self.cfconv(x, r_ij, neighbors, neighbor_mask, f_ij))
class SchNet(nn.Module):
    """SchNet architecture for learning representations of atomistic systems.
    Args:
        n_atom_basis (int, optional): number of features to describe atomic environments.
            This determines the size of each embedding vector; i.e. embeddings_dim.
        n_filters (int, optional): number of filters used in continuous-filter convolution
        n_interactions (int, optional): number of interaction blocks.
        cutoff (float, optional): cutoff radius.
        n_gaussians (int, optional): number of Gaussian functions used to expand
            atomic distances.
        normalize_filter (bool, optional): if True, divide aggregated filter by number
            of neighbors over which convolution is applied.
        coupled_interactions (bool, optional): if True, share the weights across
            interaction blocks and filter-generating networks.
        return_intermediate (bool, optional): if True, `forward` method also returns
            intermediate atomic representations after each interaction block is applied.
        max_z (int, optional): maximum nuclear charge allowed in database. This
            determines the size of the dictionary of embedding; i.e. num_embeddings.
        cutoff_network (nn.Module, optional): cutoff layer.
        trainable_gaussians (bool, optional): If True, widths and offset of Gaussian
            functions are adjusted during training process.
        distance_expansion (nn.Module, optional): layer for expanding interatomic
            distances in a basis.
        charged_systems (bool, optional): if True, allocate the per-feature charge
            parameter (currently only consumed by a disabled branch in forward).
    References:
    .. [#schnet1] Schütt, Arbabzadah, Chmiela, Müller, Tkatchenko:
       Quantum-chemical insights from deep tensor neural networks.
       Nature Communications, 8, 13890. 2017.
    .. [#schnet_transfer] Schütt, Kindermans, Sauceda, Chmiela, Tkatchenko, Müller:
       SchNet: A continuous-filter convolutional neural network for modeling quantum
       interactions.
       In Advances in Neural Information Processing Systems, pp. 992-1002. 2017.
    .. [#schnet3] Schütt, Sauceda, Kindermans, Tkatchenko, Müller:
       SchNet - a deep learning architecture for molecules and materials.
       The Journal of Chemical Physics 148 (24), 241722. 2018.
    """
    def __init__(
        self,
        n_atom_basis=128,
        n_filters=128,
        n_interactions=3,
        cutoff=5.0,
        n_gaussians=25,
        normalize_filter=False,
        coupled_interactions=False,
        return_intermediate=False,
        max_z=100,
        cutoff_network=CosineCutoff,
        trainable_gaussians=False,
        distance_expansion=None,
        charged_systems=False,
    ):
        super(SchNet, self).__init__()
        self.n_atom_basis = n_atom_basis
        # make a lookup table to store embeddings for each element (up to atomic
        # number max_z) each of which is a vector of size n_atom_basis
        self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)
        # layer for computing interatomic distances
        self.distances = AtomDistances()
        # layer for expanding interatomic distances in a basis
        if distance_expansion is None:
            self.distance_expansion = GaussianSmearing(
                0.0, cutoff, n_gaussians, trainable=trainable_gaussians
            )
        else:
            self.distance_expansion = distance_expansion
        # block for computing interaction
        if coupled_interactions:
            # use the same SchNetInteraction instance (hence the same weights)
            self.interactions = nn.ModuleList(
                [
                    SchNetInteraction(
                        n_atom_basis=n_atom_basis,
                        n_spatial_basis=n_gaussians,
                        n_filters=n_filters,
                        cutoff_network=cutoff_network,
                        cutoff=cutoff,
                        normalize_filter=normalize_filter,
                    )
                ]
                * n_interactions
            )
        else:
            # use one SchNetInteraction instance for each interaction
            self.interactions = nn.ModuleList(
                [
                    SchNetInteraction(
                        n_atom_basis=n_atom_basis,
                        n_spatial_basis=n_gaussians,
                        n_filters=n_filters,
                        cutoff_network=cutoff_network,
                        cutoff=cutoff,
                        normalize_filter=normalize_filter,
                    )
                    for _ in range(n_interactions)
                ]
            )
        # set attributes
        self.return_intermediate = return_intermediate
        self.charged_systems = charged_systems
        if charged_systems:
            self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
            self.charge.data.normal_(0, 1.0 / n_atom_basis ** 0.5)
    def forward(self, inputs):
        """Compute atomic representations/embeddings.
        Args:
            inputs (dict of torch.Tensor): SchNetPack dictionary of input tensors.
        Returns:
            torch.Tensor: atom-wise representation.
            list of torch.Tensor: intermediate atom-wise representations, if
            return_intermediate=True was used.
        """
        # get tensors from input dictionary
        atomic_numbers = inputs[Properties.Z]
        positions = inputs[Properties.R]
        cell = inputs[Properties.cell]
        cell_offset = inputs[Properties.cell_offset]
        neighbors = inputs[Properties.neighbors]
        neighbor_mask = inputs[Properties.neighbor_mask]
        # atom_mask is only consumed by the disabled charge branch below
        atom_mask = inputs[Properties.atom_mask]
        # get atom embeddings for the input atomic numbers
        x = self.embedding(atomic_numbers)
        # NOTE(review): the leading ``False and`` makes this charge-embedding
        # branch unreachable -- dead code left from an experiment; confirm
        # intent before re-enabling or deleting.
        if False and self.charged_systems and Properties.charge in inputs.keys():
            n_atoms = torch.sum(atom_mask, dim=1, keepdim=True)
            charge = inputs[Properties.charge] / n_atoms  # B
            charge = charge[:, None] * self.charge  # B x F
            x = x + charge
        # compute interatomic distance of every atom to its neighbors
        r_ij = self.distances(
            positions, neighbors, cell, cell_offset, neighbor_mask=neighbor_mask
        )
        # expand interatomic distances (for example, Gaussian smearing)
        f_ij = self.distance_expansion(r_ij)
        # store intermediate representations
        if self.return_intermediate:
            xs = [x]
        # compute interaction block to update atomic embeddings
        for interaction in self.interactions:
            # residual update: each block refines x additively
            v = interaction(x, r_ij, neighbors, neighbor_mask, f_ij=f_ij)
            x = x + v
            if self.return_intermediate:
                xs.append(x)
        if self.return_intermediate:
            return x, xs
        return x
class VoxelSchNet(nn.Module):
    """Voxel variant of the SchNet architecture for learning representations
    of atomistic systems, ending in a small MLP head.
    Args:
        n_atom_basis (int, optional): number of features to describe atomic environments.
            This determines the size of each embedding vector; i.e. embeddings_dim.
        n_filters (int, optional): number of filters used in continuous-filter convolution
        n_interactions (int, optional): number of interaction blocks.
        cutoff (float, optional): cutoff radius.
        n_gaussians (int, optional): number of Gaussian functions used to expand
            atomic distances.
        normalize_filter (bool, optional): if True, divide aggregated filter by number
            of neighbors over which convolution is applied.
        coupled_interactions (bool, optional): if True, share the weights across
            interaction blocks and filter-generating networks.
        return_intermediate (bool, optional): if True, `forward` method also returns
            intermediate atomic representations after each interaction block is applied.
        max_z (int, optional): maximum nuclear charge allowed in database. This
            determines the size of the dictionary of embedding; i.e. num_embeddings.
        cutoff_network (nn.Module, optional): cutoff layer.
        trainable_gaussians (bool, optional): If True, widths and offset of Gaussian
            functions are adjusted during training process.
        distance_expansion (nn.Module, optional): layer for expanding interatomic
            distances in a basis.
        charged_systems (bool, optional): allocates the charge parameter, but
            forward never uses it in this class (see NOTE below).
    References:
    .. [#schnet1] Schütt, Arbabzadah, Chmiela, Müller, Tkatchenko:
       Quantum-chemical insights from deep tensor neural networks.
       Nature Communications, 8, 13890. 2017.
    .. [#schnet_transfer] Schütt, Kindermans, Sauceda, Chmiela, Tkatchenko, Müller:
       SchNet: A continuous-filter convolutional neural network for modeling quantum
       interactions.
       In Advances in Neural Information Processing Systems, pp. 992-1002. 2017.
    .. [#schnet3] Schütt, Sauceda, Kindermans, Tkatchenko, Müller:
       SchNet - a deep learning architecture for molecules and materials.
       The Journal of Chemical Physics 148 (24), 241722. 2018.
    """
    def __init__(
        self,
        n_atom_basis=512,
        n_filters=512,
        n_interactions=3,
        cutoff=5.0,
        n_gaussians=300,
        normalize_filter=False,
        coupled_interactions=False,
        return_intermediate=False,
        max_z=100,
        cutoff_network=CosineCutoff,
        trainable_gaussians=False,
        distance_expansion=None,
        charged_systems=False,
    ):
        super(VoxelSchNet, self).__init__()
        self.n_atom_basis = n_atom_basis
        # make a lookup table to store embeddings for each element (up to atomic
        # number max_z) each of which is a vector of size n_atom_basis
        self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)
        # layer for computing interatomic distances
        # NOTE(review): unused in this class -- forward takes precomputed
        # distances from the inputs dict; confirm before removing.
        self.distances = AtomDistances()
        # layer for expanding interatomic distances in a basis
        if distance_expansion is None:
            self.distance_expansion = GaussianSmearing(
                0.0, cutoff, n_gaussians, trainable=trainable_gaussians
            )
        else:
            self.distance_expansion = distance_expansion
        # block for computing interaction
        if coupled_interactions:
            # use the same SchNetInteraction instance (hence the same weights)
            self.interactions = nn.ModuleList(
                [
                    VoxelSchNetInteraction(
                        n_atom_basis=n_atom_basis,
                        n_spatial_basis=n_gaussians,
                        n_filters=n_filters,
                        cutoff_network=cutoff_network,
                        cutoff=cutoff,
                        normalize_filter=normalize_filter,
                    )
                ]
                * n_interactions
            )
        else:
            # use one SchNetInteraction instance for each interaction
            self.interactions = nn.ModuleList(
                [
                    VoxelSchNetInteraction(
                        n_atom_basis=n_atom_basis,
                        n_spatial_basis=n_gaussians,
                        n_filters=n_filters,
                        cutoff_network=cutoff_network,
                        cutoff=cutoff,
                        normalize_filter=normalize_filter,
                    )
                    for _ in range(n_interactions)
                ]
            )
        # self.standardize = ScaleShift(0, 1)
        # NOTE(review): the standardize layer above is commented-out leftover;
        # the MLP head's constants (6, None, 2) are hard-coded -- confirm
        # their meaning against MLP's signature before changing.
        self.final_layer = MLP(n_atom_basis, 6, None, 2, shifted_softplus)
        # set attributes
        self.return_intermediate = return_intermediate
        self.charged_systems = charged_systems
        if charged_systems:
            self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
            self.charge.data.normal_(0, 1.0 / n_atom_basis ** 0.5)
    def forward(self, inputs):
        """Compute atomic representations/embeddings.
        Args:
            inputs (dict of torch.Tensor): SchNetPack dictionary of input tensors.
        Returns:
            torch.Tensor: atom-wise representation.
            list of torch.Tensor: intermediate atom-wise representations, if
            return_intermediate=True was used.
        """
        # get tensors from input dictionary (distances come precomputed here,
        # unlike SchNet.forward which derives them from positions)
        atomic_numbers = inputs['atomic_numbers']
        distances = inputs['distances']
        neighbors = inputs['neighbors']
        neighbor_mask = inputs['neighbor_masks']
        # get atom embeddings for the input atomic numbers
        x = self.embedding(atomic_numbers)
        # expand interatomic distances (for example, Gaussian smearing)
        f_ij = self.distance_expansion(distances)
        # store intermediate representations
        if self.return_intermediate:
            xs = [x]
        # compute interaction block to update atomic embeddings
        for interaction in self.interactions:
            v = interaction(x, distances, neighbors, neighbor_mask, f_ij=f_ij)
            x = x + v
            if self.return_intermediate:
                xs.append(x)
        if self.return_intermediate:
            return x, xs
        # the MLP head is applied to the features at position 0 only
        return self.final_layer(x[:, 0, :])
| 40.767742 | 90 | 0.633223 | import torch
import torch.nn as nn
from schnetpack.nn.base import Dense, ScaleShift
from schnetpack import Properties
from schnetpack.nn.cfconv import CFConv, VoxelCFConv
from schnetpack.nn.cutoff import CosineCutoff
from schnetpack.nn.acsf import GaussianSmearing
from schnetpack.nn.neighbors import AtomDistances
from schnetpack.nn.activations import shifted_softplus
from schnetpack.nn.blocks import MLP
class SchNetInteraction(nn.Module):
    """One SchNet interaction block: a continuous-filter convolution (CFConv)
    over each atom's neighborhood followed by an atom-wise Dense transform,
    producing the additive refinement of the atom features.
    """
    def __init__(
        self,
        n_atom_basis,
        n_spatial_basis,
        n_filters,
        cutoff,
        cutoff_network=CosineCutoff,
        normalize_filter=False,
    ):
        super(SchNetInteraction, self).__init__()
        # Two dense layers map expanded distances to convolution filters.
        self.filter_network = nn.Sequential(
            Dense(n_spatial_basis, n_filters, activation=shifted_softplus),
            Dense(n_filters, n_filters),
        )
        self.cutoff_network = cutoff_network(cutoff)
        # Continuous-filter convolution aggregating over the neighborhood.
        self.cfconv = CFConv(
            n_atom_basis,
            n_filters,
            n_atom_basis,
            self.filter_network,
            cutoff_network=self.cutoff_network,
            activation=shifted_softplus,
            normalize_filter=normalize_filter,
        )
        # Atom-wise linear output transform.
        self.dense = Dense(n_atom_basis, n_atom_basis, bias=True, activation=None)
    def forward(self, x, r_ij, neighbors, neighbor_mask, f_ij=None):
        """Return the interaction refinement for atom features ``x``."""
        v = self.cfconv(x, r_ij, neighbors, neighbor_mask, f_ij)
        v = self.dense(v)
        return v
class VoxelSchNetInteraction(nn.Module):
    """Voxel variant of the SchNet interaction block; identical in structure
    to SchNetInteraction except the aggregation uses VoxelCFConv.
    """
    def __init__(
        self,
        n_atom_basis,
        n_spatial_basis,
        n_filters,
        cutoff,
        cutoff_network=CosineCutoff,
        normalize_filter=False,
    ):
        super(VoxelSchNetInteraction, self).__init__()
        # Filter-generating network fed with expanded interatomic distances.
        self.filter_network = nn.Sequential(
            Dense(n_spatial_basis, n_filters, activation=shifted_softplus),
            Dense(n_filters, n_filters),
        )
        self.cutoff_network = cutoff_network(cutoff)
        # Voxel-based continuous-filter convolution.
        self.cfconv = VoxelCFConv(
            n_atom_basis,
            n_filters,
            n_atom_basis,
            self.filter_network,
            cutoff_network=self.cutoff_network,
            activation=shifted_softplus,
            normalize_filter=normalize_filter,
        )
        # Atom-wise linear output transform.
        self.dense = Dense(n_atom_basis, n_atom_basis, bias=True, activation=None)
    def forward(self, x, r_ij, neighbors, neighbor_mask, f_ij=None):
        """Return the interaction refinement for atom features ``x``."""
        v = self.cfconv(x, r_ij, neighbors, neighbor_mask, f_ij)
        v = self.dense(v)
        return v
class SchNet(nn.Module):
    """SchNet representation network: element embeddings refined by a stack
    of residual interaction blocks over expanded interatomic distances.
    """
    def __init__(
        self,
        n_atom_basis=128,
        n_filters=128,
        n_interactions=3,
        cutoff=5.0,
        n_gaussians=25,
        normalize_filter=False,
        coupled_interactions=False,
        return_intermediate=False,
        max_z=100,
        cutoff_network=CosineCutoff,
        trainable_gaussians=False,
        distance_expansion=None,
        charged_systems=False,
    ):
        super(SchNet, self).__init__()
        self.n_atom_basis = n_atom_basis
        # Element embedding table: one n_atom_basis vector per nuclear charge.
        self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)
        # Computes interatomic distances from positions in forward().
        self.distances = AtomDistances()
        # Expansion of distances into a basis (Gaussian smearing by default).
        if distance_expansion is None:
            self.distance_expansion = GaussianSmearing(
                0.0, cutoff, n_gaussians, trainable=trainable_gaussians
            )
        else:
            self.distance_expansion = distance_expansion
        if coupled_interactions:
            # The same instance repeated -> all blocks share weights.
            self.interactions = nn.ModuleList(
                [
                    SchNetInteraction(
                        n_atom_basis=n_atom_basis,
                        n_spatial_basis=n_gaussians,
                        n_filters=n_filters,
                        cutoff_network=cutoff_network,
                        cutoff=cutoff,
                        normalize_filter=normalize_filter,
                    )
                ]
                * n_interactions
            )
        else:
            # Independent weights for every interaction block.
            self.interactions = nn.ModuleList(
                [
                    SchNetInteraction(
                        n_atom_basis=n_atom_basis,
                        n_spatial_basis=n_gaussians,
                        n_filters=n_filters,
                        cutoff_network=cutoff_network,
                        cutoff=cutoff,
                        normalize_filter=normalize_filter,
                    )
                    for _ in range(n_interactions)
                ]
            )
        self.return_intermediate = return_intermediate
        self.charged_systems = charged_systems
        if charged_systems:
            self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
            self.charge.data.normal_(0, 1.0 / n_atom_basis ** 0.5)
    def forward(self, inputs):
        """Return atom-wise representations; with return_intermediate=True,
        also return the list of representations after each block.
        """
        atomic_numbers = inputs[Properties.Z]
        positions = inputs[Properties.R]
        cell = inputs[Properties.cell]
        cell_offset = inputs[Properties.cell_offset]
        neighbors = inputs[Properties.neighbors]
        neighbor_mask = inputs[Properties.neighbor_mask]
        atom_mask = inputs[Properties.atom_mask]
        x = self.embedding(atomic_numbers)
        # NOTE(review): ``False and`` makes this branch unreachable (disabled
        # charge-embedding experiment); confirm before enabling or deleting.
        if False and self.charged_systems and Properties.charge in inputs.keys():
            n_atoms = torch.sum(atom_mask, dim=1, keepdim=True)
            charge = inputs[Properties.charge] / n_atoms
            charge = charge[:, None] * self.charge
            x = x + charge
        r_ij = self.distances(
            positions, neighbors, cell, cell_offset, neighbor_mask=neighbor_mask
        )
        f_ij = self.distance_expansion(r_ij)
        if self.return_intermediate:
            xs = [x]
        # Residual refinement by each interaction block.
        for interaction in self.interactions:
            v = interaction(x, r_ij, neighbors, neighbor_mask, f_ij=f_ij)
            x = x + v
            if self.return_intermediate:
                xs.append(x)
        if self.return_intermediate:
            return x, xs
        return x
class VoxelSchNet(nn.Module):
    """Voxel variant of SchNet: embeddings refined by VoxelSchNetInteraction
    blocks, finished by a small MLP head on the position-0 features.
    """
    def __init__(
        self,
        n_atom_basis=512,
        n_filters=512,
        n_interactions=3,
        cutoff=5.0,
        n_gaussians=300,
        normalize_filter=False,
        coupled_interactions=False,
        return_intermediate=False,
        max_z=100,
        cutoff_network=CosineCutoff,
        trainable_gaussians=False,
        distance_expansion=None,
        charged_systems=False,
    ):
        super(VoxelSchNet, self).__init__()
        self.n_atom_basis = n_atom_basis
        # Element embedding table: one n_atom_basis vector per nuclear charge.
        self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)
        # NOTE(review): appears unused -- forward() reads precomputed
        # distances from the inputs dict; confirm before removing.
        self.distances = AtomDistances()
        # Expansion of distances into a basis (Gaussian smearing by default).
        if distance_expansion is None:
            self.distance_expansion = GaussianSmearing(
                0.0, cutoff, n_gaussians, trainable=trainable_gaussians
            )
        else:
            self.distance_expansion = distance_expansion
        if coupled_interactions:
            # The same instance repeated -> all blocks share weights.
            self.interactions = nn.ModuleList(
                [
                    VoxelSchNetInteraction(
                        n_atom_basis=n_atom_basis,
                        n_spatial_basis=n_gaussians,
                        n_filters=n_filters,
                        cutoff_network=cutoff_network,
                        cutoff=cutoff,
                        normalize_filter=normalize_filter,
                    )
                ]
                * n_interactions
            )
        else:
            # Independent weights for every interaction block.
            self.interactions = nn.ModuleList(
                [
                    VoxelSchNetInteraction(
                        n_atom_basis=n_atom_basis,
                        n_spatial_basis=n_gaussians,
                        n_filters=n_filters,
                        cutoff_network=cutoff_network,
                        cutoff=cutoff,
                        normalize_filter=normalize_filter,
                    )
                    for _ in range(n_interactions)
                ]
            )
        # NOTE(review): MLP head constants (6, None, 2) are hard-coded;
        # confirm their meaning against MLP's signature.
        self.final_layer = MLP(n_atom_basis, 6, None, 2, shifted_softplus)
        self.return_intermediate = return_intermediate
        # NOTE(review): self.charge is allocated below but never used by
        # this class's forward().
        self.charged_systems = charged_systems
        if charged_systems:
            self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
            self.charge.data.normal_(0, 1.0 / n_atom_basis ** 0.5)
    def forward(self, inputs):
        """Refine embeddings through all blocks, then apply the MLP head to
        the features at position 0 (or return intermediates if configured).
        """
        atomic_numbers = inputs['atomic_numbers']
        distances = inputs['distances']
        neighbors = inputs['neighbors']
        neighbor_mask = inputs['neighbor_masks']
        x = self.embedding(atomic_numbers)
        f_ij = self.distance_expansion(distances)
        if self.return_intermediate:
            xs = [x]
        # Residual refinement by each interaction block.
        for interaction in self.interactions:
            v = interaction(x, distances, neighbors, neighbor_mask, f_ij=f_ij)
            x = x + v
            if self.return_intermediate:
                xs.append(x)
        if self.return_intermediate:
            return x, xs
        return self.final_layer(x[:, 0, :])
| true | true |
1c315eaced8aca10753a735564baefa1368a0036 | 871 | py | Python | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/ssd1675_simpletest.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 47 | 2021-02-15T23:02:36.000Z | 2022-03-04T21:30:03.000Z | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/ssd1675_simpletest.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 7 | 2021-02-19T20:00:08.000Z | 2022-01-14T10:51:12.000Z | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/ssd1675_simpletest.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 14 | 2021-02-20T17:40:56.000Z | 2022-01-01T19:53:38.000Z | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""Simple test script for the 2.13" 250x122 black and white featherwing.
Supported products:
   * Adafruit 2.13" Black and White FeatherWing
   * https://www.adafruit.com/product/4195
"""
import time
import board
import displayio
import adafruit_ssd1675
# Free any display the supervisor may still hold so the bus can be re-used.
displayio.release_displays()
# FeatherWing wiring: chip-select and data/command pins for the e-paper panel.
epd_cs = board.D9
epd_dc = board.D10
display_bus = displayio.FourWire(
    board.SPI(), command=epd_dc, chip_select=epd_cs, baudrate=1000000
)
time.sleep(1)  # brief settle time after the bus is created
display = adafruit_ssd1675.SSD1675(display_bus, width=250, height=122, rotation=90)
# Build a display group holding the on-disk test bitmap.
g = displayio.Group()
# NOTE(review): the file handle is deliberately left open -- OnDiskBitmap
# appears to read pixels from it while the image is shown; confirm before
# wrapping in a ``with`` block.
f = open("/display-ruler.bmp", "rb")
pic = displayio.OnDiskBitmap(f)
t = displayio.TileGrid(pic, pixel_shader=displayio.ColorConverter())
g.append(t)
display.show(g)
display.refresh()  # push the group to the e-paper panel
print("refreshed")
# Leave the image on screen for two minutes before the script ends.
time.sleep(120)
| 20.255814 | 83 | 0.756602 |
import time
import board
import displayio
import adafruit_ssd1675
# Release any display still owned by the supervisor before re-configuring.
displayio.release_displays()
# E-paper chip-select / data-command pins on the FeatherWing.
epd_cs = board.D9
epd_dc = board.D10
display_bus = displayio.FourWire(
    board.SPI(), command=epd_dc, chip_select=epd_cs, baudrate=1000000
)
time.sleep(1)  # short settle time
display = adafruit_ssd1675.SSD1675(display_bus, width=250, height=122, rotation=90)
g = displayio.Group()
# NOTE(review): handle left open on purpose; OnDiskBitmap seems to stream
# pixels from it while displayed -- confirm before closing it.
f = open("/display-ruler.bmp", "rb")
pic = displayio.OnDiskBitmap(f)
t = displayio.TileGrid(pic, pixel_shader=displayio.ColorConverter())
g.append(t)
display.show(g)
display.refresh()  # draw the group onto the e-paper panel
print("refreshed")
# Keep the image up for two minutes before exiting.
time.sleep(120)
| true | true |
1c315ef7cf27f9128f96932f9a59d1ce4eced390 | 409 | py | Python | config.py | sasili-adetunji/featureRequest | eb4a46f57566462cdf9a1ab513a062498d34ecd8 | [
"MIT"
] | null | null | null | config.py | sasili-adetunji/featureRequest | eb4a46f57566462cdf9a1ab513a062498d34ecd8 | [
"MIT"
] | null | null | null | config.py | sasili-adetunji/featureRequest | eb4a46f57566462cdf9a1ab513a062498d34ecd8 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
# Directory containing this file; used to anchor relative paths below.
basedir = os.path.abspath(os.path.dirname(__file__))
# Populate os.environ from a local .env file before the values are read.
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
    """Application configuration sourced from the environment.
    NOTE(review): the ``or`` fallbacks also replace empty-string values;
    the hard-coded SECRET_KEY default is for development only.
    """
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    # Default to a local SQLite database file next to this config module.
    SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI') or \
        'sqlite:///' + os.path.join(basedir, 'app.db')
    # Explicitly disabled -- presumably to avoid Flask-SQLAlchemy's
    # modification-tracking overhead/warning; confirm.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
| 31.461538 | 76 | 0.728606 | import os
from dotenv import load_dotenv
# Absolute path of the directory holding this module.
basedir = os.path.abspath(os.path.dirname(__file__))
# Read key=value pairs from .env into the process environment.
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
    """Environment-driven application settings.
    The hard-coded SECRET_KEY fallback is insecure outside development.
    """
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    # Falls back to a SQLite file beside this module when the env var is unset.
    SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI') or \
        'sqlite:///' + os.path.join(basedir, 'app.db')
    # Modification tracking turned off deliberately.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
| true | true |
1c315ff863a29b576ddb63f0c83e87ea550ff7ea | 78,587 | py | Python | keystone/identity/backends/ldap/common.py | ISCAS-VDI/keystone | 11af181c06d78026c89a873f62931558e80f3192 | [
"Apache-2.0"
] | null | null | null | keystone/identity/backends/ldap/common.py | ISCAS-VDI/keystone | 11af181c06d78026c89a873f62931558e80f3192 | [
"Apache-2.0"
] | null | null | null | keystone/identity/backends/ldap/common.py | ISCAS-VDI/keystone | 11af181c06d78026c89a873f62931558e80f3192 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import codecs
import functools
import os.path
import re
import sys
import weakref
import ldap.controls
import ldap.filter
import ldappool
from oslo_log import log
from oslo_utils import reflection
import six
from six.moves import map, zip
from keystone.common import driver_hints
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
LOG = log.getLogger(__name__)
# String booleans as stored in LDAP, mapped to Python booleans.
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
# OID of the server-side subtree-delete control.
CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
# Config option names mapped to python-ldap search scope constants.
LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
               'sub': ldap.SCOPE_SUBTREE}
# Config option names mapped to python-ldap alias dereferencing modes.
LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
              'default': None,
              'finding': ldap.DEREF_FINDING,
              'never': ldap.DEREF_NEVER,
              'searching': ldap.DEREF_SEARCHING}
# Config option names mapped to python-ldap TLS certificate-checking modes.
LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
                  'demand': ldap.OPT_X_TLS_DEMAND,
                  'allow': ldap.OPT_X_TLS_ALLOW}
# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
# indicate that no attributes should be returned besides the DN.
DN_ONLY = ['1.1']
# Cached UTF-8 codec entry point used by utf8_encode() below.
_utf8_encoder = codecs.getencoder('utf-8')
def utf8_encode(value):
    """Return *value* as a UTF-8 encoded byte string.

    Unicode text is encoded to UTF-8; a byte string is assumed to be
    encoded already and is returned untouched.

    :param value: A basestring
    :returns: UTF-8 encoded version of value
    :raises TypeError: If value is not basestring
    """
    if isinstance(value, six.binary_type):
        return value
    if isinstance(value, six.text_type):
        return _utf8_encoder(value)[0]
    cls_name = reflection.get_class_name(value, fully_qualified=False)
    raise TypeError("value must be basestring, "
                    "not %s" % cls_name)
# Cached UTF-8 codec entry point used by utf8_decode() below.
_utf8_decoder = codecs.getdecoder('utf-8')
def utf8_decode(value):
    """Return *value* as a unicode string.

    A byte string is assumed to be UTF-8 encoded and is decoded; any
    other value is converted to unicode via ``six.text_type``.

    :param value: value to be returned as unicode
    :returns: value as unicode
    :raises UnicodeDecodeError: for invalid UTF-8 encoding
    """
    if not isinstance(value, six.binary_type):
        return six.text_type(value)
    return _utf8_decoder(value)[0]
def py2ldap(val):
    """Type convert a Python value to a type accepted by LDAP (unicode).

    The LDAP API only accepts strings, so booleans become the LDAP
    literals 'TRUE'/'FALSE' and everything else becomes its unicode
    string representation. A later step handles the UTF-8 encoding
    required by python-ldap.

    :param val: The value to convert to a LDAP string representation
    :returns: unicode string representation of value.
    """
    if not isinstance(val, bool):
        return six.text_type(val)
    return u'TRUE' if val else u'FALSE'
def enabled2py(val):
    """Convert an LDAP 'enabled' attribute value to a Python type.

    Tries, in order: the LDAP boolean literals ('TRUE'/'FALSE'), an
    integer, and finally a plain UTF-8 decoded string.
    """
    if val in LDAP_VALUES:
        return LDAP_VALUES[val]
    try:
        return int(val)
    except ValueError:  # nosec
        # Not an int either; fall through to a plain string.
        return utf8_decode(val)
def ldap2py(val):
    """Convert an LDAP formatted value to the Python type OpenStack uses.

    Virtually all LDAP values are stored as UTF-8 encoded strings and
    OpenStack prefers unicode, so this simply decodes.

    :param val: LDAP formatted value
    :returns: val converted to preferred Python type
    """
    return utf8_decode(val)
def convert_ldap_result(ldap_result):
    """Convert an LDAP search result to Python types used by OpenStack.

    Each result tuple is of the form (dn, attrs): dn is a string with
    the entry's distinguished name and attrs maps attribute names to
    lists of string values. DNs are decoded from UTF-8 to unicode and
    attribute values are converted to the types OpenStack prefers
    (unicode strings; booleans/ints for the 'enabled' attribute).

    Referral objects (result tuples whose dn is None) are skipped, and
    a debug message is emitted if any were encountered.

    :param ldap_result: LDAP search result
    :returns: list of 2-tuples containing (dn, attrs) where dn is
              unicode and attrs is a dict with type-converted values.
    """
    py_result = []
    seen_referral = False
    for dn, attrs in ldap_result:
        if dn is None:
            # this is a Referral object, rather than an Entry object
            seen_referral = True
            continue
        converted = {}
        for kind, values in attrs.items():
            converter = enabled2py if kind == 'enabled' else ldap2py
            try:
                converted[kind] = [converter(v) for v in values]
            except UnicodeDecodeError:
                LOG.debug('Unable to decode value for attribute %s', kind)
        py_result.append((utf8_decode(dn), converted))
    if seen_referral:
        LOG.debug(('Referrals were returned and ignored. Enable referral '
                   'chasing in keystone.conf via [ldap] chase_referrals'))
    return py_result
def safe_iter(attrs):
    """Yield the items of *attrs*, treating it uniformly as a sequence.

    ``None`` yields nothing, a list yields each of its elements, and
    any other value is yielded once as a single item.
    """
    if attrs is None:
        return
    if not isinstance(attrs, list):
        yield attrs
        return
    for item in attrs:
        yield item
def parse_deref(opt):
    """Map a config deref option name to its python-ldap constant.

    :param opt: one of the keys of LDAP_DEREF
    :returns: the corresponding ldap.DEREF_* constant (or None)
    :raises ValueError: if opt is not a recognized option name
    """
    if opt not in LDAP_DEREF:
        raise ValueError(_('Invalid LDAP deref option: %(option)s. '
                           'Choose one of: %(options)s') %
                         {'option': opt,
                          'options': ', '.join(LDAP_DEREF.keys()), })
    return LDAP_DEREF[opt]
def parse_tls_cert(opt):
    """Map a config TLS cert option name to its python-ldap constant.

    :param opt: one of the keys of LDAP_TLS_CERTS
    :returns: the corresponding ldap.OPT_X_TLS_* constant
    :raises ValueError: if opt is not a recognized option name
    """
    if opt not in LDAP_TLS_CERTS:
        raise ValueError(_(
            'Invalid LDAP TLS certs option: %(option)s. '
            'Choose one of: %(options)s') % {
                'option': opt,
                'options': ', '.join(LDAP_TLS_CERTS.keys())})
    return LDAP_TLS_CERTS[opt]
def ldap_scope(scope):
    """Map a config search scope name to its python-ldap constant.

    :param scope: one of the keys of LDAP_SCOPES ('one' or 'sub')
    :returns: the corresponding ldap.SCOPE_* constant
    :raises ValueError: if scope is not a recognized scope name
    """
    if scope not in LDAP_SCOPES:
        raise ValueError(
            _('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
                'scope': scope,
                'options': ', '.join(LDAP_SCOPES.keys())})
    return LDAP_SCOPES[scope]
def prep_case_insensitive(value):
    """Normalize a string for case-insensitive comparison (RFC4518).

    For simplicity this lowercases all characters, strips leading and
    trailing whitespace, and collapses each internal whitespace run to
    a single space.
    """
    normalized = value.strip().lower()
    return re.sub(r'\s+', ' ', normalized)
def is_ava_value_equal(attribute_type, val1, val2):
    """Return True if and only if the AVA values are equal.

    The equality matching rule of the attribute type should really be
    consulted; for simplicity this does a case-insensitive comparison,
    so the limitations of prep_case_insensitive apply here as well.
    """
    lhs = prep_case_insensitive(val1)
    rhs = prep_case_insensitive(val2)
    return lhs == rhs
def is_rdn_equal(rdn1, rdn2):
    """Return True if and only if the RDNs are equal.

    Two RDNs are equal when they contain the same number of AVAs and,
    for every attribute type present, the AVA values are equal (order
    is not significant; a valid DN holds each attribute type at most
    once per RDN). Attribute type comparison is case-insensitive only,
    which is simpler than the full rules (e.g. cn, commonName and
    2.5.4.3 all name the same attribute but are not unified here).

    Values are compared via is_ava_value_equal, so its limitations
    apply here too.
    """
    if len(rdn1) != len(rdn2):
        return False
    for attr_type_1, val1, dummy in rdn1:
        for attr_type_2, val2, dummy in rdn2:
            if attr_type_1.lower() == attr_type_2.lower():
                if not is_ava_value_equal(attr_type_1, val1, val2):
                    return False
                break
        else:
            # No AVA in rdn2 has this attribute type.
            return False
    return True
def is_dn_equal(dn1, dn2):
    """Return True if and only if the DNs are equal.

    Two DNs are equal if they contain the same number of RDNs and the
    RDNs match position by position (RFC4517). RDN comparison is done
    by is_rdn_equal, so its limitations apply here too.

    :param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
    :param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
    """
    if not isinstance(dn1, list):
        dn1 = ldap.dn.str2dn(utf8_encode(dn1))
    if not isinstance(dn2, list):
        dn2 = ldap.dn.str2dn(utf8_encode(dn2))
    if len(dn1) != len(dn2):
        return False
    return all(is_rdn_equal(rdn1, rdn2) for rdn1, rdn2 in zip(dn1, dn2))
def dn_startswith(descendant_dn, dn):
    """Return True if and only if descendant_dn lies strictly under dn.

    :param descendant_dn: Either a string DN or a DN parsed by
                          ldap.dn.str2dn.
    :param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
    """
    if not isinstance(descendant_dn, list):
        descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
    if not isinstance(dn, list):
        dn = ldap.dn.str2dn(utf8_encode(dn))
    # A descendant must be strictly longer than its ancestor.
    if len(descendant_dn) <= len(dn):
        return False
    # Compare only the trailing len(dn) RDNs of the descendant.
    suffix = descendant_dn[-len(dn):]
    return is_dn_equal(suffix, dn)
@six.add_metaclass(abc.ABCMeta)
class LDAPHandler(object):
    """Abstract class which defines methods for a LDAP API provider.

    Native Keystone values cannot be passed directly into and from the
    python-ldap API. Type conversion must occur at the LDAP API
    boudary, examples of type conversions are:

        * booleans map to the strings 'TRUE' and 'FALSE'
        * integer values map to their string representation.
        * unicode strings are encoded in UTF-8

    In addition to handling type conversions at the API boundary we
    have the requirement to support more than one LDAP API
    provider. Currently we have:

        * python-ldap, this is the standard LDAP API for Python, it
          requires access to a live LDAP server.
        * Fake LDAP which emulates python-ldap. This is used for
          testing without requiring a live LDAP server.

    To support these requirements we need a layer that performs type
    conversions and then calls another LDAP API which is configurable
    (e.g. either python-ldap or the fake emulation).

    We have an additional constraint at the time of this writing due to
    limitations in the logging module. The logging module is not
    capable of accepting UTF-8 encoded strings, it will throw an
    encoding exception. Therefore all logging MUST be performed prior
    to UTF-8 conversion. This means no logging can be performed in the
    ldap APIs that implement the python-ldap API because those APIs
    are defined to accept only UTF-8 strings. Thus the layer which
    performs type conversions must also do the logging. We do the type
    conversions in two steps, once to convert all Python types to
    unicode strings, then log, then convert the unicode strings to
    UTF-8.

    There are a variety of ways one could accomplish this, we elect to
    use a chaining technique whereby instances of this class simply
    call the next member in the chain via the "conn" attribute. The
    chain is constructed by passing in an existing instance of this
    class as the conn attribute when the class is instantiated.

    Here is a brief explanation of why other possible approaches were
    not used:

        subclassing

            To perform the wrapping operations in the correct order
            the type convesion class would have to subclass each of
            the API providers. This is awkward, doubles the number of
            classes, and does not scale well. It requires the type
            conversion class to be aware of all possible API
            providers.

        decorators

            Decorators provide an elegant solution to wrap methods and
            would be an ideal way to perform type conversions before
            calling the wrapped function and then converting the
            values returned from the wrapped function. However
            decorators need to be aware of the method signature, it
            has to know what input parameters need conversion and how
            to convert the result. For an API like python-ldap which
            has a large number of different method signatures it would
            require a large number of specialized
            decorators. Experience has shown it's very easy to apply
            the wrong decorator due to the inherent complexity and
            tendency to cut-n-paste code. Another option is to
            parameterize the decorator to make it "smart". Experience
            has shown such decorators become insanely complicated and
            difficult to understand and debug. Also decorators tend to
            hide what's really going on when a method is called, the
            operations being performed are not visible when looking at
            the implemation of a decorated method, this too experience
            has shown leads to mistakes.

    Chaining simplifies both wrapping to perform type conversion as
    well as the substitution of alternative API providers. One simply
    creates a new instance of the API interface and insert it at the
    front of the chain. Type conversions are explicit and obvious.

    If a new method needs to be added to the API interface one adds it
    to the abstract class definition. Should one miss adding the new
    method to any derivations of the abstract class the code will fail
    to load and run making it impossible to forget updating all the
    derived classes.
    """

    @abc.abstractmethod
    def __init__(self, conn=None):
        # 'conn' is the next LDAPHandler in the chain (or the raw
        # python-ldap connection at the end of it).
        self.conn = conn

    @abc.abstractmethod
    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None, pool_retry_max=None,
                pool_retry_delay=None, pool_conn_timeout=None,
                pool_conn_lifetime=None):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def set_option(self, option, invalue):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_option(self, option):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def unbind_s(self):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def add_s(self, dn, modlist):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def search_s(self, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
                resp_ctrl_classes=None):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def modify_s(self, dn, modlist):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_s(self, dn):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
        raise exception.NotImplemented()  # pragma: no cover
class PythonLDAPHandler(LDAPHandler):
    """LDAPHandler implementation which calls the python-ldap API.

    Note, the python-ldap API requires all string values to be UTF-8 encoded.
    The KeystoneLDAPHandler enforces this prior to invoking the methods in this
    class.
    """

    def __init__(self, conn=None):
        super(PythonLDAPHandler, self).__init__(conn=conn)

    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None, pool_retry_max=None,
                pool_retry_delay=None, pool_conn_timeout=None,
                pool_conn_lifetime=None):
        """Create and configure the underlying python-ldap connection.

        The pool_* and use_pool arguments are part of the common
        LDAPHandler signature but are not used by this (non-pooled)
        implementation.
        """
        _common_ldap_initialization(url=url,
                                    use_tls=use_tls,
                                    tls_cacertfile=tls_cacertfile,
                                    tls_cacertdir=tls_cacertdir,
                                    tls_req_cert=tls_req_cert,
                                    debug_level=debug_level)
        self.conn = ldap.initialize(url)
        self.conn.protocol_version = ldap.VERSION3
        if alias_dereferencing is not None:
            self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
        self.page_size = page_size
        if use_tls:
            # StartTLS over a plain ldap:// connection; mutually
            # exclusive with an ldaps:// URL (checked in
            # _common_ldap_initialization).
            self.conn.start_tls_s()
        if chase_referrals is not None:
            self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))

    # The remaining methods are direct 1:1 delegations to the live
    # python-ldap connection object created in connect().

    def set_option(self, option, invalue):
        return self.conn.set_option(option, invalue)

    def get_option(self, option):
        return self.conn.get_option(option)

    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)

    def unbind_s(self):
        return self.conn.unbind_s()

    def add_s(self, dn, modlist):
        return self.conn.add_s(dn, modlist)

    def search_s(self, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        return self.conn.search_s(base, scope, filterstr,
                                  attrlist, attrsonly)

    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        return self.conn.search_ext(base, scope,
                                    filterstr, attrlist, attrsonly,
                                    serverctrls, clientctrls,
                                    timeout, sizelimit)

    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
                resp_ctrl_classes=None):
        # The resp_ctrl_classes parameter is a recent addition to the
        # API. It defaults to None. We do not anticipate using it.
        # To run with older versions of python-ldap we do not pass it.
        return self.conn.result3(msgid, all, timeout)

    def modify_s(self, dn, modlist):
        return self.conn.modify_s(dn, modlist)

    def delete_s(self, dn):
        return self.conn.delete_s(dn)

    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
        return self.conn.delete_ext_s(dn, serverctrls, clientctrls)
def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
                                tls_cacertdir=None, tls_req_cert=None,
                                debug_level=None):
    """LDAP initialization for PythonLDAPHandler and PooledLDAPHandler.

    Validates the TLS-related options and applies them as process-wide
    python-ldap defaults (ldap.set_option on the module) before any
    connection is created.

    :param url: LDAP connection URL (ldap:// or ldaps://)
    :param use_tls: whether StartTLS will be used; mutually exclusive
        with an ldaps:// URL
    :param tls_cacertfile: path to a CA certificate file, if configured
    :param tls_cacertdir: path to a CA certificate directory, if
        configured (only consulted when tls_cacertfile is not set)
    :param tls_req_cert: one of the ldap.OPT_X_TLS_* constants
    :param debug_level: python-ldap debug level, if configured
    :raises AssertionError: if use_tls is combined with an ldaps:// URL
    :raises ValueError: if TLS is requested but python-ldap lacks TLS
    :raises IOError: if the configured CA file/directory does not exist
    """
    LOG.debug("LDAP init: url=%s", url)
    LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
              'tls_req_cert=%s tls_avail=%s',
              use_tls, tls_cacertfile, tls_cacertdir,
              tls_req_cert, ldap.TLS_AVAIL)
    if debug_level is not None:
        ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
    using_ldaps = url.lower().startswith("ldaps")
    if use_tls and using_ldaps:
        raise AssertionError(_('Invalid TLS / LDAPS combination'))
    # The certificate trust options apply for both LDAPS and TLS.
    if use_tls or using_ldaps:
        if not ldap.TLS_AVAIL:
            raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
                               'not available') % ldap.TLS_AVAIL)
        if tls_cacertfile:
            # NOTE(topol)
            # python ldap TLS does not verify CACERTFILE or CACERTDIR
            # so we add some extra simple sanity check verification
            # Also, setting these values globally (i.e. on the ldap object)
            # works but these values are ignored when setting them on the
            # connection
            if not os.path.isfile(tls_cacertfile):
                raise IOError(_("tls_cacertfile %s not found "
                                "or is not a file") %
                              tls_cacertfile)
            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
        elif tls_cacertdir:
            # NOTE(topol)
            # python ldap TLS does not verify CACERTFILE or CACERTDIR
            # so we add some extra simple sanity check verification
            # Also, setting these values globally (i.e. on the ldap object)
            # works but these values are ignored when setting them on the
            # connection
            if not os.path.isdir(tls_cacertdir):
                raise IOError(_("tls_cacertdir %s not found "
                                "or is not a directory") %
                              tls_cacertdir)
            ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
        if tls_req_cert in list(LDAP_TLS_CERTS.values()):
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
        else:
            # An unrecognized value is logged and ignored rather than
            # treated as fatal.
            LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
                      tls_req_cert)
class MsgId(list):
    """Wrapper class to hold connection and msgid.

    A 2-item list of (connection, message id) produced by
    PooledLDAPHandler.search_ext and consumed by its result3. A list
    subclass is used because plain list instances cannot carry weak
    references, which search_ext relies on to release the pooled
    connection once the result has been read.
    """
    pass
def use_conn_pool(func):
    """Use this only for connection pool specific ldap API.

    This adds connection object to decorated API as next argument after
    self.

    :param func: a PooledLDAPHandler method expecting
        (self, conn, *args, **kwargs)
    :returns: a wrapper that acquires a pooled connection, applies any
        deferred connection options, and invokes func with it
    """
    # functools.wraps preserves the wrapped method's name/docstring,
    # which the original wrapper discarded (it reported as 'wrapper').
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # assert isinstance(self, PooledLDAPHandler)
        with self._get_pool_connection() as conn:
            self._apply_options(conn)
            return func(self, conn, *args, **kwargs)
    return wrapper
class PooledLDAPHandler(LDAPHandler):
    """LDAPHandler implementation which uses pooled connection manager.

    Pool specific configuration is defined in [ldap] section.
    All other LDAP configuration is still used from [ldap] section

    Keystone LDAP authentication logic authenticates an end user using its DN
    and password via LDAP bind to establish supplied password is correct.
    This can fill up the pool quickly (as pool re-uses existing connection
    based on its bind data) and would not leave space in pool for connection
    re-use for other LDAP operations.
    Now a separate pool can be established for those requests when related flag
    'use_auth_pool' is enabled. That pool can have its own size and
    connection lifetime. Other pool attributes are shared between those pools.
    If 'use_pool' is disabled, then 'use_auth_pool' does not matter.
    If 'use_auth_pool' is not enabled, then connection pooling is not used for
    those LDAP operations.

    Note, the python-ldap API requires all string values to be UTF-8
    encoded. The KeystoneLDAPHandler enforces this prior to invoking
    the methods in this class.
    """

    # Added here to allow override for testing
    Connector = ldappool.StateConnector
    auth_pool_prefix = 'auth_pool_'
    connection_pools = {}  # static connector pool dict, shared by all
    # instances and keyed on the (possibly prefixed) connection URL

    def __init__(self, conn=None, use_auth_pool=False):
        super(PooledLDAPHandler, self).__init__(conn=conn)
        # Bind credentials from the most recent simple_bind_s(); the
        # pool hands out connections keyed on (who, cred).
        self.who = ''
        self.cred = ''
        self.conn_options = {}  # connection specific options
        self.page_size = None
        self.use_auth_pool = use_auth_pool
        self.conn_pool = None

    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None, pool_retry_max=None,
                pool_retry_delay=None, pool_conn_timeout=None,
                pool_conn_lifetime=None):
        """Validate TLS options and create/reuse the pool for url."""
        _common_ldap_initialization(url=url,
                                    use_tls=use_tls,
                                    tls_cacertfile=tls_cacertfile,
                                    tls_cacertdir=tls_cacertdir,
                                    tls_req_cert=tls_req_cert,
                                    debug_level=debug_level)
        self.page_size = page_size
        # Following two options are not added in common initialization as they
        # need to follow a sequence in PythonLDAPHandler code.
        if alias_dereferencing is not None:
            self.set_option(ldap.OPT_DEREF, alias_dereferencing)
        if chase_referrals is not None:
            self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
        if self.use_auth_pool:  # separate pool when use_auth_pool enabled
            pool_url = self.auth_pool_prefix + url
        else:
            pool_url = url
        try:
            self.conn_pool = self.connection_pools[pool_url]
        except KeyError:
            # First handler for this URL: create the shared pool.
            self.conn_pool = ldappool.ConnectionManager(
                url,
                size=pool_size,
                retry_max=pool_retry_max,
                retry_delay=pool_retry_delay,
                timeout=pool_conn_timeout,
                connector_cls=self.Connector,
                use_tls=use_tls,
                max_lifetime=pool_conn_lifetime)
            self.connection_pools[pool_url] = self.conn_pool

    def set_option(self, option, invalue):
        # Options are stashed and applied lazily to each pooled
        # connection in _apply_options().
        self.conn_options[option] = invalue

    def get_option(self, option):
        value = self.conn_options.get(option)
        # if option was not specified explicitly, then use connection default
        # value for that option if there.
        if value is None:
            with self._get_pool_connection() as conn:
                value = conn.get_option(option)
        return value

    def _apply_options(self, conn):
        # if connection has a lifetime, then it already has options specified
        # NOTE(review): assumes options were applied while the connection
        # was young (lifetime <= 30) -- confirm threshold against ldappool.
        if conn.get_lifetime() > 30:
            return
        for option, invalue in self.conn_options.items():
            conn.set_option(option, invalue)

    def _get_pool_connection(self):
        # Returns a context manager yielding a bound connection for the
        # current (who, cred) pair.
        return self.conn_pool.connection(self.who, self.cred)

    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        # Not using use_conn_pool decorator here as this API takes cred as
        # input.
        self.who = who
        self.cred = cred
        # Acquiring a pooled connection with the stored credentials
        # performs (or reuses) the actual bind.
        with self._get_pool_connection() as conn:
            self._apply_options(conn)

    def unbind_s(self):
        # After connection generator is done `with` statement execution block
        # connection is always released via finally block in ldappool.
        # So this unbind is a no op.
        pass

    @use_conn_pool
    def add_s(self, conn, dn, modlist):
        return conn.add_s(dn, modlist)

    @use_conn_pool
    def search_s(self, conn, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        return conn.search_s(base, scope, filterstr, attrlist,
                             attrsonly)

    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        """Return a ``MsgId`` instance, it asynchronous API.

        The ``MsgId`` instance can be safely used in a call to ``result3()``.

        To work with ``result3()`` API in predictable manner, the same LDAP
        connection is needed which originally provided the ``msgid``. So, this
        method wraps the existing connection and ``msgid`` in a new ``MsgId``
        instance. The connection associated with ``search_ext`` is released
        once last hard reference to the ``MsgId`` instance is freed.
        """
        conn_ctxt = self._get_pool_connection()
        conn = conn_ctxt.__enter__()
        try:
            msgid = conn.search_ext(base, scope,
                                    filterstr, attrlist, attrsonly,
                                    serverctrls, clientctrls,
                                    timeout, sizelimit)
        except Exception:
            conn_ctxt.__exit__(*sys.exc_info())
            raise
        res = MsgId((conn, msgid))
        # Release the pooled connection back only when the MsgId is
        # garbage collected (i.e. after result3 has consumed it).
        weakref.ref(res, functools.partial(conn_ctxt.__exit__,
                                           None, None, None))
        return res

    def result3(self, msgid, all=1, timeout=None,
                resp_ctrl_classes=None):
        """Wait for and return the result.

        This method returns the result of an operation previously initiated by
        one of the LDAP asynchronous operation routines (eg search_ext()). It
        returned an invocation identifier (a message id) upon successful
        initiation of their operation.

        Input msgid is expected to be instance of class MsgId which has LDAP
        session/connection used to execute search_ext and message idenfier.

        The connection associated with search_ext is released once last hard
        reference to MsgId object is freed. This will happen when function
        which requested msgId and used it in result3 exits.
        """
        conn, msg_id = msgid
        return conn.result3(msg_id, all, timeout)

    @use_conn_pool
    def modify_s(self, conn, dn, modlist):
        return conn.modify_s(dn, modlist)

    @use_conn_pool
    def delete_s(self, conn, dn):
        return conn.delete_s(dn)

    @use_conn_pool
    def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
        return conn.delete_ext_s(dn, serverctrls, clientctrls)
class KeystoneLDAPHandler(LDAPHandler):
"""Convert data types and perform logging.
This LDAP inteface wraps the python-ldap based interfaces. The
python-ldap interfaces require string values encoded in UTF-8. The
OpenStack logging framework at the time of this writing is not
capable of accepting strings encoded in UTF-8, the log functions
will throw decoding errors if a non-ascii character appears in a
string.
Prior to the call Python data types are converted to a string
representation as required by the LDAP APIs.
Then logging is performed so we can track what is being
sent/received from LDAP. Also the logging filters security
sensitive items (i.e. passwords).
Then the string values are encoded into UTF-8.
Then the LDAP API entry point is invoked.
Data returned from the LDAP call is converted back from UTF-8
encoded strings into the Python data type used internally in
OpenStack.
"""
def __init__(self, conn=None):
super(KeystoneLDAPHandler, self).__init__(conn=conn)
self.page_size = 0
def __enter__(self):
"""Enter runtime context."""
return self
def _disable_paging(self):
# Disable the pagination from now on
self.page_size = 0
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None,
pool_retry_max=None, pool_retry_delay=None,
pool_conn_timeout=None, pool_conn_lifetime=None):
self.page_size = page_size
return self.conn.connect(url, page_size, alias_dereferencing,
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, chase_referrals,
debug_level=debug_level,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=pool_retry_max,
pool_retry_delay=pool_retry_delay,
pool_conn_timeout=pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime)
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
LOG.debug("LDAP bind: who=%s", who)
who_utf8 = utf8_encode(who)
cred_utf8 = utf8_encode(cred)
return self.conn.simple_bind_s(who_utf8, cred_utf8,
serverctrls=serverctrls,
clientctrls=clientctrls)
def unbind_s(self):
LOG.debug("LDAP unbind")
return self.conn.unbind_s()
def add_s(self, dn, modlist):
ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
for kind, values in modlist]
logging_attrs = [(kind, values
if kind != 'userPassword'
else ['****'])
for kind, values in ldap_attrs]
LOG.debug('LDAP add: dn=%s attrs=%s',
dn, logging_attrs)
dn_utf8 = utf8_encode(dn)
ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
for kind, values in ldap_attrs]
return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
# NOTE(morganfainberg): Remove "None" singletons from this list, which
# allows us to set mapped attributes to "None" as defaults in config.
# Without this filtering, the ldap query would raise a TypeError since
# attrlist is expected to be an iterable of strings.
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s',
base, scope, filterstr, attrlist, attrsonly)
if self.page_size:
ldap_result = self._paged_search_s(base, scope,
filterstr, attrlist)
else:
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist_utf8 = list(map(utf8_encode, attrlist))
ldap_result = self.conn.search_s(base_utf8, scope,
filterstr_utf8,
attrlist_utf8, attrsonly)
py_result = convert_ldap_result(ldap_result)
return py_result
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s '
'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
base, scope, filterstr, attrlist, attrsonly,
serverctrls, clientctrls, timeout, sizelimit)
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def _paged_search_s(self, base, scope, filterstr, attrlist=None):
res = []
use_old_paging_api = False
# The API for the simple paged results control changed between
# python-ldap 2.3 and 2.4. We need to detect the capabilities
# of the python-ldap version we are using.
if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
use_old_paging_api = True
lc = ldap.controls.SimplePagedResultsControl(
controlType=ldap.LDAP_CONTROL_PAGE_OID,
criticality=True,
controlValue=(self.page_size, ''))
page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
else:
lc = ldap.controls.libldap.SimplePagedResultsControl(
criticality=True,
size=self.page_size,
cookie='')
page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist = [attr for attr in attrlist if attr is not None]
attrlist_utf8 = list(map(utf8_encode, attrlist))
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
# Endless loop request pages on ldap server until it has no data
while True:
# Request to the ldap server a page with 'page_size' entries
rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
# Receive the data
res.extend(rdata)
pctrls = [c for c in serverctrls
if c.controlType == page_ctrl_oid]
if pctrls:
# LDAP server supports pagination
if use_old_paging_api:
est, cookie = pctrls[0].controlValue
lc.controlValue = (self.page_size, cookie)
else:
cookie = lc.cookie = pctrls[0].cookie
if cookie:
# There is more data still on the server
# so we request another page
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
else:
# Exit condition no more data on server
break
else:
LOG.warning(_LW('LDAP Server does not support paging. '
'Disable paging in keystone.conf to '
'avoid this message.'))
self._disable_paging()
break
return res
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
'resp_ctrl_classes=%s ldap_result=%s',
msgid, all, timeout, resp_ctrl_classes, ldap_result)
# ldap_result returned from result3 is a tuple of
# (rtype, rdata, rmsgid, serverctrls). We don't need use of these,
# except rdata.
rtype, rdata, rmsgid, serverctrls = ldap_result
py_result = convert_ldap_result(rdata)
return py_result
def modify_s(self, dn, modlist):
    """Modify an entry, serializing and UTF-8 encoding the DN and values.

    userPassword values are masked as '****' in the debug log.
    """
    ldap_modlist = [
        (op, kind, (None if values is None
                    else [py2ldap(x) for x in safe_iter(values)]))
        for op, kind, values in modlist]

    logging_modlist = [(op, kind, (values if kind != 'userPassword'
                       else ['****']))
                       for op, kind, values in ldap_modlist]
    LOG.debug('LDAP modify: dn=%s modlist=%s',
              dn, logging_modlist)

    dn_utf8 = utf8_encode(dn)
    ldap_modlist_utf8 = [
        (op, kind, (None if values is None
                    else [utf8_encode(x) for x in safe_iter(values)]))
        for op, kind, values in ldap_modlist]
    return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)

def delete_s(self, dn):
    """Delete an entry by DN (UTF-8 encoded before the call)."""
    LOG.debug("LDAP delete: dn=%s", dn)
    dn_utf8 = utf8_encode(dn)
    return self.conn.delete_s(dn_utf8)

def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
    """Delete an entry by DN with optional server/client controls."""
    LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
              dn, serverctrls, clientctrls)
    dn_utf8 = utf8_encode(dn)
    return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)

def __exit__(self, exc_type, exc_val, exc_tb):
    """Exit runtime context, unbind LDAP."""
    self.unbind_s()
# Registry mapping a connection-URL prefix to an LDAPHandler factory,
# populated via register_handler().
_HANDLERS = {}


def register_handler(prefix, handler):
    """Register an LDAPHandler factory for URLs starting with *prefix*."""
    _HANDLERS[prefix] = handler


def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
    """Return a raw LDAPHandler for *conn_url*.

    A registered handler matching the URL prefix wins; otherwise a
    pooled or plain python-ldap handler is created.
    """
    for prefix, handler in _HANDLERS.items():
        if conn_url.startswith(prefix):
            return handler()

    if use_pool:
        return PooledLDAPHandler(use_auth_pool=use_auth_pool)
    else:
        return PythonLDAPHandler()
def filter_entity(entity_ref):
    """Strip LDAP-internal fields from an entity dict.

    The 'dn' key is used internally by the LDAP backend and must not be
    returned to API users; it is removed in place when present.

    :param entity_ref: the entity dictionary (may be None or empty);
        modified in place.
    :returns: the same entity_ref
    """
    if not entity_ref:
        return entity_ref
    entity_ref.pop('dn', None)
    return entity_ref
class BaseLdap(object):
    """Base class for an LDAP-backed driver for one type of object.

    Subclasses override the DEFAULT_* values, ``options_name``, ``model``
    and the attribute-mapping dicts; ``__init__`` then resolves the
    per-object-type options (``<options_name>_tree_dn`` etc.) from
    ``conf.ldap``.
    """

    DEFAULT_OU = None
    DEFAULT_STRUCTURAL_CLASSES = None
    DEFAULT_ID_ATTR = 'cn'
    DEFAULT_OBJECTCLASS = None
    DEFAULT_FILTER = None
    DEFAULT_EXTRA_ATTR_MAPPING = []
    DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
    NotFound = None
    notfound_arg = None
    options_name = None
    model = None
    attribute_options_names = {}
    immutable_attrs = []
    attribute_ignore = []
    tree_dn = None

    def __init__(self, conf):
        """Load LDAP connection, TLS, pooling and per-object-type options
        from *conf.ldap*.
        """
        self.LDAP_URL = conf.ldap.url
        self.LDAP_USER = conf.ldap.user
        self.LDAP_PASSWORD = conf.ldap.password
        self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
        self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
        self.page_size = conf.ldap.page_size
        self.use_tls = conf.ldap.use_tls
        self.tls_cacertfile = conf.ldap.tls_cacertfile
        self.tls_cacertdir = conf.ldap.tls_cacertdir
        self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
        self.attribute_mapping = {}
        self.chase_referrals = conf.ldap.chase_referrals
        self.debug_level = conf.ldap.debug_level

        # LDAP Pool specific attribute
        self.use_pool = conf.ldap.use_pool
        self.pool_size = conf.ldap.pool_size
        self.pool_retry_max = conf.ldap.pool_retry_max
        self.pool_retry_delay = conf.ldap.pool_retry_delay
        self.pool_conn_timeout = conf.ldap.pool_connection_timeout
        self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime

        # End user authentication pool specific config attributes
        self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
        self.auth_pool_size = conf.ldap.auth_pool_size
        self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime

        if self.options_name is not None:
            self.suffix = conf.ldap.suffix
            dn = '%s_tree_dn' % self.options_name
            self.tree_dn = (getattr(conf.ldap, dn)
                            or '%s,%s' % (self.DEFAULT_OU, self.suffix))

            idatt = '%s_id_attribute' % self.options_name
            self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR

            objclass = '%s_objectclass' % self.options_name
            self.object_class = (getattr(conf.ldap, objclass)
                                 or self.DEFAULT_OBJECTCLASS)

            for k, v in self.attribute_options_names.items():
                v = '%s_%s_attribute' % (self.options_name, v)
                self.attribute_mapping[k] = getattr(conf.ldap, v)

            attr_mapping_opt = ('%s_additional_attribute_mapping' %
                                self.options_name)
            attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
                            or self.DEFAULT_EXTRA_ATTR_MAPPING)
            self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)

            ldap_filter = '%s_filter' % self.options_name
            self.ldap_filter = getattr(conf.ldap,
                                       ldap_filter) or self.DEFAULT_FILTER

            allow_create = '%s_allow_create' % self.options_name
            self.allow_create = getattr(conf.ldap, allow_create)

            allow_update = '%s_allow_update' % self.options_name
            self.allow_update = getattr(conf.ldap, allow_update)

            allow_delete = '%s_allow_delete' % self.options_name
            self.allow_delete = getattr(conf.ldap, allow_delete)

            member_attribute = '%s_member_attribute' % self.options_name
            self.member_attribute = getattr(conf.ldap, member_attribute, None)

            self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES

            if self.notfound_arg is None:
                self.notfound_arg = self.options_name + '_id'

            attribute_ignore = '%s_attribute_ignore' % self.options_name
            self.attribute_ignore = getattr(conf.ldap, attribute_ignore)

        self.use_dumb_member = conf.ldap.use_dumb_member
        self.dumb_member = (conf.ldap.dumb_member or
                            self.DUMB_MEMBER_DN)

        self.subtree_delete_enabled = conf.ldap.allow_subtree_delete

    def _not_found(self, object_id):
        """Build the subclass-specific NotFound exception for *object_id*."""
        if self.NotFound is None:
            return exception.NotFound(target=object_id)
        else:
            return self.NotFound(**{self.notfound_arg: object_id})

    def _parse_extra_attrs(self, option_list):
        """Parse 'ldap_attr:keystone_attr' strings into a mapping dict.

        Malformed entries are logged as warnings and skipped.
        """
        mapping = {}
        for item in option_list:
            try:
                ldap_attr, attr_map = item.split(':')
            except Exception:
                LOG.warning(_LW(
                    'Invalid additional attribute mapping: "%s". '
                    'Format must be <ldap_attribute>:<keystone_attribute>'),
                    item)
                continue
            mapping[ldap_attr] = attr_map
        return mapping

    def _is_dumb_member(self, member_dn):
        """Check that member is a dumb member.

        :param member_dn: DN of member to be checked.
        """
        return (self.use_dumb_member
                and is_dn_equal(member_dn, self.dumb_member))
def get_connection(self, user=None, password=None, end_user_auth=False):
    """Open, configure and bind a KeystoneLDAPHandler connection.

    :param user: bind DN; defaults to the configured service user.
    :param password: bind password; defaults to the configured one.
    :param end_user_auth: when True, use the end-user auth pool
        settings instead, or disable pooling if use_auth_pool is off.
    """
    use_pool = self.use_pool
    pool_size = self.pool_size
    pool_conn_lifetime = self.pool_conn_lifetime
    if end_user_auth:
        if not self.use_auth_pool:
            use_pool = False
        else:
            pool_size = self.auth_pool_size
            pool_conn_lifetime = self.auth_pool_conn_lifetime

    conn = _get_connection(self.LDAP_URL, use_pool,
                           use_auth_pool=end_user_auth)

    conn = KeystoneLDAPHandler(conn=conn)

    conn.connect(self.LDAP_URL,
                 page_size=self.page_size,
                 alias_dereferencing=self.alias_dereferencing,
                 use_tls=self.use_tls,
                 tls_cacertfile=self.tls_cacertfile,
                 tls_cacertdir=self.tls_cacertdir,
                 tls_req_cert=self.tls_req_cert,
                 chase_referrals=self.chase_referrals,
                 debug_level=self.debug_level,
                 use_pool=use_pool,
                 pool_size=pool_size,
                 pool_retry_max=self.pool_retry_max,
                 pool_retry_delay=self.pool_retry_delay,
                 pool_conn_timeout=self.pool_conn_timeout,
                 pool_conn_lifetime=pool_conn_lifetime
                 )

    if user is None:
        user = self.LDAP_USER

    if password is None:
        password = self.LDAP_PASSWORD

    # not all LDAP servers require authentication, so we don't bind
    # if we don't have any user/pass
    if user and password:
        conn.simple_bind_s(user, password)

    return conn

def _id_to_dn_string(self, object_id):
    """Construct the DN for *object_id* without querying the server."""
    return u'%s=%s,%s' % (self.id_attr,
                          ldap.dn.escape_dn_chars(
                              six.text_type(object_id)),
                          self.tree_dn)

def _id_to_dn(self, object_id):
    """Resolve *object_id* to its DN.

    For subtree scopes the server is searched first (the entry may not
    sit directly under tree_dn); falls back to the constructed DN.
    """
    if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
        return self._id_to_dn_string(object_id)
    with self.get_connection() as conn:
        search_result = conn.search_s(
            self.tree_dn, self.LDAP_SCOPE,
            u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
            {'id_attr': self.id_attr,
             'id': ldap.filter.escape_filter_chars(
                 six.text_type(object_id)),
             'objclass': self.object_class},
            attrlist=DN_ONLY)
    if search_result:
        dn, attrs = search_result[0]
        return dn
    else:
        return self._id_to_dn_string(object_id)

@staticmethod
def _dn_to_id(dn):
    """Extract the object id (value of the first RDN) from a DN."""
    return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
def _ldap_res_to_model(self, res):
    """Convert a raw LDAP search entry ``(dn, attrs)`` into self.model."""
    # LDAP attribute names may be returned in a different case than
    # they are defined in the mapping, so we need to check for keys
    # in a case-insensitive way.  We use the case specified in the
    # mapping for the model to ensure we have a predictable way of
    # retrieving values later.
    lower_res = {k.lower(): v for k, v in res[1].items()}

    id_attrs = lower_res.get(self.id_attr.lower())
    if not id_attrs:
        message = _('ID attribute %(id_attr)s not found in LDAP '
                    'object %(dn)s') % ({'id_attr': self.id_attr,
                                         'dn': res[0]})
        raise exception.NotFound(message=message)
    if len(id_attrs) > 1:
        # FIXME(gyee): if this is a multi-value attribute and it has
        # multiple values, we can't use it as ID. Retain the dn_to_id
        # logic here so it does not potentially break existing
        # deployments. We need to fix our read-write LDAP logic so
        # it does not get the ID from DN.
        message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
                      'has multiple values and therefore cannot be used '
                      'as an ID. Will get the ID from DN instead') % (
                          {'id_attr': self.id_attr,
                           'dn': res[0]})
        LOG.warning(message)
        id_val = self._dn_to_id(res[0])
    else:
        id_val = id_attrs[0]
    obj = self.model(id=id_val)

    for k in obj.known_keys:
        if k in self.attribute_ignore:
            continue

        try:
            map_attr = self.attribute_mapping.get(k, k)
            if map_attr is None:
                # Ignore attributes that are mapped to None.
                continue

            v = lower_res[map_attr.lower()]
        except KeyError:  # nosec
            # Didn't find the attr, so don't add it.
            pass
        else:
            try:
                obj[k] = v[0]
            except IndexError:
                obj[k] = None

    return obj

def check_allow_create(self):
    """Raise ForbiddenAction unless creation is enabled in config."""
    if not self.allow_create:
        action = _('LDAP %s create') % self.options_name
        raise exception.ForbiddenAction(action=action)

def check_allow_update(self):
    """Raise ForbiddenAction unless updates are enabled in config."""
    if not self.allow_update:
        action = _('LDAP %s update') % self.options_name
        raise exception.ForbiddenAction(action=action)

def check_allow_delete(self):
    """Raise ForbiddenAction unless deletion is enabled in config."""
    if not self.allow_delete:
        action = _('LDAP %s delete') % self.options_name
        raise exception.ForbiddenAction(action=action)

def affirm_unique(self, values):
    """Raise Conflict if an entry with the same name or id exists."""
    if values.get('name') is not None:
        try:
            self.get_by_name(values['name'])
        except exception.NotFound:  # nosec
            # Didn't find it so it's unique, good.
            pass
        else:
            raise exception.Conflict(type=self.options_name,
                                     details=_('Duplicate name, %s.') %
                                     values['name'])

    if values.get('id') is not None:
        try:
            self.get(values['id'])
        except exception.NotFound:  # nosec
            # Didn't find it, so it's unique, good.
            pass
        else:
            raise exception.Conflict(type=self.options_name,
                                     details=_('Duplicate ID, %s.') %
                                     values['id'])
def create(self, values):
    """Add a new entry built from *values* under the tree DN.

    :raises keystone.exception.Conflict: if an entry with the same
        name or id already exists (via affirm_unique).
    """
    self.affirm_unique(values)
    object_classes = self.structural_classes + [self.object_class]
    attrs = [('objectClass', object_classes)]
    for k, v in values.items():
        if k in self.attribute_ignore:
            continue
        if k == 'id':
            # no need to check if v is None as 'id' will always have
            # a value
            attrs.append((self.id_attr, [v]))
        elif v is not None:
            attr_type = self.attribute_mapping.get(k, k)
            if attr_type is not None:
                attrs.append((attr_type, [v]))
            extra_attrs = [attr for attr, name
                           in self.extra_attr_mapping.items()
                           if name == k]
            for attr in extra_attrs:
                attrs.append((attr, [v]))

    if 'groupOfNames' in object_classes and self.use_dumb_member:
        attrs.append(('member', [self.dumb_member]))
    with self.get_connection() as conn:
        conn.add_s(self._id_to_dn(values['id']), attrs)
    return values

def _ldap_get(self, object_id, ldap_filter=None):
    """Return the raw ``(dn, attrs)`` entry for *object_id*, or None."""
    query = (u'(&(%(id_attr)s=%(id)s)'
             u'%(filter)s'
             u'(objectClass=%(object_class)s))'
             % {'id_attr': self.id_attr,
                'id': ldap.filter.escape_filter_chars(
                    six.text_type(object_id)),
                'filter': (ldap_filter or self.ldap_filter or ''),
                'object_class': self.object_class})
    with self.get_connection() as conn:
        try:
            attrs = list(set(([self.id_attr] +
                              list(self.attribute_mapping.values()) +
                              list(self.extra_attr_mapping.keys()))))
            res = conn.search_s(self.tree_dn,
                                self.LDAP_SCOPE,
                                query,
                                attrs)
        except ldap.NO_SUCH_OBJECT:
            return None

    try:
        return res[0]
    except IndexError:
        return None

def _ldap_get_limited(self, base, scope, filterstr, attrlist, sizelimit):
    """Run a single paged search limited to *sizelimit* entries."""
    with self.get_connection() as conn:
        try:
            control = ldap.controls.libldap.SimplePagedResultsControl(
                criticality=True,
                size=sizelimit,
                cookie='')
            msgid = conn.search_ext(base, scope, filterstr, attrlist,
                                    serverctrls=[control])
            rdata = conn.result3(msgid)
            return rdata
        except ldap.NO_SUCH_OBJECT:
            return []

@driver_hints.truncated
def _ldap_get_all(self, hints, ldap_filter=None):
    """Return all raw entries of this object class.

    A limit hint switches to a size-limited paged search.
    """
    query = u'(&%s(objectClass=%s)(%s=*))' % (
        ldap_filter or self.ldap_filter or '',
        self.object_class,
        self.id_attr)
    sizelimit = 0
    attrs = list(set(([self.id_attr] +
                      list(self.attribute_mapping.values()) +
                      list(self.extra_attr_mapping.keys()))))
    if hints.limit:
        sizelimit = hints.limit['limit']
        return self._ldap_get_limited(self.tree_dn,
                                      self.LDAP_SCOPE,
                                      query,
                                      attrs,
                                      sizelimit)
    with self.get_connection() as conn:
        try:
            return conn.search_s(self.tree_dn,
                                 self.LDAP_SCOPE,
                                 query,
                                 attrs)
        except ldap.NO_SUCH_OBJECT:
            return []

def _ldap_get_list(self, search_base, scope, query_params=None,
                   attrlist=None):
    """Search *search_base* for entries of this object class, optionally
    AND-filtered by the attr/value pairs in *query_params*.
    """
    query = u'(objectClass=%s)' % self.object_class
    if query_params:

        def calc_filter(attrname, value):
            val_esc = ldap.filter.escape_filter_chars(value)
            return '(%s=%s)' % (attrname, val_esc)

        query = (u'(&%s%s)' %
                 (query, ''.join([calc_filter(k, v) for k, v in
                                  query_params.items()])))
    with self.get_connection() as conn:
        return conn.search_s(search_base, scope, query, attrlist)
def get(self, object_id, ldap_filter=None):
    """Return *object_id* as a model object.

    :raises: the subclass NotFound exception if no entry matches.
    """
    res = self._ldap_get(object_id, ldap_filter)
    if res is None:
        raise self._not_found(object_id)
    else:
        return self._ldap_res_to_model(res)

def get_by_name(self, name, ldap_filter=None):
    """Return the first entry whose mapped 'name' attribute is *name*."""
    query = (u'(%s=%s)' % (self.attribute_mapping['name'],
                           ldap.filter.escape_filter_chars(
                               six.text_type(name))))
    res = self.get_all(query)
    try:
        return res[0]
    except IndexError:
        raise self._not_found(name)

def get_all(self, ldap_filter=None, hints=None):
    """Return all entries of this object class as model objects."""
    hints = hints or driver_hints.Hints()
    return [self._ldap_res_to_model(x)
            for x in self._ldap_get_all(hints, ldap_filter)]

def update(self, object_id, values, old_obj=None):
    """Apply *values* to the entry and return the refreshed object.

    :raises keystone.exception.ForbiddenAction: on disabling an entity
        whose 'enabled' attribute is ignored by configuration.
    :raises keystone.exception.ValidationError: on attempts to change
        an immutable attribute.
    """
    if old_obj is None:
        old_obj = self.get(object_id)

    modlist = []
    for k, v in values.items():
        if k == 'id':
            # id can't be modified.
            continue

        if k in self.attribute_ignore:

            # Handle 'enabled' specially since can't disable if ignored.
            if k == 'enabled' and (not v):
                action = _("Disabling an entity where the 'enable' "
                           "attribute is ignored by configuration.")
                raise exception.ForbiddenAction(action=action)

            continue

        # attribute value has not changed
        if k in old_obj and old_obj[k] == v:
            continue

        if k in self.immutable_attrs:
            msg = (_("Cannot change %(option_name)s %(attr)s") %
                   {'option_name': self.options_name, 'attr': k})
            raise exception.ValidationError(msg)

        if v is None:
            # Absent new value deletes the attribute if it was set.
            if old_obj.get(k) is not None:
                modlist.append((ldap.MOD_DELETE,
                                self.attribute_mapping.get(k, k),
                                None))
            continue

        current_value = old_obj.get(k)
        if current_value is None:
            op = ldap.MOD_ADD
            modlist.append((op, self.attribute_mapping.get(k, k), [v]))
        elif current_value != v:
            op = ldap.MOD_REPLACE
            modlist.append((op, self.attribute_mapping.get(k, k), [v]))

    if modlist:
        with self.get_connection() as conn:
            try:
                conn.modify_s(self._id_to_dn(object_id), modlist)
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(object_id)

    return self.get(object_id)

def delete(self, object_id):
    """Delete the entry for *object_id*.

    :raises: the subclass NotFound exception if it does not exist.
    """
    with self.get_connection() as conn:
        try:
            conn.delete_s(self._id_to_dn(object_id))
        except ldap.NO_SUCH_OBJECT:
            raise self._not_found(object_id)
def delete_tree(self, object_id):
    """Delete the entry for *object_id* together with its whole subtree.

    Tries the server-side tree-delete control first and falls back to
    deleting entries one by one, children before parents.
    """
    tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
                                                    0,
                                                    None)
    with self.get_connection() as conn:
        try:
            conn.delete_ext_s(self._id_to_dn(object_id),
                              serverctrls=[tree_delete_control])
        except ldap.NO_SUCH_OBJECT:
            raise self._not_found(object_id)
        except ldap.NOT_ALLOWED_ON_NONLEAF:
            # Most LDAP servers do not support the tree_delete_control.
            # In these servers, the usual idiom is to first perform a
            # search to get the entries to delete, then delete them
            # in order of child to parent, since LDAP forbids the
            # deletion of a parent entry before deleting the children
            # of that parent.  The simplest way to do that is to delete
            # the entries in order of the length of the DN, from longest
            # to shortest DN.
            dn = self._id_to_dn(object_id)
            scope = ldap.SCOPE_SUBTREE
            # With some directory servers, an entry with objectclass
            # ldapsubentry will not be returned unless it is explicitly
            # requested, by specifying the objectclass in the search
            # filter.  We must specify this, with objectclass=*, in an
            # LDAP filter OR clause, in order to return all entries
            filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
            # We only need the DNs of the entries.  Since no attributes
            # will be returned, we do not have to specify attrsonly=1.
            entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
            if entries:
                for dn in sorted((e[0] for e in entries),
                                 key=len, reverse=True):
                    conn.delete_s(dn)
            else:
                LOG.debug('No entries in LDAP subtree %s', dn)

def add_member(self, member_dn, member_list_dn):
    """Add member to the member list.

    :param member_dn: DN of member to be added.
    :param member_list_dn: DN of group to which the
                           member will be added.

    :raises keystone.exception.Conflict: If the user was already a
        member.
    :raises self.NotFound: If the group entry didn't exist.
    """
    with self.get_connection() as conn:
        try:
            mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
            conn.modify_s(member_list_dn, [mod])
        except ldap.TYPE_OR_VALUE_EXISTS:
            raise exception.Conflict(_('Member %(member)s '
                                       'is already a member'
                                       ' of group %(group)s') % {
                                           'member': member_dn,
                                           'group': member_list_dn})
        except ldap.NO_SUCH_OBJECT:
            raise self._not_found(member_list_dn)

def remove_member(self, member_dn, member_list_dn):
    """Remove member from the member list.

    :param member_dn: DN of member to be removed.
    :param member_list_dn: DN of group from which the
                           member will be removed.

    :raises self.NotFound: If the group entry didn't exist.
    :raises ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
    """
    with self.get_connection() as conn:
        try:
            mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
            conn.modify_s(member_list_dn, [mod])
        except ldap.NO_SUCH_OBJECT:
            raise self._not_found(member_list_dn)
def _delete_tree_nodes(self, search_base, scope, query_params=None):
    """Delete every entry of this object class under *search_base*.

    Entries that vanish before we get to them are collected and
    logged rather than failing the whole operation.
    """
    query = u'(objectClass=%s)' % self.object_class
    if query_params:
        query = (u'(&%s%s)' %
                 (query, ''.join(['(%s=%s)'
                                  % (k, ldap.filter.escape_filter_chars(v))
                                  for k, v in
                                  query_params.items()])))
    not_deleted_nodes = []
    with self.get_connection() as conn:
        try:
            nodes = conn.search_s(search_base, scope, query,
                                  attrlist=DN_ONLY)
        except ldap.NO_SUCH_OBJECT:
            LOG.debug('Could not find entry with dn=%s', search_base)
            raise self._not_found(self._dn_to_id(search_base))
        else:
            for node_dn, _t in nodes:
                try:
                    conn.delete_s(node_dn)
                except ldap.NO_SUCH_OBJECT:
                    not_deleted_nodes.append(node_dn)

    if not_deleted_nodes:
        LOG.warning(_LW("When deleting entries for %(search_base)s, "
                        "could not delete nonexistent entries "
                        "%(entries)s%(dots)s"),
                    {'search_base': search_base,
                     'entries': not_deleted_nodes[:3],
                     'dots': '...' if len(not_deleted_nodes) > 3 else ''})

def filter_query(self, hints, query=None):
    """Apply filtering to a query.

    :param hints: contains the list of filters, which may be None,
                  indicating that there are no filters to be applied.
                  If it's not None, then any filters satisfied here will
                  be removed so that the caller will know if any filters
                  remain to be applied.
    :param query: LDAP query into which to include filters

    :returns query: LDAP query, updated with any filters satisfied
    """
    def build_filter(filter_, hints):
        """Build a filter for the query.

        :param filter_: the dict that describes this filter
        :param hints: contains the list of filters yet to be satisfied.

        :returns query: LDAP query term to be added
        """
        ldap_attr = self.attribute_mapping[filter_['name']]
        val_esc = ldap.filter.escape_filter_chars(filter_['value'])

        if filter_['case_sensitive']:
            # NOTE(henry-nash): Although dependent on the schema being
            # used, most LDAP attributes are configured with case
            # insensitive matching rules, so we'll leave this to the
            # controller to filter.
            return

        if filter_['name'] == 'enabled':
            # NOTE(henry-nash): Due to the different options for storing
            # the enabled attribute (e.g. emulated or not), for now we
            # don't try and filter this at the driver level - we simply
            # leave the filter to be handled by the controller. It seems
            # unlikely that this will cause a significant performance
            # issue.
            return

        # TODO(henry-nash): Currently there are no booleans (other than
        # 'enabled' that is handled above) on which you can filter. If
        # there were, we would need to add special handling here to
        # convert the booleans values to 'TRUE' and 'FALSE'. To do that
        # we would also need to know which filter keys were actually
        # booleans (this is related to bug #1411478).

        if filter_['comparator'] == 'equals':
            query_term = (u'(%(attr)s=%(val)s)'
                          % {'attr': ldap_attr, 'val': val_esc})
        elif filter_['comparator'] == 'contains':
            query_term = (u'(%(attr)s=*%(val)s*)'
                          % {'attr': ldap_attr, 'val': val_esc})
        elif filter_['comparator'] == 'startswith':
            query_term = (u'(%(attr)s=%(val)s*)'
                          % {'attr': ldap_attr, 'val': val_esc})
        elif filter_['comparator'] == 'endswith':
            query_term = (u'(%(attr)s=*%(val)s)'
                          % {'attr': ldap_attr, 'val': val_esc})
        else:
            # It's a filter we don't understand, so let the caller
            # work out if they need to do something with it.
            return

        return query_term

    if query is None:
        # make sure query is a string so the ldap filter is properly
        # constructed from filter_list later
        query = ''

    if hints is None:
        return query

    filter_list = []
    satisfied_filters = []

    for filter_ in hints.filters:
        if filter_['name'] not in self.attribute_mapping:
            continue
        new_filter = build_filter(filter_, hints)
        if new_filter is not None:
            filter_list.append(new_filter)
            satisfied_filters.append(filter_)

    if filter_list:
        query = u'(&%s%s)' % (query, ''.join(filter_list))

    # Remove satisfied filters, then the caller will know remaining filters
    for filter_ in satisfied_filters:
        hints.filters.remove(filter_)
    return query
class EnabledEmuMixIn(BaseLdap):
    """Emulates boolean 'enabled' attribute if turned on.

    Creates a group holding all enabled objects of this class, all
    missing objects are considered disabled.

    Options:

    * $name_enabled_emulation - boolean, on/off
    * $name_enabled_emulation_dn - DN of that group, default is
      cn=enabled_${name}s,${tree_dn}
    * $name_enabled_emulation_use_group_config - boolean, on/off

    Where ${name}s is the plural of self.options_name ('users' or
    'tenants'), ${tree_dn} is self.tree_dn.
    """

    DEFAULT_GROUP_OBJECTCLASS = 'groupOfNames'
    DEFAULT_MEMBER_ATTRIBUTE = 'member'

    def __init__(self, conf):
        """Resolve the enabled-emulation options and the DN / naming
        attribute of the emulation group.
        """
        super(EnabledEmuMixIn, self).__init__(conf)
        enabled_emulation = '%s_enabled_emulation' % self.options_name
        self.enabled_emulation = getattr(conf.ldap, enabled_emulation)

        enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
        self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)

        use_group_config = ('%s_enabled_emulation_use_group_config' %
                            self.options_name)
        self.use_group_config = getattr(conf.ldap, use_group_config)

        if not self.use_group_config:
            self.member_attribute = self.DEFAULT_MEMBER_ATTRIBUTE
            self.group_objectclass = self.DEFAULT_GROUP_OBJECTCLASS
        else:
            self.member_attribute = conf.ldap.group_member_attribute
            self.group_objectclass = conf.ldap.group_objectclass

        if not self.enabled_emulation_dn:
            naming_attr_name = 'cn'
            naming_attr_value = 'enabled_%ss' % self.options_name
            sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
            self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
            naming_attr = (naming_attr_name, [naming_attr_value])
        else:
            # Extract the attribute name and value from the configured DN.
            naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
            naming_rdn = naming_dn[0][0]
            naming_attr = (utf8_decode(naming_rdn[0]),
                           utf8_decode(naming_rdn[1]))
        self.enabled_emulation_naming_attr = naming_attr

    def _get_enabled(self, object_id, conn):
        # An object is "enabled" iff its DN is a member of the
        # enabled-emulation group.
        dn = self._id_to_dn(object_id)
        query = '(%s=%s)' % (self.member_attribute,
                             ldap.filter.escape_filter_chars(dn))
        try:
            enabled_value = conn.search_s(self.enabled_emulation_dn,
                                          ldap.SCOPE_BASE,
                                          query, attrlist=DN_ONLY)
        except ldap.NO_SUCH_OBJECT:
            return False
        else:
            return bool(enabled_value)

    def _add_enabled(self, object_id):
        # Add the object's DN to the enabled group; create the group
        # with this object as first member if it does not exist yet.
        with self.get_connection() as conn:
            if not self._get_enabled(object_id, conn):
                modlist = [(ldap.MOD_ADD,
                            self.member_attribute,
                            [self._id_to_dn(object_id)])]
                try:
                    conn.modify_s(self.enabled_emulation_dn, modlist)
                except ldap.NO_SUCH_OBJECT:
                    attr_list = [('objectClass', [self.group_objectclass]),
                                 (self.member_attribute,
                                  [self._id_to_dn(object_id)]),
                                 self.enabled_emulation_naming_attr]
                    if self.use_dumb_member:
                        attr_list[1][1].append(self.dumb_member)
                    conn.add_s(self.enabled_emulation_dn, attr_list)

    def _remove_enabled(self, object_id):
        # Remove the object's DN from the enabled group; a missing
        # group or membership counts as already disabled.
        modlist = [(ldap.MOD_DELETE,
                    self.member_attribute,
                    [self._id_to_dn(object_id)])]
        with self.get_connection() as conn:
            try:
                conn.modify_s(self.enabled_emulation_dn, modlist)
            except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):  # nosec
                # It's already gone, good.
                pass
def create(self, values):
    """Create the entry, recording 'enabled' via group membership when
    emulation is on.
    """
    if self.enabled_emulation:
        enabled_value = values.pop('enabled', True)
        ref = super(EnabledEmuMixIn, self).create(values)
        if 'enabled' not in self.attribute_ignore:
            if enabled_value:
                self._add_enabled(ref['id'])
            ref['enabled'] = enabled_value
        return ref
    else:
        return super(EnabledEmuMixIn, self).create(values)

def get(self, object_id, ldap_filter=None):
    """Get the entry, adding the emulated 'enabled' value if needed."""
    with self.get_connection() as conn:
        ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
        if ('enabled' not in self.attribute_ignore and
                self.enabled_emulation):
            ref['enabled'] = self._get_enabled(object_id, conn)
        return ref

def get_all(self, ldap_filter=None, hints=None):
    """List entries, excluding the emulation group itself and adding
    emulated 'enabled' values when emulation is on.
    """
    hints = hints or driver_hints.Hints()
    if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
        # had to copy BaseLdap.get_all here to ldap_filter by DN
        obj_list = [self._ldap_res_to_model(x)
                    for x in self._ldap_get_all(hints, ldap_filter)
                    if x[0] != self.enabled_emulation_dn]
        with self.get_connection() as conn:
            for obj_ref in obj_list:
                obj_ref['enabled'] = self._get_enabled(
                    obj_ref['id'], conn)
        return obj_list
    else:
        return super(EnabledEmuMixIn, self).get_all(ldap_filter, hints)

def update(self, object_id, values, old_obj=None):
    """Update the entry, translating an 'enabled' change into group
    membership changes when emulation is on.
    """
    if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
        data = values.copy()
        enabled_value = data.pop('enabled', None)
        ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
        if enabled_value is not None:
            if enabled_value:
                self._add_enabled(object_id)
            else:
                self._remove_enabled(object_id)
            ref['enabled'] = enabled_value
        return ref
    else:
        return super(EnabledEmuMixIn, self).update(
            object_id, values, old_obj)

def delete(self, object_id):
    """Delete the entry, first dropping it from the enabled group."""
    if self.enabled_emulation:
        self._remove_enabled(object_id)
    super(EnabledEmuMixIn, self).delete(object_id)
| 40.136364 | 79 | 0.590568 |
import abc
import codecs
import functools
import os.path
import re
import sys
import weakref
import ldap.controls
import ldap.filter
import ldappool
from oslo_log import log
from oslo_utils import reflection
import six
from six.moves import map, zip
from keystone.common import driver_hints
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
LOG = log.getLogger(__name__)

# LDAP boolean attribute syntax values mapped to python bools.
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
# OID of the subtree-delete server control (used by delete_tree).
CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
               'sub': ldap.SCOPE_SUBTREE}
LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
              'default': None,
              'finding': ldap.DEREF_FINDING,
              'never': ldap.DEREF_NEVER,
              'searching': ldap.DEREF_SEARCHING}
LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
                  'demand': ldap.OPT_X_TLS_DEMAND,
                  'allow': ldap.OPT_X_TLS_ALLOW}

# Attribute list requesting no attributes, i.e. DNs only (RFC 4511 '1.1').
DN_ONLY = ['1.1']

_utf8_encoder = codecs.getencoder('utf-8')
def utf8_encode(value):
    """Encode text to UTF-8 bytes; bytes pass through unchanged.

    :raises TypeError: if *value* is neither text nor bytes.
    """
    if isinstance(value, six.binary_type):
        return value
    if isinstance(value, six.text_type):
        encoded, _length = _utf8_encoder(value)
        return encoded
    value_cls_name = reflection.get_class_name(
        value, fully_qualified=False)
    raise TypeError("value must be basestring, "
                    "not %s" % value_cls_name)
_utf8_decoder = codecs.getdecoder('utf-8')


def utf8_decode(value):
    """Decode UTF-8 bytes to text; any other value is coerced to text."""
    if not isinstance(value, six.binary_type):
        return six.text_type(value)
    decoded, _length = _utf8_decoder(value)
    return decoded
def py2ldap(val):
    """Serialize a python value for LDAP.

    Booleans become the LDAP boolean strings 'TRUE'/'FALSE'; everything
    else is converted to its text form.
    """
    if not isinstance(val, bool):
        return six.text_type(val)
    return u'TRUE' if val else u'FALSE'
def enabled2py(val):
    """Convert an LDAP 'enabled' value to a python bool, int, or text."""
    if val in LDAP_VALUES:
        return LDAP_VALUES[val]
    try:
        return int(val)
    except ValueError:  # nosec
        # Not an int either; fall through to plain text.
        pass
    return utf8_decode(val)


def ldap2py(val):
    """Convert a generic LDAP-returned value to python text."""
    return utf8_decode(val)
def convert_ldap_result(ldap_result):
    """Convert a raw LDAP search result into python-typed entries.

    Referral entries (those with a None DN) are dropped; attributes
    that fail UTF-8 decoding are logged and omitted from their entry.
    """
    py_result = []
    saw_referral = False
    for dn, attrs in ldap_result:
        if dn is None:
            # A (None, ...) pair is a search continuation (referral).
            saw_referral = True
            continue
        converted_attrs = {}
        for kind, values in attrs.items():
            converter = enabled2py if kind == 'enabled' else ldap2py
            try:
                converted_attrs[kind] = [converter(v) for v in values]
            except UnicodeDecodeError:
                LOG.debug('Unable to decode value for attribute %s', kind)
        py_result.append((utf8_decode(dn), converted_attrs))
    if saw_referral:
        LOG.debug(('Referrals were returned and ignored. Enable referral '
                   'chasing in keystone.conf via [ldap] chase_referrals'))
    return py_result
def safe_iter(attrs):
    """Yield nothing for None, each element of a list, or the value
    itself for any other (scalar) input.
    """
    if attrs is not None:
        if isinstance(attrs, list):
            for element in attrs:
                yield element
        else:
            yield attrs
def parse_deref(opt):
    """Map an alias-dereferencing mode name to its python-ldap constant.

    :raises ValueError: if *opt* is not a key of LDAP_DEREF.
    """
    if opt not in LDAP_DEREF:
        raise ValueError(_('Invalid LDAP deref option: %(option)s. '
                           'Choose one of: %(options)s') %
                         {'option': opt,
                          'options': ', '.join(LDAP_DEREF.keys()), })
    return LDAP_DEREF[opt]


def parse_tls_cert(opt):
    """Map a TLS certificate-checking mode name to its python-ldap
    constant.

    :raises ValueError: if *opt* is not a key of LDAP_TLS_CERTS.
    """
    if opt not in LDAP_TLS_CERTS:
        raise ValueError(_(
            'Invalid LDAP TLS certs option: %(option)s. '
            'Choose one of: %(options)s') % {
                'option': opt,
                'options': ', '.join(LDAP_TLS_CERTS.keys())})
    return LDAP_TLS_CERTS[opt]


def ldap_scope(scope):
    """Map a query scope name ('one'/'sub') to its python-ldap constant.

    :raises ValueError: if *scope* is not a key of LDAP_SCOPES.
    """
    if scope not in LDAP_SCOPES:
        raise ValueError(
            _('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
                'scope': scope,
                'options': ', '.join(LDAP_SCOPES.keys())})
    return LDAP_SCOPES[scope]
def prep_case_insensitive(value):
    """Normalize a string for case-insensitive matching: lowercase,
    strip, and collapse internal whitespace runs to single spaces.
    """
    return re.sub(r'\s+', ' ', value.strip().lower())


def is_ava_value_equal(attribute_type, val1, val2):
    """Return True if two attribute values match after normalization.

    The attribute type is currently unused; all values are compared
    with caseIgnore-style string prep.
    """
    return prep_case_insensitive(val1) == prep_case_insensitive(val2)


def is_rdn_equal(rdn1, rdn2):
    """Return True if two RDNs (AVA lists as parsed by ldap.dn.str2dn)
    are equal.

    Both RDNs must have the same number of AVAs, and each AVA of rdn1
    must find an AVA in rdn2 with the same (case-insensitive) attribute
    type whose first matching value compares equal.
    """
    if len(rdn1) != len(rdn2):
        return False
    for attr_type_1, val1, _dummy in rdn1:
        # Collect values in rdn2 carried by the same attribute type;
        # only the first one is compared, mirroring a linear scan.
        peer_vals = [val2 for attr_type_2, val2, _d in rdn2
                     if attr_type_1.lower() == attr_type_2.lower()]
        if not peer_vals:
            return False
        if not is_ava_value_equal(attr_type_1, val1, peer_vals[0]):
            return False
    return True
def is_dn_equal(dn1, dn2):
    """Return True if two DNs are RDN-wise equal.

    :param dn1: a string DN or a DN parsed by ldap.dn.str2dn.
    :param dn2: a string DN or a DN parsed by ldap.dn.str2dn.
    """
    if not isinstance(dn1, list):
        dn1 = ldap.dn.str2dn(utf8_encode(dn1))
    if not isinstance(dn2, list):
        dn2 = ldap.dn.str2dn(utf8_encode(dn2))
    if len(dn1) != len(dn2):
        return False
    return all(is_rdn_equal(rdn1, rdn2)
               for rdn1, rdn2 in zip(dn1, dn2))


def dn_startswith(descendant_dn, dn):
    """Return True if *descendant_dn* is a proper descendant of *dn*."""
    if not isinstance(descendant_dn, list):
        descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
    if not isinstance(dn, list):
        dn = ldap.dn.str2dn(utf8_encode(dn))
    # A proper descendant must carry strictly more RDNs than its
    # ancestor; its trailing RDNs must then equal the ancestor's.
    if len(descendant_dn) <= len(dn):
        return False
    return is_dn_equal(descendant_dn[-len(dn):], dn)
@six.add_metaclass(abc.ABCMeta)
class LDAPHandler(object):
    """Abstract interface of a python-ldap-style connection object.

    Concrete implementations wrap python-ldap directly
    (PythonLDAPHandler) or layer pooling/decoration on top; every
    method mirrors the python-ldap call of the same name.
    """

    @abc.abstractmethod
    def __init__(self, conn=None):
        self.conn = conn

    @abc.abstractmethod
    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None,
                debug_level=None, use_pool=None, pool_size=None,
                pool_retry_max=None, pool_retry_delay=None,
                pool_conn_timeout=None, pool_conn_lifetime=None):
        raise exception.NotImplemented()

    @abc.abstractmethod
    def set_option(self, option, invalue):
        raise exception.NotImplemented()

    @abc.abstractmethod
    def get_option(self, option):
        raise exception.NotImplemented()

    @abc.abstractmethod
    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        raise exception.NotImplemented()

    @abc.abstractmethod
    def unbind_s(self):
        raise exception.NotImplemented()

    @abc.abstractmethod
    def add_s(self, dn, modlist):
        raise exception.NotImplemented()

    @abc.abstractmethod
    def search_s(self, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        raise exception.NotImplemented()

    @abc.abstractmethod
    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        raise exception.NotImplemented()

    @abc.abstractmethod
    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
                resp_ctrl_classes=None):
        raise exception.NotImplemented()

    @abc.abstractmethod
    def modify_s(self, dn, modlist):
        raise exception.NotImplemented()

    @abc.abstractmethod
    def delete_s(self, dn):
        raise exception.NotImplemented()

    @abc.abstractmethod
    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
        raise exception.NotImplemented()
class PythonLDAPHandler(LDAPHandler):
    """LDAPHandler implementation delegating directly to python-ldap.

    ``connect`` creates a real, non-pooled connection; every other method
    forwards verbatim to the underlying ``ldap`` connection object.
    """
    def __init__(self, conn=None):
        super(PythonLDAPHandler, self).__init__(conn=conn)
    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None, pool_retry_max=None,
                pool_retry_delay=None, pool_conn_timeout=None,
                pool_conn_lifetime=None):
        # Process-wide TLS/debug options must be set before the connection
        # object is created.
        _common_ldap_initialization(url=url,
                                    use_tls=use_tls,
                                    tls_cacertfile=tls_cacertfile,
                                    tls_cacertdir=tls_cacertdir,
                                    tls_req_cert=tls_req_cert,
                                    debug_level=debug_level)
        self.conn = ldap.initialize(url)
        self.conn.protocol_version = ldap.VERSION3
        if alias_dereferencing is not None:
            self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
        self.page_size = page_size
        if use_tls:
            self.conn.start_tls_s()
        if chase_referrals is not None:
            self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
    def set_option(self, option, invalue):
        return self.conn.set_option(option, invalue)
    def get_option(self, option):
        return self.conn.get_option(option)
    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)
    def unbind_s(self):
        return self.conn.unbind_s()
    def add_s(self, dn, modlist):
        return self.conn.add_s(dn, modlist)
    def search_s(self, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        return self.conn.search_s(base, scope, filterstr,
                                  attrlist, attrsonly)
    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        return self.conn.search_ext(base, scope,
                                    filterstr, attrlist, attrsonly,
                                    serverctrls, clientctrls,
                                    timeout, sizelimit)
    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
                resp_ctrl_classes=None):
        # NOTE(review): resp_ctrl_classes is accepted for interface parity
        # but is not forwarded to the underlying connection.
        return self.conn.result3(msgid, all, timeout)
    def modify_s(self, dn, modlist):
        return self.conn.modify_s(dn, modlist)
    def delete_s(self, dn):
        return self.conn.delete_s(dn)
    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
        return self.conn.delete_ext_s(dn, serverctrls, clientctrls)
def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
                                tls_cacertdir=None, tls_req_cert=None,
                                debug_level=None):
    """Apply process-wide python-ldap options (debug level, TLS/LDAPS).

    These are module-level (global) ldap options, not per-connection ones,
    so they must be set before a connection object is initialized.

    :raises AssertionError: if use_tls is combined with an ldaps:// URL.
    :raises ValueError: if TLS is requested but python-ldap lacks support.
    :raises IOError: if the CA cert file/dir does not exist.
    """
    LOG.debug("LDAP init: url=%s", url)
    LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
              'tls_req_cert=%s tls_avail=%s',
              use_tls, tls_cacertfile, tls_cacertdir,
              tls_req_cert, ldap.TLS_AVAIL)
    if debug_level is not None:
        ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
    using_ldaps = url.lower().startswith("ldaps")
    # StartTLS (use_tls) and an ldaps:// URL are mutually exclusive.
    if use_tls and using_ldaps:
        raise AssertionError(_('Invalid TLS / LDAPS combination'))
    if use_tls or using_ldaps:
        if not ldap.TLS_AVAIL:
            raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
                               'not available') % ldap.TLS_AVAIL)
        if tls_cacertfile:
            # A file takes precedence over a directory of CA certs.
            if not os.path.isfile(tls_cacertfile):
                raise IOError(_("tls_cacertfile %s not found "
                                "or is not a file") %
                              tls_cacertfile)
            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
        elif tls_cacertdir:
            if not os.path.isdir(tls_cacertdir):
                raise IOError(_("tls_cacertdir %s not found "
                                "or is not a directory") %
                              tls_cacertdir)
            ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
        # Unknown values are logged and ignored rather than raised.
        if tls_req_cert in list(LDAP_TLS_CERTS.values()):
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
        else:
            LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
                      tls_req_cert)
class MsgId(list):
    """Wrapper for the (connection, msgid) pair returned by async search_ext.

    Subclassing list makes the pair weak-referenceable (built-in list and
    tuple instances are not), which PooledLDAPHandler.search_ext relies on
    to release the pooled connection once the result has been consumed.
    """
    pass
def use_conn_pool(func):
    """Decorator for pooled-handler methods needing a live connection.

    Checks a connection out of the pool, applies any pending connection
    options, and passes the connection to the decorated method as the
    argument following ``self``. The connection is returned to the pool
    when the call completes.
    """
    # functools.wraps preserves the wrapped method's __name__/__doc__,
    # which the bare inner function previously clobbered.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        with self._get_pool_connection() as conn:
            self._apply_options(conn)
            return func(self, conn, *args, **kwargs)
    return wrapper
class PooledLDAPHandler(LDAPHandler):
    """LDAPHandler implementation backed by an ldappool connection pool.

    Pools are keyed by URL (prefixed with ``auth_pool_`` for end-user
    authentication pools) and shared process-wide through the class-level
    ``connection_pools`` registry, so multiple instances reuse the same
    pool for the same server.
    """
    # Connector class ldappool uses to build pooled connections.
    Connector = ldappool.StateConnector
    auth_pool_prefix = 'auth_pool_'
    # Class-level registry of pools, keyed by pool URL (shared by design).
    connection_pools = {}
    def __init__(self, conn=None, use_auth_pool=False):
        super(PooledLDAPHandler, self).__init__(conn=conn)
        # Bind credentials recorded by simple_bind_s and replayed every
        # time a connection is checked out of the pool.
        self.who = ''
        self.cred = ''
        # Options requested before a connection exists; replayed onto each
        # pooled connection by _apply_options.
        self.conn_options = {}
        self.page_size = None
        self.use_auth_pool = use_auth_pool
        self.conn_pool = None
    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None, pool_retry_max=None,
                pool_retry_delay=None, pool_conn_timeout=None,
                pool_conn_lifetime=None):
        """Set global options and look up (or create) the pool for url."""
        _common_ldap_initialization(url=url,
                                    use_tls=use_tls,
                                    tls_cacertfile=tls_cacertfile,
                                    tls_cacertdir=tls_cacertdir,
                                    tls_req_cert=tls_req_cert,
                                    debug_level=debug_level)
        self.page_size = page_size
        if alias_dereferencing is not None:
            self.set_option(ldap.OPT_DEREF, alias_dereferencing)
        if chase_referrals is not None:
            self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
        # End-user auth pools are kept separate from the service pool.
        if self.use_auth_pool:
            pool_url = self.auth_pool_prefix + url
        else:
            pool_url = url
        try:
            self.conn_pool = self.connection_pools[pool_url]
        except KeyError:
            self.conn_pool = ldappool.ConnectionManager(
                url,
                size=pool_size,
                retry_max=pool_retry_max,
                retry_delay=pool_retry_delay,
                timeout=pool_conn_timeout,
                connector_cls=self.Connector,
                use_tls=use_tls,
                max_lifetime=pool_conn_lifetime)
            self.connection_pools[pool_url] = self.conn_pool
    def set_option(self, option, invalue):
        # Deferred: stored here and applied per-connection on checkout.
        self.conn_options[option] = invalue
    def get_option(self, option):
        value = self.conn_options.get(option)
        # Not locally overridden: ask a live pooled connection.
        if value is None:
            with self._get_pool_connection() as conn:
                value = conn.get_option(option)
        return value
    def _apply_options(self, conn):
        # NOTE(review): options are only applied to freshly-created
        # connections; a lifetime over 30s is taken to mean the options
        # were already applied earlier — confirm threshold semantics.
        if conn.get_lifetime() > 30:
            return
        for option, invalue in self.conn_options.items():
            conn.set_option(option, invalue)
    def _get_pool_connection(self):
        # Returns a context manager that checks a bound connection out of
        # the pool and returns it on exit.
        return self.conn_pool.connection(self.who, self.cred)
    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        # No explicit bind here: record the credentials and validate them
        # by checking a connection out of the pool once.
        self.who = who
        self.cred = cred
        with self._get_pool_connection() as conn:
            self._apply_options(conn)
    def unbind_s(self):
        # Nothing to do: connections are unbound/recycled by the pool.
        pass
    @use_conn_pool
    def add_s(self, conn, dn, modlist):
        return conn.add_s(dn, modlist)
    @use_conn_pool
    def search_s(self, conn, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        return conn.search_s(base, scope, filterstr, attrlist,
                             attrsonly)
    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        """Asynchronous search; returns a MsgId consumed by result3().

        The pooled connection must stay checked out until result3() has
        read the result, so the context manager is entered manually here
        and exited via a weakref callback when the MsgId is collected.
        """
        conn_ctxt = self._get_pool_connection()
        conn = conn_ctxt.__enter__()
        try:
            msgid = conn.search_ext(base, scope,
                                    filterstr, attrlist, attrsonly,
                                    serverctrls, clientctrls,
                                    timeout, sizelimit)
        except Exception:
            # Return the connection to the pool before propagating.
            conn_ctxt.__exit__(*sys.exc_info())
            raise
        res = MsgId((conn, msgid))
        # NOTE(review): the weakref object itself is not kept referenced;
        # verify the finalizer callback reliably fires to release the
        # connection when res is garbage collected.
        weakref.ref(res, functools.partial(conn_ctxt.__exit__,
                                           None, None, None))
        return res
    def result3(self, msgid, all=1, timeout=None,
                resp_ctrl_classes=None):
        # msgid is the MsgId pair produced by search_ext above.
        conn, msg_id = msgid
        return conn.result3(msg_id, all, timeout)
    @use_conn_pool
    def modify_s(self, conn, dn, modlist):
        return conn.modify_s(dn, modlist)
    @use_conn_pool
    def delete_s(self, conn, dn):
        return conn.delete_s(dn)
    @use_conn_pool
    def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
        return conn.delete_ext_s(dn, serverctrls, clientctrls)
class KeystoneLDAPHandler(LDAPHandler):
    """Convenience wrapper layered on top of another LDAPHandler.

    Adds, on top of the wrapped handler (``self.conn``):
    * context-manager support (unbinds on ``__exit__``),
    * debug logging of each LDAP call (masking userPassword values),
    * UTF-8 encoding of DNs/attributes on the way out and decoding of
      results on the way back (via convert_ldap_result),
    * transparent paged search in search_s when ``page_size`` is non-zero.
    """
    def __init__(self, conn=None):
        super(KeystoneLDAPHandler, self).__init__(conn=conn)
        self.page_size = 0
    def __enter__(self):
        return self
    def _disable_paging(self):
        # Fall back to unpaged searches (used when the server lacks the
        # paged-results control).
        self.page_size = 0
    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None,
                pool_retry_max=None, pool_retry_delay=None,
                pool_conn_timeout=None, pool_conn_lifetime=None):
        self.page_size = page_size
        return self.conn.connect(url, page_size, alias_dereferencing,
                                 use_tls, tls_cacertfile, tls_cacertdir,
                                 tls_req_cert, chase_referrals,
                                 debug_level=debug_level,
                                 use_pool=use_pool,
                                 pool_size=pool_size,
                                 pool_retry_max=pool_retry_max,
                                 pool_retry_delay=pool_retry_delay,
                                 pool_conn_timeout=pool_conn_timeout,
                                 pool_conn_lifetime=pool_conn_lifetime)
    def set_option(self, option, invalue):
        return self.conn.set_option(option, invalue)
    def get_option(self, option):
        return self.conn.get_option(option)
    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        # Credentials are UTF-8 encoded before hitting the wire; the
        # password is deliberately not logged.
        LOG.debug("LDAP bind: who=%s", who)
        who_utf8 = utf8_encode(who)
        cred_utf8 = utf8_encode(cred)
        return self.conn.simple_bind_s(who_utf8, cred_utf8,
                                       serverctrls=serverctrls,
                                       clientctrls=clientctrls)
    def unbind_s(self):
        LOG.debug("LDAP unbind")
        return self.conn.unbind_s()
    def add_s(self, dn, modlist):
        """Add an entry; values are normalized, logged (passwords masked)
        and UTF-8 encoded before being handed to the wrapped handler."""
        ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
                      for kind, values in modlist]
        # Mask userPassword values so they never reach the debug log.
        logging_attrs = [(kind, values
                         if kind != 'userPassword'
                         else ['****'])
                         for kind, values in ldap_attrs]
        LOG.debug('LDAP add: dn=%s attrs=%s',
                  dn, logging_attrs)
        dn_utf8 = utf8_encode(dn)
        ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
                           for kind, values in ldap_attrs]
        return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
    def search_s(self, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        """Synchronous search; uses paging when page_size is set.

        Results are decoded back to Python strings via convert_ldap_result.
        """
        # NOTE(prometheanfire): None attrs cause errors in python-ldap;
        # strip them out defensively.
        if attrlist is not None:
            attrlist = [attr for attr in attrlist if attr is not None]
        LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
                  'attrs=%s attrsonly=%s',
                  base, scope, filterstr, attrlist, attrsonly)
        if self.page_size:
            ldap_result = self._paged_search_s(base, scope,
                                               filterstr, attrlist)
        else:
            base_utf8 = utf8_encode(base)
            filterstr_utf8 = utf8_encode(filterstr)
            if attrlist is None:
                attrlist_utf8 = None
            else:
                attrlist_utf8 = list(map(utf8_encode, attrlist))
            ldap_result = self.conn.search_s(base_utf8, scope,
                                             filterstr_utf8,
                                             attrlist_utf8, attrsonly)
        py_result = convert_ldap_result(ldap_result)
        return py_result
    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        """Asynchronous search; pair with result3() to fetch results.

        NOTE(review): unlike search_s, arguments are not UTF-8 encoded
        here — confirm callers pass already-encoded values.
        """
        if attrlist is not None:
            attrlist = [attr for attr in attrlist if attr is not None]
        LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
                  'attrs=%s attrsonly=%s '
                  'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
                  base, scope, filterstr, attrlist, attrsonly,
                  serverctrls, clientctrls, timeout, sizelimit)
        return self.conn.search_ext(base, scope,
                                    filterstr, attrlist, attrsonly,
                                    serverctrls, clientctrls,
                                    timeout, sizelimit)
    def _paged_search_s(self, base, scope, filterstr, attrlist=None):
        """Run a search page by page, accumulating all raw results.

        Supports both the pre-2.4 python-ldap paging API (detected via
        LDAP_CONTROL_PAGE_OID) and the newer libldap-based control.
        """
        res = []
        use_old_paging_api = False
        # The API for the simple paged results control changed between
        # python-ldap versions; pick whichever is available.
        if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
            use_old_paging_api = True
            lc = ldap.controls.SimplePagedResultsControl(
                controlType=ldap.LDAP_CONTROL_PAGE_OID,
                criticality=True,
                controlValue=(self.page_size, ''))
            page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
        else:
            lc = ldap.controls.libldap.SimplePagedResultsControl(
                criticality=True,
                size=self.page_size,
                cookie='')
            page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
        base_utf8 = utf8_encode(base)
        filterstr_utf8 = utf8_encode(filterstr)
        if attrlist is None:
            attrlist_utf8 = None
        else:
            attrlist = [attr for attr in attrlist if attr is not None]
            attrlist_utf8 = list(map(utf8_encode, attrlist))
        msgid = self.conn.search_ext(base_utf8,
                                     scope,
                                     filterstr_utf8,
                                     attrlist_utf8,
                                     serverctrls=[lc])
        # Loop: read one page, then re-issue the search with the server's
        # cookie until the cookie comes back empty.
        while True:
            rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
            res.extend(rdata)
            pctrls = [c for c in serverctrls
                      if c.controlType == page_ctrl_oid]
            if pctrls:
                if use_old_paging_api:
                    est, cookie = pctrls[0].controlValue
                    lc.controlValue = (self.page_size, cookie)
                else:
                    cookie = lc.cookie = pctrls[0].cookie
                if cookie:
                    msgid = self.conn.search_ext(base_utf8,
                                                 scope,
                                                 filterstr_utf8,
                                                 attrlist_utf8,
                                                 serverctrls=[lc])
                else:
                    # Empty cookie: last page received.
                    break
            else:
                # Server ignored the paging control: disable paging and
                # keep whatever was returned.
                LOG.warning(_LW('LDAP Server does not support paging. '
                                'Disable paging in keystone.conf to '
                                'avoid this message.'))
                self._disable_paging()
                break
        return res
    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
                resp_ctrl_classes=None):
        ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
        LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
                  'resp_ctrl_classes=%s ldap_result=%s',
                  msgid, all, timeout, resp_ctrl_classes, ldap_result)
        # result3 yields (rtype, rdata, rmsgid, serverctrls); only rdata
        # (the entries) is decoded and returned to the caller.
        rtype, rdata, rmsgid, serverctrls = ldap_result
        py_result = convert_ldap_result(rdata)
        return py_result
    def modify_s(self, dn, modlist):
        """Modify an entry; values normalized, logged (passwords masked)
        and UTF-8 encoded. A None value list means delete-all for MOD_DELETE."""
        ldap_modlist = [
            (op, kind, (None if values is None
                        else [py2ldap(x) for x in safe_iter(values)]))
            for op, kind, values in modlist]
        logging_modlist = [(op, kind, (values if kind != 'userPassword'
                           else ['****']))
                           for op, kind, values in ldap_modlist]
        LOG.debug('LDAP modify: dn=%s modlist=%s',
                  dn, logging_modlist)
        dn_utf8 = utf8_encode(dn)
        ldap_modlist_utf8 = [
            (op, kind, (None if values is None
                        else [utf8_encode(x) for x in safe_iter(values)]))
            for op, kind, values in ldap_modlist]
        return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)
    def delete_s(self, dn):
        LOG.debug("LDAP delete: dn=%s", dn)
        dn_utf8 = utf8_encode(dn)
        return self.conn.delete_s(dn_utf8)
    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
        LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
                  dn, serverctrls, clientctrls)
        dn_utf8 = utf8_encode(dn)
        return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager exit: always release the connection.
        self.unbind_s()
_HANDLERS = {}
def register_handler(prefix, handler):
    """Register a custom LDAPHandler class for URLs with the given prefix.

    _get_connection consults this registry before falling back to the
    pooled / plain python-ldap handlers.
    """
    _HANDLERS[prefix] = handler
def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
    """Instantiate the LDAP handler appropriate for conn_url.

    A handler registered for a matching URL prefix wins; otherwise a
    pooled or plain python-ldap handler is chosen based on use_pool.
    """
    registered = next((handler for prefix, handler in _HANDLERS.items()
                       if conn_url.startswith(prefix)), None)
    if registered is not None:
        return registered()
    if use_pool:
        return PooledLDAPHandler(use_auth_pool=use_auth_pool)
    return PythonLDAPHandler()
def filter_entity(entity_ref):
    """Strip the LDAP-internal 'dn' key from an entity dict, in place.

    Falsy inputs (None, empty dict) are returned untouched.
    """
    if not entity_ref:
        return entity_ref
    entity_ref.pop('dn', None)
    return entity_ref
class BaseLdap(object):
    """Base class for an LDAP-backed resource driver.

    Subclasses set the DEFAULT_* class attributes, ``options_name`` and
    ``model`` so that per-resource configuration (tree DN, id attribute,
    object class, attribute mapping, allow_create/update/delete, ...) is
    read from ``conf.ldap.<options_name>_*`` options in __init__.
    """
    DEFAULT_OU = None
    DEFAULT_STRUCTURAL_CLASSES = None
    DEFAULT_ID_ATTR = 'cn'
    DEFAULT_OBJECTCLASS = None
    DEFAULT_FILTER = None
    DEFAULT_EXTRA_ATTR_MAPPING = []
    # Placeholder member added to groupOfNames entries when configured,
    # since that objectclass requires at least one member.
    DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
    NotFound = None
    notfound_arg = None
    options_name = None
    model = None
    attribute_options_names = {}
    immutable_attrs = []
    attribute_ignore = []
    tree_dn = None
    def __init__(self, conf):
        """Capture connection/pool settings and per-resource options."""
        self.LDAP_URL = conf.ldap.url
        self.LDAP_USER = conf.ldap.user
        self.LDAP_PASSWORD = conf.ldap.password
        self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
        self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
        self.page_size = conf.ldap.page_size
        self.use_tls = conf.ldap.use_tls
        self.tls_cacertfile = conf.ldap.tls_cacertfile
        self.tls_cacertdir = conf.ldap.tls_cacertdir
        self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
        self.attribute_mapping = {}
        self.chase_referrals = conf.ldap.chase_referrals
        self.debug_level = conf.ldap.debug_level
        # LDAP Pool specific attribute
        self.use_pool = conf.ldap.use_pool
        self.pool_size = conf.ldap.pool_size
        self.pool_retry_max = conf.ldap.pool_retry_max
        self.pool_retry_delay = conf.ldap.pool_retry_delay
        self.pool_conn_timeout = conf.ldap.pool_connection_timeout
        self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime
        # End user authentication pool specific config attributes
        self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
        self.auth_pool_size = conf.ldap.auth_pool_size
        self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime
        # Resolve all <options_name>_* per-resource options, falling back
        # to the DEFAULT_* class attributes when unset.
        if self.options_name is not None:
            self.suffix = conf.ldap.suffix
            dn = '%s_tree_dn' % self.options_name
            self.tree_dn = (getattr(conf.ldap, dn)
                            or '%s,%s' % (self.DEFAULT_OU, self.suffix))
            idatt = '%s_id_attribute' % self.options_name
            self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
            objclass = '%s_objectclass' % self.options_name
            self.object_class = (getattr(conf.ldap, objclass)
                                 or self.DEFAULT_OBJECTCLASS)
            for k, v in self.attribute_options_names.items():
                v = '%s_%s_attribute' % (self.options_name, v)
                self.attribute_mapping[k] = getattr(conf.ldap, v)
            attr_mapping_opt = ('%s_additional_attribute_mapping' %
                                self.options_name)
            attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
                            or self.DEFAULT_EXTRA_ATTR_MAPPING)
            self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
            ldap_filter = '%s_filter' % self.options_name
            self.ldap_filter = getattr(conf.ldap,
                                       ldap_filter) or self.DEFAULT_FILTER
            allow_create = '%s_allow_create' % self.options_name
            self.allow_create = getattr(conf.ldap, allow_create)
            allow_update = '%s_allow_update' % self.options_name
            self.allow_update = getattr(conf.ldap, allow_update)
            allow_delete = '%s_allow_delete' % self.options_name
            self.allow_delete = getattr(conf.ldap, allow_delete)
            member_attribute = '%s_member_attribute' % self.options_name
            self.member_attribute = getattr(conf.ldap, member_attribute, None)
            self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
            if self.notfound_arg is None:
                self.notfound_arg = self.options_name + '_id'
            attribute_ignore = '%s_attribute_ignore' % self.options_name
            self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
            self.use_dumb_member = conf.ldap.use_dumb_member
            self.dumb_member = (conf.ldap.dumb_member or
                                self.DUMB_MEMBER_DN)
        self.subtree_delete_enabled = conf.ldap.allow_subtree_delete
    def _not_found(self, object_id):
        """Build the not-found exception appropriate for this resource."""
        if self.NotFound is None:
            return exception.NotFound(target=object_id)
        else:
            return self.NotFound(**{self.notfound_arg: object_id})
    def _parse_extra_attrs(self, option_list):
        """Parse 'ldap_attr:keystone_attr' strings into a mapping dict.

        Malformed entries are logged and skipped rather than raised.
        """
        mapping = {}
        for item in option_list:
            try:
                ldap_attr, attr_map = item.split(':')
            except Exception:
                LOG.warning(_LW(
                    'Invalid additional attribute mapping: "%s". '
                    'Format must be <ldap_attribute>:<keystone_attribute>'),
                    item)
                continue
            mapping[ldap_attr] = attr_map
        return mapping
    def _is_dumb_member(self, member_dn):
        """Check whether member_dn is the configured dumb (placeholder) member."""
        return (self.use_dumb_member
                and is_dn_equal(member_dn, self.dumb_member))
    def get_connection(self, user=None, password=None, end_user_auth=False):
        """Open a bound KeystoneLDAPHandler connection.

        end_user_auth selects the (possibly differently sized) end-user
        authentication pool instead of the service pool.
        """
        use_pool = self.use_pool
        pool_size = self.pool_size
        pool_conn_lifetime = self.pool_conn_lifetime
        if end_user_auth:
            if not self.use_auth_pool:
                use_pool = False
            else:
                pool_size = self.auth_pool_size
                pool_conn_lifetime = self.auth_pool_conn_lifetime
        conn = _get_connection(self.LDAP_URL, use_pool,
                               use_auth_pool=end_user_auth)
        conn = KeystoneLDAPHandler(conn=conn)
        conn.connect(self.LDAP_URL,
                     page_size=self.page_size,
                     alias_dereferencing=self.alias_dereferencing,
                     use_tls=self.use_tls,
                     tls_cacertfile=self.tls_cacertfile,
                     tls_cacertdir=self.tls_cacertdir,
                     tls_req_cert=self.tls_req_cert,
                     chase_referrals=self.chase_referrals,
                     debug_level=self.debug_level,
                     use_pool=use_pool,
                     pool_size=pool_size,
                     pool_retry_max=self.pool_retry_max,
                     pool_retry_delay=self.pool_retry_delay,
                     pool_conn_timeout=self.pool_conn_timeout,
                     pool_conn_lifetime=pool_conn_lifetime
                     )
        if user is None:
            user = self.LDAP_USER
        if password is None:
            password = self.LDAP_PASSWORD
        # not all LDAP servers require authentication, so we don't bind
        # unless both a user and a password were supplied.
        if user and password:
            conn.simple_bind_s(user, password)
        return conn
    def _id_to_dn_string(self, object_id):
        """Build the canonical DN for object_id without querying the server."""
        return u'%s=%s,%s' % (self.id_attr,
                              ldap.dn.escape_dn_chars(
                                  six.text_type(object_id)),
                              self.tree_dn)
    def _id_to_dn(self, object_id):
        """Resolve object_id to its actual DN.

        With ONELEVEL scope the DN shape is fixed and computed locally;
        otherwise the subtree is searched and the computed DN is only a
        fallback when no entry is found.
        """
        if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
            return self._id_to_dn_string(object_id)
        with self.get_connection() as conn:
            search_result = conn.search_s(
                self.tree_dn, self.LDAP_SCOPE,
                u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
                {'id_attr': self.id_attr,
                 'id': ldap.filter.escape_filter_chars(
                     six.text_type(object_id)),
                 'objclass': self.object_class},
                attrlist=DN_ONLY)
        if search_result:
            dn, attrs = search_result[0]
            return dn
        else:
            return self._id_to_dn_string(object_id)
    @staticmethod
    def _dn_to_id(dn):
        """Extract the id value from the first RDN of dn."""
        return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
    def _ldap_res_to_model(self, res):
        """Convert a raw (dn, attrs) search result into a model instance."""
        # LDAP attribute names may be returned in a different case than
        # they are defined in the mapping, so we need to check for keys
        # in a case-insensitive way. We use the case specified in the
        # mapping for the model to ensure we have a predictable way of
        # retrieving values later.
        lower_res = {k.lower(): v for k, v in res[1].items()}
        id_attrs = lower_res.get(self.id_attr.lower())
        if not id_attrs:
            message = _('ID attribute %(id_attr)s not found in LDAP '
                        'object %(dn)s') % ({'id_attr': self.id_attr,
                                             'dn': res[0]})
            raise exception.NotFound(message=message)
        if len(id_attrs) > 1:
            # FIXME(gyee): if this is a multi-value attribute and it has
            # multiple values, we can't use it as ID. Retain the dn_to_id
            # fallback and derive the ID from the DN instead.
            message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
                          'has multiple values and therefore cannot be used '
                          'as an ID. Will get the ID from DN instead') % (
                              {'id_attr': self.id_attr,
                               'dn': res[0]})
            LOG.warning(message)
            id_val = self._dn_to_id(res[0])
        else:
            id_val = id_attrs[0]
        obj = self.model(id=id_val)
        # Copy each known model key from the (case-folded) result, taking
        # the first value of multi-valued attributes.
        for k in obj.known_keys:
            if k in self.attribute_ignore:
                continue
            try:
                map_attr = self.attribute_mapping.get(k, k)
                if map_attr is None:
                    # Ignore attributes that are mapped to None.
                    continue
                v = lower_res[map_attr.lower()]
            except KeyError:
                pass
            else:
                try:
                    obj[k] = v[0]
                except IndexError:
                    obj[k] = None
        return obj
    def check_allow_create(self):
        """Raise ForbiddenAction unless creation is enabled in config."""
        if not self.allow_create:
            action = _('LDAP %s create') % self.options_name
            raise exception.ForbiddenAction(action=action)
    def check_allow_update(self):
        """Raise ForbiddenAction unless update is enabled in config."""
        if not self.allow_update:
            action = _('LDAP %s update') % self.options_name
            raise exception.ForbiddenAction(action=action)
    def check_allow_delete(self):
        """Raise ForbiddenAction unless deletion is enabled in config."""
        if not self.allow_delete:
            action = _('LDAP %s delete') % self.options_name
            raise exception.ForbiddenAction(action=action)
    def affirm_unique(self, values):
        """Raise Conflict when an entity with the same name or id exists."""
        if values.get('name') is not None:
            try:
                self.get_by_name(values['name'])
            except exception.NotFound:
                pass
            else:
                raise exception.Conflict(type=self.options_name,
                                         details=_('Duplicate name, %s.') %
                                         values['name'])
        if values.get('id') is not None:
            try:
                self.get(values['id'])
            except exception.NotFound:
                pass
            else:
                raise exception.Conflict(type=self.options_name,
                                         details=_('Duplicate ID, %s.') %
                                         values['id'])
    def create(self, values):
        """Add a new entry built from values; returns values unchanged."""
        self.affirm_unique(values)
        object_classes = self.structural_classes + [self.object_class]
        attrs = [('objectClass', object_classes)]
        for k, v in values.items():
            if k in self.attribute_ignore:
                continue
            if k == 'id':
                attrs.append((self.id_attr, [v]))
            elif v is not None:
                attr_type = self.attribute_mapping.get(k, k)
                if attr_type is not None:
                    attrs.append((attr_type, [v]))
                # Mirror the value into any extra LDAP attributes mapped
                # to the same keystone attribute.
                extra_attrs = [attr for attr, name
                               in self.extra_attr_mapping.items()
                               if name == k]
                for attr in extra_attrs:
                    attrs.append((attr, [v]))
        # groupOfNames requires at least one member; seed with the dumb
        # member when configured.
        if 'groupOfNames' in object_classes and self.use_dumb_member:
            attrs.append(('member', [self.dumb_member]))
        with self.get_connection() as conn:
            conn.add_s(self._id_to_dn(values['id']), attrs)
        return values
    def _ldap_get(self, object_id, ldap_filter=None):
        """Fetch the raw (dn, attrs) result for object_id, or None."""
        query = (u'(&(%(id_attr)s=%(id)s)'
                 u'%(filter)s'
                 u'(objectClass=%(object_class)s))'
                 % {'id_attr': self.id_attr,
                    'id': ldap.filter.escape_filter_chars(
                        six.text_type(object_id)),
                    'filter': (ldap_filter or self.ldap_filter or ''),
                    'object_class': self.object_class})
        with self.get_connection() as conn:
            try:
                # Only request the attributes the model actually uses.
                attrs = list(set(([self.id_attr] +
                                  list(self.attribute_mapping.values()) +
                                  list(self.extra_attr_mapping.keys()))))
                res = conn.search_s(self.tree_dn,
                                    self.LDAP_SCOPE,
                                    query,
                                    attrs)
            except ldap.NO_SUCH_OBJECT:
                return None
        try:
            return res[0]
        except IndexError:
            return None
    def _ldap_get_limited(self, base, scope, filterstr, attrlist, sizelimit):
        """Search with a server-side size limit via the paged-results control."""
        with self.get_connection() as conn:
            try:
                control = ldap.controls.libldap.SimplePagedResultsControl(
                    criticality=True,
                    size=sizelimit,
                    cookie='')
                msgid = conn.search_ext(base, scope, filterstr, attrlist,
                                        serverctrls=[control])
                rdata = conn.result3(msgid)
                return rdata
            except ldap.NO_SUCH_OBJECT:
                return []
    @driver_hints.truncated
    def _ldap_get_all(self, hints, ldap_filter=None):
        """Fetch all raw results matching the class filter.

        When hints carries a limit, a size-limited search is used and the
        @driver_hints.truncated decorator marks the truncation.
        """
        query = u'(&%s(objectClass=%s)(%s=*))' % (
            ldap_filter or self.ldap_filter or '',
            self.object_class,
            self.id_attr)
        sizelimit = 0
        attrs = list(set(([self.id_attr] +
                          list(self.attribute_mapping.values()) +
                          list(self.extra_attr_mapping.keys()))))
        if hints.limit:
            sizelimit = hints.limit['limit']
            return self._ldap_get_limited(self.tree_dn,
                                          self.LDAP_SCOPE,
                                          query,
                                          attrs,
                                          sizelimit)
        with self.get_connection() as conn:
            try:
                return conn.search_s(self.tree_dn,
                                     self.LDAP_SCOPE,
                                     query,
                                     attrs)
            except ldap.NO_SUCH_OBJECT:
                return []
    def _ldap_get_list(self, search_base, scope, query_params=None,
                       attrlist=None):
        """Search search_base for entries of this object class, optionally
        AND-ed with escaped (attr=value) terms from query_params."""
        query = u'(objectClass=%s)' % self.object_class
        if query_params:
            def calc_filter(attrname, value):
                val_esc = ldap.filter.escape_filter_chars(value)
                return '(%s=%s)' % (attrname, val_esc)
            query = (u'(&%s%s)' %
                     (query, ''.join([calc_filter(k, v) for k, v in
                                      query_params.items()])))
        with self.get_connection() as conn:
            return conn.search_s(search_base, scope, query, attrlist)
    def get(self, object_id, ldap_filter=None):
        """Return the model for object_id or raise the not-found error."""
        res = self._ldap_get(object_id, ldap_filter)
        if res is None:
            raise self._not_found(object_id)
        else:
            return self._ldap_res_to_model(res)
    def get_by_name(self, name, ldap_filter=None):
        """Return the first model whose mapped name attribute equals name."""
        query = (u'(%s=%s)' % (self.attribute_mapping['name'],
                               ldap.filter.escape_filter_chars(
                                   six.text_type(name))))
        res = self.get_all(query)
        try:
            return res[0]
        except IndexError:
            raise self._not_found(name)
    def get_all(self, ldap_filter=None, hints=None):
        """Return models for every entry matching the optional filter."""
        hints = hints or driver_hints.Hints()
        return [self._ldap_res_to_model(x)
                for x in self._ldap_get_all(hints, ldap_filter)]
    def update(self, object_id, values, old_obj=None):
        """Apply values as an LDAP modify and return the refreshed model.

        Unchanged keys are skipped; None values delete the attribute;
        immutable attributes raise ValidationError.
        """
        if old_obj is None:
            old_obj = self.get(object_id)
        modlist = []
        for k, v in values.items():
            if k == 'id':
                # id is encoded in the DN and cannot be modified in place.
                continue
            if k in self.attribute_ignore:
                # Handle 'enabled' specially since can't disable if ignored.
                if k == 'enabled' and (not v):
                    action = _("Disabling an entity where the 'enable' "
                               "attribute is ignored by configuration.")
                    raise exception.ForbiddenAction(action=action)
                continue
            if k in old_obj and old_obj[k] == v:
                continue
            if k in self.immutable_attrs:
                msg = (_("Cannot change %(option_name)s %(attr)s") %
                       {'option_name': self.options_name, 'attr': k})
                raise exception.ValidationError(msg)
            if v is None:
                if old_obj.get(k) is not None:
                    modlist.append((ldap.MOD_DELETE,
                                    self.attribute_mapping.get(k, k),
                                    None))
                continue
            current_value = old_obj.get(k)
            if current_value is None:
                op = ldap.MOD_ADD
                modlist.append((op, self.attribute_mapping.get(k, k), [v]))
            elif current_value != v:
                op = ldap.MOD_REPLACE
                modlist.append((op, self.attribute_mapping.get(k, k), [v]))
        if modlist:
            with self.get_connection() as conn:
                try:
                    conn.modify_s(self._id_to_dn(object_id), modlist)
                except ldap.NO_SUCH_OBJECT:
                    raise self._not_found(object_id)
        return self.get(object_id)
    def delete(self, object_id):
        """Delete the entry for object_id or raise the not-found error."""
        with self.get_connection() as conn:
            try:
                conn.delete_s(self._id_to_dn(object_id))
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(object_id)
    def delete_tree(self, object_id):
        """Delete object_id's entry and its whole subtree.

        Tries the server-side tree-delete control first; falls back to
        deleting descendants one by one (deepest DN first) if the server
        refuses to delete a non-leaf entry.
        """
        tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
                                                        0,
                                                        None)
        with self.get_connection() as conn:
            try:
                conn.delete_ext_s(self._id_to_dn(object_id),
                                  serverctrls=[tree_delete_control])
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(object_id)
            except ldap.NOT_ALLOWED_ON_NONLEAF:
                # Server does not support the tree-delete control: collect
                # every entry in the subtree and delete longest-DN first so
                # children always go before their parents.
                dn = self._id_to_dn(object_id)
                scope = ldap.SCOPE_SUBTREE
                filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
                entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
                if entries:
                    for dn in sorted((e[0] for e in entries),
                                     key=len, reverse=True):
                        conn.delete_s(dn)
                else:
                    LOG.debug('No entries in LDAP subtree %s', dn)
    def add_member(self, member_dn, member_list_dn):
        """Add member_dn to the member attribute of member_list_dn.

        :raises exception.Conflict: If the user was already a member.
        :raises self.NotFound: If the group entry didn't exist.
        """
        with self.get_connection() as conn:
            try:
                mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
                conn.modify_s(member_list_dn, [mod])
            except ldap.TYPE_OR_VALUE_EXISTS:
                raise exception.Conflict(_('Member %(member)s '
                                           'is already a member'
                                           ' of group %(group)s') % {
                                               'member': member_dn,
                                               'group': member_list_dn})
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(member_list_dn)
    def remove_member(self, member_dn, member_list_dn):
        """Remove member_dn from the member attribute of member_list_dn.

        :raises self.NotFound: If the group entry didn't exist.
        """
        with self.get_connection() as conn:
            try:
                mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
                conn.modify_s(member_list_dn, [mod])
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(member_list_dn)
    def _delete_tree_nodes(self, search_base, scope, query_params=None):
        """Delete every entry of this object class under search_base.

        Entries that vanish mid-delete are collected and reported in a
        single warning instead of aborting the sweep.
        """
        query = u'(objectClass=%s)' % self.object_class
        if query_params:
            query = (u'(&%s%s)' %
                     (query, ''.join(['(%s=%s)'
                                      % (k, ldap.filter.escape_filter_chars(v))
                                      for k, v in
                                      query_params.items()])))
        not_deleted_nodes = []
        with self.get_connection() as conn:
            try:
                nodes = conn.search_s(search_base, scope, query,
                                      attrlist=DN_ONLY)
            except ldap.NO_SUCH_OBJECT:
                LOG.debug('Could not find entry with dn=%s', search_base)
                raise self._not_found(self._dn_to_id(search_base))
            else:
                for node_dn, _t in nodes:
                    try:
                        conn.delete_s(node_dn)
                    except ldap.NO_SUCH_OBJECT:
                        not_deleted_nodes.append(node_dn)
        if not_deleted_nodes:
            LOG.warning(_LW("When deleting entries for %(search_base)s, "
                            "could not delete nonexistent entries "
                            "%(entries)s%(dots)s"),
                        {'search_base': search_base,
                         'entries': not_deleted_nodes[:3],
                         'dots': '...' if len(not_deleted_nodes) > 3 else ''})
    def filter_query(self, hints, query=None):
        """Extend an LDAP query with filter terms taken from hints.

        Satisfied filters are removed from hints so upper layers know they
        no longer need to apply them.

        :param hints: contains the list of filters, which may be modified.
        :param query: LDAP query into which to include the filters.
        :returns: LDAP query, updated with any satisfied filters.
        """
        def build_filter(filter_, hints):
            """Build a single filter term, or None if it must be left to
            the controller layer."""
            ldap_attr = self.attribute_mapping[filter_['name']]
            val_esc = ldap.filter.escape_filter_chars(filter_['value'])
            if filter_['case_sensitive']:
                # NOTE: LDAP matching is not reliably case sensitive, so
                # leave case-sensitive filters to the controller to filter.
                return
            if filter_['name'] == 'enabled':
                # NOTE(henry-nash): Due to the different options for storing
                # the enabled attribute (e,g, emulated or not), for now we
                # don't try and filter this at the driver level - we simply
                # leave it for the controller to handle.
                return
            if filter_['comparator'] == 'equals':
                query_term = (u'(%(attr)s=%(val)s)'
                              % {'attr': ldap_attr, 'val': val_esc})
            elif filter_['comparator'] == 'contains':
                query_term = (u'(%(attr)s=*%(val)s*)'
                              % {'attr': ldap_attr, 'val': val_esc})
            elif filter_['comparator'] == 'startswith':
                query_term = (u'(%(attr)s=%(val)s*)'
                              % {'attr': ldap_attr, 'val': val_esc})
            elif filter_['comparator'] == 'endswith':
                query_term = (u'(%(attr)s=*%(val)s)'
                              % {'attr': ldap_attr, 'val': val_esc})
            else:
                # It's a filter we don't understand, so let the caller
                # work out how to check for it.
                return
            return query_term
        if query is None:
            # make sure query is a string so the ldap filter is properly
            # constructed from filter_list later
            query = ''
        if hints is None:
            return query
        filter_list = []
        satisfied_filters = []
        for filter_ in hints.filters:
            if filter_['name'] not in self.attribute_mapping:
                continue
            new_filter = build_filter(filter_, hints)
            if new_filter is not None:
                filter_list.append(new_filter)
                satisfied_filters.append(filter_)
        if filter_list:
            query = u'(&%s%s)' % (query, ''.join(filter_list))
        # Remove the satisfied filters, then return the result
        for filter_ in satisfied_filters:
            hints.filters.remove(filter_)
        return query
class EnabledEmuMixIn(BaseLdap):
DEFAULT_GROUP_OBJECTCLASS = 'groupOfNames'
DEFAULT_MEMBER_ATTRIBUTE = 'member'
    def __init__(self, conf):
        """Read enabled-emulation options and derive the emulation group DN."""
        super(EnabledEmuMixIn, self).__init__(conf)
        enabled_emulation = '%s_enabled_emulation' % self.options_name
        self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
        enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
        self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
        use_group_config = ('%s_enabled_emulation_use_group_config' %
                            self.options_name)
        self.use_group_config = getattr(conf.ldap, use_group_config)
        # Either reuse the group backend's member attribute/objectclass or
        # fall back to the groupOfNames defaults.
        if not self.use_group_config:
            self.member_attribute = self.DEFAULT_MEMBER_ATTRIBUTE
            self.group_objectclass = self.DEFAULT_GROUP_OBJECTCLASS
        else:
            self.member_attribute = conf.ldap.group_member_attribute
            self.group_objectclass = conf.ldap.group_objectclass
        if not self.enabled_emulation_dn:
            # No DN configured: synthesize cn=enabled_<resource>s,<tree_dn>.
            naming_attr_name = 'cn'
            naming_attr_value = 'enabled_%ss' % self.options_name
            sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
            self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
            naming_attr = (naming_attr_name, [naming_attr_value])
        else:
            # Pull the naming attribute/value out of the configured DN's
            # first RDN so the group entry can be created on demand.
            naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
            naming_rdn = naming_dn[0][0]
            naming_attr = (utf8_decode(naming_rdn[0]),
                           utf8_decode(naming_rdn[1]))
        self.enabled_emulation_naming_attr = naming_attr
def _get_enabled(self, object_id, conn):
dn = self._id_to_dn(object_id)
query = '(%s=%s)' % (self.member_attribute,
ldap.filter.escape_filter_chars(dn))
try:
enabled_value = conn.search_s(self.enabled_emulation_dn,
ldap.SCOPE_BASE,
query, attrlist=DN_ONLY)
except ldap.NO_SUCH_OBJECT:
return False
else:
return bool(enabled_value)
def _add_enabled(self, object_id):
with self.get_connection() as conn:
if not self._get_enabled(object_id, conn):
modlist = [(ldap.MOD_ADD,
self.member_attribute,
[self._id_to_dn(object_id)])]
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except ldap.NO_SUCH_OBJECT:
attr_list = [('objectClass', [self.group_objectclass]),
(self.member_attribute,
[self._id_to_dn(object_id)]),
self.enabled_emulation_naming_attr]
if self.use_dumb_member:
attr_list[1][1].append(self.dumb_member)
conn.add_s(self.enabled_emulation_dn, attr_list)
def _remove_enabled(self, object_id):
modlist = [(ldap.MOD_DELETE,
self.member_attribute,
[self._id_to_dn(object_id)])]
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
pass
def create(self, values):
if self.enabled_emulation:
enabled_value = values.pop('enabled', True)
ref = super(EnabledEmuMixIn, self).create(values)
if 'enabled' not in self.attribute_ignore:
if enabled_value:
self._add_enabled(ref['id'])
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).create(values)
def get(self, object_id, ldap_filter=None):
with self.get_connection() as conn:
ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
if ('enabled' not in self.attribute_ignore and
self.enabled_emulation):
ref['enabled'] = self._get_enabled(object_id, conn)
return ref
def get_all(self, ldap_filter=None, hints=None):
hints = hints or driver_hints.Hints()
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
# had to copy BaseLdap.get_all here to ldap_filter by DN
obj_list = [self._ldap_res_to_model(x)
for x in self._ldap_get_all(hints, ldap_filter)
if x[0] != self.enabled_emulation_dn]
with self.get_connection() as conn:
for obj_ref in obj_list:
obj_ref['enabled'] = self._get_enabled(
obj_ref['id'], conn)
return obj_list
else:
return super(EnabledEmuMixIn, self).get_all(ldap_filter, hints)
def update(self, object_id, values, old_obj=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
data = values.copy()
enabled_value = data.pop('enabled', None)
ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
if enabled_value is not None:
if enabled_value:
self._add_enabled(object_id)
else:
self._remove_enabled(object_id)
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).update(
object_id, values, old_obj)
def delete(self, object_id):
if self.enabled_emulation:
self._remove_enabled(object_id)
super(EnabledEmuMixIn, self).delete(object_id)
| true | true |
1c3161819e86146482e457f2ccd896d9d5d1d5de | 37,217 | py | Python | get_map.py | yoqi/yolov4-pytorch-jinshuquexian | cea88e5cf51bfa15590a6bb0a68c63701985d7bf | [
"MIT"
] | null | null | null | get_map.py | yoqi/yolov4-pytorch-jinshuquexian | cea88e5cf51bfa15590a6bb0a68c63701985d7bf | [
"MIT"
] | null | null | null | get_map.py | yoqi/yolov4-pytorch-jinshuquexian | cea88e5cf51bfa15590a6bb0a68c63701985d7bf | [
"MIT"
] | null | null | null | import argparse
import glob
import json
import math
import operator
import os
import shutil
import sys
import numpy as np
#----------------------------------------------------#
#   Computes mAP (mean Average Precision).
#   Code cloned from https://github.com/Cartucho/mAP
#----------------------------------------------------#
MINOVERLAP = 0.5 # default value (defined in the PASCAL VOC2012 challenge)

parser = argparse.ArgumentParser()
parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
# argparse receiving list of classes to be ignored
parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
# argparse receiving list of classes with specific IoU (e.g., python main.py --set-class-iou person 0.7)
parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
args = parser.parse_args()

# Bounding boxes are given as (left, top, right, bottom) in image
# coordinates, with the origin at the top-left corner:
'''
    0,0 ------> x (width)
     |
     |  (Left,Top)
     |      *_________
     |      |         |
     |      |         |
     y      |_________|
  (height)            *
                (Right,Bottom)
'''

# if there are no classes to ignore then replace None by empty list
if args.ignore is None:
    args.ignore = []

specific_iou_flagged = False
if args.set_class_iou is not None:
    specific_iou_flagged = True

# make sure that the cwd() is the location of the python script (so that every path makes sense)
os.chdir(os.path.dirname(os.path.abspath(__file__)))

GT_PATH = os.path.join(os.getcwd(), 'input', 'ground-truth')
DR_PATH = os.path.join(os.getcwd(), 'input', 'detection-results')
# if there are no images then no animation can be shown
IMG_PATH = os.path.join(os.getcwd(), 'input', 'images-optional')
if os.path.exists(IMG_PATH):
    for dirpath, dirnames, files in os.walk(IMG_PATH):
        if not files:
            # no image files found
            args.no_animation = True
else:
    args.no_animation = True

# try to import OpenCV if the user didn't choose the option --no-animation
show_animation = False
if not args.no_animation:
    try:
        import cv2
        show_animation = True
    except ImportError:
        print("\"opencv-python\" not found, please install to visualize the results.")
        args.no_animation = True

# try to import Matplotlib if the user didn't choose the option --no-plot
draw_plot = False
if not args.no_plot:
    try:
        import matplotlib.pyplot as plt
        draw_plot = True
    except ImportError:
        print("\"matplotlib\" not found, please install it to get the resulting plots.")
        args.no_plot = True
def log_average_miss_rate(precision, fp_cumsum, num_images):
    """Compute the log-average miss rate (LAMR).

    The LAMR is obtained by averaging miss rates sampled at 9 evenly
    log-spaced FPPI (false positives per image) points between 1e-2
    and 1e0.

    output:
        lamr | log-average miss rate
        mr   | miss rate
        fppi | false positives per image

    references:
        [1] Dollar, Piotr, et al. "Pedestrian Detection: An Evaluation of the
           State of the Art." Pattern Analysis and Machine Intelligence, IEEE
           Transactions on 34.4 (2012): 743 - 761.
    """
    # No detections for this class: conventional degenerate values.
    if precision.size == 0:
        return 0, 1, 0

    fppi = fp_cumsum / float(num_images)
    mr = 1 - precision

    # Prepend sentinels so every reference point has a predecessor:
    # min(padded fppi) = -1.0 is below min(refs) = 0.01.
    fppi_padded = np.insert(fppi, 0, -1.0)
    mr_padded = np.insert(mr, 0, 1.0)

    # Sample the miss rate at 9 log-spaced FPPI reference points.
    refs = np.logspace(-2.0, 0.0, num=9)
    sampled = np.empty_like(refs)
    for idx, point in enumerate(refs):
        # Last index whose padded FPPI does not exceed the reference
        # point; the -1.0 sentinel guarantees at least one match.
        last = np.where(fppi_padded <= point)[0][-1]
        sampled[idx] = mr_padded[last]

    # Clamp at 1e-10 before the log so exact zeros do not blow up.
    lamr = math.exp(np.mean(np.log(np.maximum(1e-10, sampled))))
    return lamr, mr, fppi
"""
throw error and exit
"""
def error(msg):
    """Print *msg* and abort the script.

    Exits with a non-zero status so shells/CI can detect the failure;
    the original exited with status 0, which made every fatal error
    (e.g. missing ground-truth files) look like success.
    """
    print(msg)
    sys.exit(1)
"""
check if the number is a float between 0.0 and 1.0
"""
def is_float_between_0_and_1(value):
    """Return True when *value* parses as a float strictly inside (0, 1)."""
    try:
        parsed = float(value)
    except ValueError:
        return False
    # Endpoints are deliberately excluded (0.0 and 1.0 return False).
    return 0.0 < parsed < 1.0
"""
Calculate the AP given the recall and precision array
1st) We compute a version of the measured precision/recall curve with
precision monotonically decreasing
2nd) We compute the AP as the area under this curve by numerical integration.
"""
def voc_ap(rec, prec):
    """Compute the PASCAL VOC Average Precision from recall/precision lists.

    1st) A version of the measured precision/recall curve is computed with
         precision monotonically decreasing (the "precision envelope").
    2nd) The AP is the area under that curve by numerical integration.

    --- Official matlab code VOC2012---
    mrec=[0 ; rec ; 1];
    mpre=[0 ; prec ; 0];
    for i=numel(mpre)-1:-1:1
            mpre(i)=max(mpre(i),mpre(i+1));
    end
    i=find(mrec(2:end)~=mrec(1:end-1))+1;
    ap=sum((mrec(i)-mrec(i-1)).*mpre(i));

    Unlike the original implementation this does not mutate the *rec*
    and *prec* lists passed by the caller (the original inserted/appended
    into them in place, forcing callers to pass defensive copies).

    Returns (ap, mrec, mpre) where mrec/mpre are the padded curves.
    """
    # Pad the curve: recall 0 -> 1, precision anchored at 0 on both ends.
    mrec = [0.0] + list(rec) + [1.0]
    mpre = [0.0] + list(prec) + [0.0]
    # Precision envelope: mpre[i] = max(mpre[i:]), walking right-to-left.
    # (matlab indexes start in 1 but python in 0, hence the range below.)
    for i in range(len(mpre) - 2, -1, -1):
        mpre[i] = max(mpre[i], mpre[i + 1])
    # Indexes where the recall changes; only these steps contribute area.
    i_list = [i for i in range(1, len(mrec)) if mrec[i] != mrec[i - 1]]
    # Numerical integration of the resulting step function.
    ap = 0.0
    for i in i_list:
        ap += ((mrec[i] - mrec[i - 1]) * mpre[i])
    return ap, mrec, mpre
"""
Convert the lines of a file to a list
"""
def file_lines_to_list(path):
    """Read the text file at *path* and return its lines as a list,
    with surrounding whitespace (including the trailing newline) stripped."""
    with open(path) as handle:
        return [line.strip() for line in handle]
"""
Draws text in image
"""
def draw_text_in_image(img, text, pos, color, line_width):
    """Draw *text* on *img* at *pos* (bottom-left corner) in *color*.

    Returns (img, new cumulative line width) so successive calls can be
    laid out side by side on the same text line.
    """
    font = cv2.FONT_HERSHEY_PLAIN
    scale = 1
    thickness = 1
    cv2.putText(img, text,
                pos,
                font,
                scale,
                color,
                thickness)
    # getTextSize -> ((width, height), baseline); only the width matters.
    text_width = cv2.getTextSize(text, font, scale, thickness)[0][0]
    return img, (line_width + text_width)
"""
Plot - adjust axes
"""
def adjust_axes(r, t, fig, axes):
    """Widen the x-axis of *axes* so the text artist *t* (rendered with
    renderer *r*) fits inside figure *fig* without being clipped."""
    # Text width in inches, measured from the rendered bounding box.
    extent = t.get_window_extent(renderer=r)
    text_inches = extent.width / fig.dpi
    # Scale the upper x-limit by the same proportion the figure would
    # need to grow to accommodate the text.
    fig_width = fig.get_figwidth()
    scale = (fig_width + text_inches) / fig_width
    lo, hi = axes.get_xlim()
    axes.set_xlim([lo, hi * scale])
"""
Draw plot using Matplotlib
"""
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
    """Render a horizontal bar chart of *dictionary* (class -> value).

    The chart is saved to *output_path* and optionally shown on screen.
    When *true_p_bar* is a dict (instead of ""), each bar is split into a
    green true-positive segment and a red false-positive segment.
    """
    # sort the dictionary by decreasing value, into a list of tuples
    sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
    # unpacking the list of tuples into two lists
    sorted_keys, sorted_values = zip(*sorted_dic_by_value)
    #
    if true_p_bar != "":
        """
         Special case to draw in:
            - green -> TP: True Positives (object detected and matches ground-truth)
            - red -> FP: False Positives (object detected but does not match ground-truth)
            - orange -> FN: False Negatives (object not detected but present in the ground-truth)
        """
        fp_sorted = []
        tp_sorted = []
        for key in sorted_keys:
            fp_sorted.append(dictionary[key] - true_p_bar[key])
            tp_sorted.append(true_p_bar[key])
        plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive')
        plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive', left=fp_sorted)
        # add legend
        plt.legend(loc='lower right')
        """
         Write number on side of bar
        """
        fig = plt.gcf() # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            fp_val = fp_sorted[i]
            tp_val = tp_sorted[i]
            fp_str_val = " " + str(fp_val)
            tp_str_val = fp_str_val + " " + str(tp_val)
            # trick to paint multicolor with offset:
            # first paint everything and then repaint the first number
            t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
            plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
            if i == (len(sorted_values)-1): # largest bar
                adjust_axes(r, t, fig, axes)
    else:
        plt.barh(range(n_classes), sorted_values, color=plot_color)
        """
         Write number on side of bar
        """
        fig = plt.gcf() # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            str_val = " " + str(val) # add a space before
            if val < 1.0:
                str_val = " {0:.2f}".format(val)
            t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
            # re-set axes to show number inside the figure
            if i == (len(sorted_values)-1): # largest bar
                adjust_axes(r, t, fig, axes)
    # set window title
    fig.canvas.set_window_title(window_title)
    # write classes in y axis
    tick_font_size = 12
    plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
    """
     Re-scale height accordingly
    """
    init_height = fig.get_figheight()
    # compute the matrix height in points and inches
    dpi = fig.dpi
    height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)
    height_in = height_pt / dpi
    # compute the required figure height
    top_margin = 0.15 # in percentage of the figure height
    bottom_margin = 0.05 # in percentage of the figure height
    figure_height = height_in / (1 - top_margin - bottom_margin)
    # set new height
    if figure_height > init_height:
        fig.set_figheight(figure_height)
    # set plot title
    plt.title(plot_title, fontsize=14)
    # set axis titles
    # plt.xlabel('classes')
    plt.xlabel(x_label, fontsize='large')
    # adjust size of window
    fig.tight_layout()
    # save the plot
    fig.savefig(output_path)
    # show image
    if to_show:
        plt.show()
    # close the plot
    plt.close()
"""
Create a ".temp_files/" and "results/" directory
"""
# Scratch directory for the intermediate ".json" files.
TEMP_FILES_PATH = ".temp_files"
if not os.path.exists(TEMP_FILES_PATH): # if it doesn't exist already
    os.makedirs(TEMP_FILES_PATH)
# Output directory; wiped on every run so stale results never survive.
results_files_path = "results"
if os.path.exists(results_files_path): # if it exist already
    # reset the results directory
    shutil.rmtree(results_files_path)
os.makedirs(results_files_path)
if draw_plot:
    os.makedirs(os.path.join(results_files_path, "AP"))
    os.makedirs(os.path.join(results_files_path, "F1"))
    os.makedirs(os.path.join(results_files_path, "Recall"))
    os.makedirs(os.path.join(results_files_path, "Precision"))
if show_animation:
    os.makedirs(os.path.join(results_files_path, "images", "detections_one_by_one"))
"""
ground-truth
Load each of the ground-truth files into a temporary ".json" file.
Create a list of all the class names present in the ground-truth (gt_classes).
"""
# get a list with the ground-truth files
ground_truth_files_list = glob.glob(GT_PATH + '/*.txt')
if len(ground_truth_files_list) == 0:
    error("Error: No ground-truth files found!")
ground_truth_files_list.sort()
# dictionary with counter per class
gt_counter_per_class = {}
counter_images_per_class = {}

for txt_file in ground_truth_files_list:
    #print(txt_file)
    file_id = txt_file.split(".txt", 1)[0]
    file_id = os.path.basename(os.path.normpath(file_id))
    # check if there is a correspondent detection-results file
    temp_path = os.path.join(DR_PATH, (file_id + ".txt"))
    if not os.path.exists(temp_path):
        error_msg = "Error. File not found: {}\n".format(temp_path)
        error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
        error(error_msg)
    lines_list = file_lines_to_list(txt_file)
    # create ground-truth dictionary
    bounding_boxes = []
    is_difficult = False
    already_seen_classes = []
    for line in lines_list:
        # Expected format: "<class> <left> <top> <right> <bottom> [difficult]".
        try:
            if "difficult" in line:
                class_name, left, top, right, bottom, _difficult = line.split()
                is_difficult = True
            else:
                class_name, left, top, right, bottom = line.split()
        except:
            # Class names may contain spaces: rebuild the name from the
            # leading tokens and read the coordinates from the tail.
            if "difficult" in line:
                line_split = line.split()
                _difficult = line_split[-1]
                bottom = line_split[-2]
                right = line_split[-3]
                top = line_split[-4]
                left = line_split[-5]
                class_name = ""
                for name in line_split[:-5]:
                    class_name += name
                is_difficult = True
            else:
                line_split = line.split()
                bottom = line_split[-1]
                right = line_split[-2]
                top = line_split[-3]
                left = line_split[-4]
                class_name = ""
                for name in line_split[:-4]:
                    class_name += name
        # check if class is in the ignore list, if yes skip
        if class_name in args.ignore:
            continue
        bbox = left + " " + top + " " + right + " " +bottom
        if is_difficult:
            # "difficult" objects are kept for matching but not counted.
            bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False, "difficult":True})
            is_difficult = False
        else:
            bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False})
            # count that object
            if class_name in gt_counter_per_class:
                gt_counter_per_class[class_name] += 1
            else:
                # if class didn't exist yet
                gt_counter_per_class[class_name] = 1
            if class_name not in already_seen_classes:
                if class_name in counter_images_per_class:
                    counter_images_per_class[class_name] += 1
                else:
                    # if class didn't exist yet
                    counter_images_per_class[class_name] = 1
                already_seen_classes.append(class_name)
    # dump bounding_boxes into a ".json" file
    with open(TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json", 'w') as outfile:
        json.dump(bounding_boxes, outfile)

gt_classes = list(gt_counter_per_class.keys())
# let's sort the classes alphabetically
gt_classes = sorted(gt_classes)
n_classes = len(gt_classes)
#print(gt_classes)
#print(gt_counter_per_class)
"""
Check format of the flag --set-class-iou (if used)
e.g. check if class exists
"""
# Validate the --set-class-iou flag: pairs of (known class, IoU in (0,1)).
if specific_iou_flagged:
    n_args = len(args.set_class_iou)
    error_msg = \
        '\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]'
    if n_args % 2 != 0:
        error('Error, missing arguments. Flag usage:' + error_msg)
    # [class_1] [IoU_1] [class_2] [IoU_2]
    # specific_iou_classes = ['class_1', 'class_2']
    specific_iou_classes = args.set_class_iou[::2] # even
    # iou_list = ['IoU_1', 'IoU_2']
    iou_list = args.set_class_iou[1::2] # odd
    if len(specific_iou_classes) != len(iou_list):
        error('Error, missing arguments. Flag usage:' + error_msg)
    for tmp_class in specific_iou_classes:
        if tmp_class not in gt_classes:
            error('Error, unknown class \"' + tmp_class + '\". Flag usage:' + error_msg)
    for num in iou_list:
        if not is_float_between_0_and_1(num):
            error('Error, IoU must be between 0.0 and 1.0. Flag usage:' + error_msg)
"""
detection-results
Load each of the detection-results files into a temporary ".json" file.
"""
# get a list with the detection-results files
dr_files_list = glob.glob(DR_PATH + '/*.txt')
dr_files_list.sort()

# Group detections per class and cache them as one ".json" per class,
# sorted by decreasing confidence (required for the PR computation).
for class_index, class_name in enumerate(gt_classes):
    bounding_boxes = []
    for txt_file in dr_files_list:
        #print(txt_file)
        # the first time it checks if all the corresponding ground-truth files exist
        file_id = txt_file.split(".txt",1)[0]
        file_id = os.path.basename(os.path.normpath(file_id))
        temp_path = os.path.join(GT_PATH, (file_id + ".txt"))
        if class_index == 0:
            if not os.path.exists(temp_path):
                error_msg = "Error. File not found: {}\n".format(temp_path)
                error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
                error(error_msg)
        lines = file_lines_to_list(txt_file)
        for line in lines:
            # Expected format: "<class> <confidence> <left> <top> <right> <bottom>".
            try:
                tmp_class_name, confidence, left, top, right, bottom = line.split()
            except:
                # Class names with spaces: rebuild from the leading tokens.
                line_split = line.split()
                bottom = line_split[-1]
                right = line_split[-2]
                top = line_split[-3]
                left = line_split[-4]
                confidence = line_split[-5]
                tmp_class_name = ""
                for name in line_split[:-5]:
                    tmp_class_name += name
            if tmp_class_name == class_name:
                #print("match")
                bbox = left + " " + top + " " + right + " " +bottom
                bounding_boxes.append({"confidence":confidence, "file_id":file_id, "bbox":bbox})
    #print(bounding_boxes)
    # sort detection-results by decreasing confidence
    bounding_boxes.sort(key=lambda x:float(x['confidence']), reverse=True)
    with open(TEMP_FILES_PATH + "/" + class_name + "_dr.json", 'w') as outfile:
        json.dump(bounding_boxes, outfile)
"""
Calculate the AP for each class
"""
sum_AP = 0.0
ap_dictionary = {}
lamr_dictionary = {}
# open file to store the results
with open(results_files_path + "/results.txt", 'w') as results_file:
    results_file.write("# AP and precision/recall per class\n")
    count_true_positives = {}
    for class_index, class_name in enumerate(gt_classes):
        count_true_positives[class_name] = 0
        """
         Load detection-results of that class
        """
        dr_file = TEMP_FILES_PATH + "/" + class_name + "_dr.json"
        dr_data = json.load(open(dr_file))
        """
         Assign detection-results to ground-truth objects
        """
        nd = len(dr_data)
        tp = [0] * nd # creates an array of zeros of size nd
        fp = [0] * nd
        score = [0] * nd
        # Index of the last detection with confidence > 0.5; used later to
        # report F1/Recall/Precision at the 0.5 score threshold.
        score05_idx = 0
        for idx, detection in enumerate(dr_data):
            file_id = detection["file_id"]
            score[idx] = float(detection["confidence"])
            if score[idx] > 0.5:
                score05_idx = idx

            if show_animation:
                # find ground truth image
                ground_truth_img = glob.glob1(IMG_PATH, file_id + ".*")
                #tifCounter = len(glob.glob1(myPath,"*.tif"))
                if len(ground_truth_img) == 0:
                    error("Error. Image not found with id: " + file_id)
                elif len(ground_truth_img) > 1:
                    error("Error. Multiple image with id: " + file_id)
                else: # found image
                    #print(IMG_PATH + "/" + ground_truth_img[0])
                    # Load image
                    img = cv2.imread(IMG_PATH + "/" + ground_truth_img[0])
                    # load image with draws of multiple detections
                    img_cumulative_path = results_files_path + "/images/" + ground_truth_img[0]
                    if os.path.isfile(img_cumulative_path):
                        img_cumulative = cv2.imread(img_cumulative_path)
                    else:
                        img_cumulative = img.copy()
                    # Add bottom border to image
                    bottom_border = 60
                    BLACK = [0, 0, 0]
                    img = cv2.copyMakeBorder(img, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=BLACK)
            # assign detection-results to ground truth object if any
            # open ground-truth with that file_id
            gt_file = TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json"
            ground_truth_data = json.load(open(gt_file))
            ovmax = -1
            gt_match = -1
            # load detected object bounding-box
            bb = [ float(x) for x in detection["bbox"].split() ]
            for obj in ground_truth_data:
                # look for a class_name match
                if obj["class_name"] == class_name:
                    bbgt = [ float(x) for x in obj["bbox"].split() ]
                    bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])]
                    # +1 because boxes are inclusive pixel coordinates.
                    iw = bi[2] - bi[0] + 1
                    ih = bi[3] - bi[1] + 1
                    if iw > 0 and ih > 0:
                        # compute overlap (IoU) = area of intersection / area of union
                        ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
                                        + 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
                        ov = iw * ih / ua
                        if ov > ovmax:
                            ovmax = ov
                            gt_match = obj

            # assign detection as true positive/don't care/false positive
            if show_animation:
                status = "NO MATCH FOUND!" # status is only used in the animation
            # set minimum overlap
            min_overlap = MINOVERLAP
            if specific_iou_flagged:
                if class_name in specific_iou_classes:
                    index = specific_iou_classes.index(class_name)
                    min_overlap = float(iou_list[index])
            if ovmax >= min_overlap:
                if "difficult" not in gt_match:
                    if not bool(gt_match["used"]):
                        # true positive
                        tp[idx] = 1
                        gt_match["used"] = True
                        count_true_positives[class_name] += 1
                        # update the ".json" file
                        with open(gt_file, 'w') as f:
                            f.write(json.dumps(ground_truth_data))
                        if show_animation:
                            status = "MATCH!"
                    else:
                        # false positive (multiple detection)
                        fp[idx] = 1
                        if show_animation:
                            status = "REPEATED MATCH!"
            else:
                # false positive
                fp[idx] = 1
                if ovmax > 0:
                    status = "INSUFFICIENT OVERLAP"

            """
             Draw image to show animation
            """
            if show_animation:
                height, widht = img.shape[:2]
                # colors (OpenCV works with BGR)
                white = (255,255,255)
                light_blue = (255,200,100)
                green = (0,255,0)
                light_red = (30,30,255)
                # 1st line
                margin = 10
                v_pos = int(height - margin - (bottom_border / 2.0))
                text = "Image: " + ground_truth_img[0] + " "
                img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
                text = "Class [" + str(class_index) + "/" + str(n_classes) + "]: " + class_name + " "
                img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), light_blue, line_width)
                if ovmax != -1:
                    color = light_red
                    if status == "INSUFFICIENT OVERLAP":
                        text = "IoU: {0:.2f}% ".format(ovmax*100) + "< {0:.2f}% ".format(min_overlap*100)
                    else:
                        text = "IoU: {0:.2f}% ".format(ovmax*100) + ">= {0:.2f}% ".format(min_overlap*100)
                        color = green
                    img, _ = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
                # 2nd line
                v_pos += int(bottom_border / 2.0)
                rank_pos = str(idx+1) # rank position (idx starts at 0)
                text = "Detection #rank: " + rank_pos + " confidence: {0:.2f}% ".format(float(detection["confidence"])*100)
                img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
                color = light_red
                if status == "MATCH!":
                    color = green
                text = "Result: " + status + " "
                img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)

                font = cv2.FONT_HERSHEY_SIMPLEX
                if ovmax > 0: # if there is intersections between the bounding-boxes
                    bbgt = [ int(round(float(x))) for x in gt_match["bbox"].split() ]
                    cv2.rectangle(img,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
                    cv2.rectangle(img_cumulative,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
                    cv2.putText(img_cumulative, class_name, (bbgt[0],bbgt[1] - 5), font, 0.6, light_blue, 1, cv2.LINE_AA)
                bb = [int(i) for i in bb]
                cv2.rectangle(img,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
                cv2.rectangle(img_cumulative,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
                cv2.putText(img_cumulative, class_name, (bb[0],bb[1] - 5), font, 0.6, color, 1, cv2.LINE_AA)
                # show image
                cv2.imshow("Animation", img)
                cv2.waitKey(20) # show for 20 ms
                # save image to results
                output_img_path = results_files_path + "/images/detections_one_by_one/" + class_name + "_detection" + str(idx) + ".jpg"
                cv2.imwrite(output_img_path, img)
                # save the image with all the objects drawn to it
                cv2.imwrite(img_cumulative_path, img_cumulative)

        # compute precision/recall
        # (running cumulative sums over the per-detection fp/tp flags)
        cumsum = 0
        for idx, val in enumerate(fp):
            fp[idx] += cumsum
            cumsum += val
        cumsum = 0
        for idx, val in enumerate(tp):
            tp[idx] += cumsum
            cumsum += val
        #print(tp)
        rec = tp[:]
        for idx, val in enumerate(tp):
            rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
        #print(rec)
        prec = tp[:]
        for idx, val in enumerate(tp):
            prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
        #print(prec)

        ap, mrec, mprec = voc_ap(rec[:], prec[:])
        F1 = np.array(rec)*np.array(prec)/(np.array(prec)+np.array(rec))*2
        sum_AP += ap
        text = "{0:.2f}%".format(ap*100) + " = " + class_name + " AP " #class_name + " AP = {0:.2f}%".format(ap*100)

        if len(prec)>0:
            F1_text = "{0:.2f}".format(F1[score05_idx]) + " = " + class_name + " F1 "
            Recall_text = "{0:.2f}%".format(rec[score05_idx]*100) + " = " + class_name + " Recall "
            Precision_text = "{0:.2f}%".format(prec[score05_idx]*100) + " = " + class_name + " Precision "
        else:
            F1_text = "0.00" + " = " + class_name + " F1 "
            Recall_text = "0.00%" + " = " + class_name + " Recall "
            Precision_text = "0.00%" + " = " + class_name + " Precision "
        """
         Write to results.txt
        """
        rounded_prec = [ '%.2f' % elem for elem in prec ]
        rounded_rec = [ '%.2f' % elem for elem in rec ]
        results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")
        if not args.quiet:
            if(len(rec)!=0):
                print(text + "\t||\tscore_threhold=0.5 : " + "F1=" + "{0:.2f}".format(F1[score05_idx])\
                    + " ; Recall=" + "{0:.2f}%".format(rec[score05_idx]*100) + " ; Precision=" + "{0:.2f}%".format(prec[score05_idx]*100))
        ap_dictionary[class_name] = ap

        n_images = counter_images_per_class[class_name]
        lamr, mr, fppi = log_average_miss_rate(np.array(rec), np.array(fp), n_images)
        lamr_dictionary[class_name] = lamr

        """
         Draw plot
        """
        if draw_plot:
            plt.plot(rec, prec, '-o')
            # add a new penultimate point to the list (mrec[-2], 0.0)
            # since the last line segment (and respective area) do not affect the AP value
            area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
            area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
            plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
            # set window title
            fig = plt.gcf() # gcf - get current figure
            fig.canvas.set_window_title('AP ' + class_name)
            # set plot title
            plt.title('class: ' + text)
            #plt.suptitle('This is a somewhat long figure title', fontsize=16)
            # set axis titles
            plt.xlabel('Recall')
            plt.ylabel('Precision')
            # optional - set axes
            axes = plt.gca() # gca - get current axes
            axes.set_xlim([0.0,1.0])
            axes.set_ylim([0.0,1.05]) # .05 to give some extra space
            # Alternative option -> wait for button to be pressed
            # while not plt.waitforbuttonpress(): pass # wait for key display
            # Alternative option -> normal display
            # plt.show()
            # save the plot
            fig.savefig(results_files_path + "/AP/" + class_name + ".png")
            plt.cla() # clear axes for next plot

            plt.plot(score, F1, "-", color='orangered')
            plt.title('class: ' + F1_text + "\nscore_threhold=0.5")
            plt.xlabel('Score_Threhold')
            plt.ylabel('F1')
            axes = plt.gca() # gca - get current axes
            axes.set_xlim([0.0,1.0])
            axes.set_ylim([0.0,1.05]) # .05 to give some extra space
            fig.savefig(results_files_path + "/F1/" + class_name + ".png")
            plt.cla() # clear axes for next plot

            plt.plot(score, rec, "-H", color='gold')
            plt.title('class: ' + Recall_text + "\nscore_threhold=0.5")
            plt.xlabel('Score_Threhold')
            plt.ylabel('Recall')
            axes = plt.gca() # gca - get current axes
            axes.set_xlim([0.0,1.0])
            axes.set_ylim([0.0,1.05]) # .05 to give some extra space
            fig.savefig(results_files_path + "/Recall/" + class_name + ".png")
            plt.cla() # clear axes for next plot

            plt.plot(score, prec, "-s", color='palevioletred')
            plt.title('class: ' + Precision_text + "\nscore_threhold=0.5")
            plt.xlabel('Score_Threhold')
            plt.ylabel('Precision')
            axes = plt.gca() # gca - get current axes
            axes.set_xlim([0.0,1.0])
            axes.set_ylim([0.0,1.05]) # .05 to give some extra space
            fig.savefig(results_files_path + "/Precision/" + class_name + ".png")
            plt.cla() # clear axes for next plot
    if show_animation:
        cv2.destroyAllWindows()

    results_file.write("\n# mAP of all classes\n")
    mAP = sum_AP / n_classes
    text = "mAP = {0:.2f}%".format(mAP*100)
    results_file.write(text + "\n")
    print(text)

# remove the temp_files directory
shutil.rmtree(TEMP_FILES_PATH)
"""
Count total of detection-results
"""
# iterate through all the files
det_counter_per_class = {}
for txt_file in dr_files_list:
    # get lines to list
    lines_list = file_lines_to_list(txt_file)
    for line in lines_list:
        # The class name is the first whitespace-separated token.
        class_name = line.split()[0]
        # check if class is in the ignore list, if yes skip
        if class_name in args.ignore:
            continue
        # count that object
        if class_name in det_counter_per_class:
            det_counter_per_class[class_name] += 1
        else:
            # if class didn't exist yet
            det_counter_per_class[class_name] = 1
#print(det_counter_per_class)
dr_classes = list(det_counter_per_class.keys())
"""
Plot the total number of occurences of each class in the ground-truth
"""
# Plot the per-class ground-truth object counts.
if draw_plot:
    window_title = "ground-truth-info"
    plot_title = "ground-truth\n"
    plot_title += "(" + str(len(ground_truth_files_list)) + " files and " + str(n_classes) + " classes)"
    x_label = "Number of objects per class"
    output_path = results_files_path + "/ground-truth-info.png"
    to_show = False
    plot_color = 'forestgreen'
    draw_plot_func(
        gt_counter_per_class,
        n_classes,
        window_title,
        plot_title,
        x_label,
        output_path,
        to_show,
        plot_color,
        '',
        )
"""
 Write number of ground-truth objects per class to results.txt
"""
with open(results_files_path + "/results.txt", 'a') as results_file:
    results_file.write("\n# Number of ground-truth objects per class\n")
    for class_name in sorted(gt_counter_per_class):
        results_file.write(class_name + ": " + str(gt_counter_per_class[class_name]) + "\n")
"""
Finish counting true positives
"""
for class_name in dr_classes:
    # if class exists in detection-result but not in ground-truth then there are no true positives in that class
    if class_name not in gt_classes:
        count_true_positives[class_name] = 0
#print(count_true_positives)

"""
 Plot the total number of occurences of each class in the "detection-results" folder
"""
if draw_plot:
    window_title = "detection-results-info"
    # Plot title
    plot_title = "detection-results\n"
    plot_title += "(" + str(len(dr_files_list)) + " files and "
    count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(det_counter_per_class.values()))
    plot_title += str(count_non_zero_values_in_dictionary) + " detected classes)"
    # end Plot title
    x_label = "Number of objects per class"
    output_path = results_files_path + "/detection-results-info.png"
    to_show = False
    plot_color = 'forestgreen'
    # Passing the TP counts makes draw_plot_func split each bar into
    # green (TP) and red (FP) segments.
    true_p_bar = count_true_positives
    draw_plot_func(
        det_counter_per_class,
        len(det_counter_per_class),
        window_title,
        plot_title,
        x_label,
        output_path,
        to_show,
        plot_color,
        true_p_bar
        )

"""
 Write number of detected objects per class to results.txt
"""
with open(results_files_path + "/results.txt", 'a') as results_file:
    results_file.write("\n# Number of detected objects per class\n")
    for class_name in sorted(dr_classes):
        n_det = det_counter_per_class[class_name]
        text = class_name + ": " + str(n_det)
        text += " (tp:" + str(count_true_positives[class_name]) + ""
        text += ", fp:" + str(n_det - count_true_positives[class_name]) + ")\n"
        results_file.write(text)
"""
Draw log-average miss rate plot (Show lamr of all classes in decreasing order)
"""
if draw_plot:
window_title = "lamr"
plot_title = "log-average miss rate"
x_label = "log-average miss rate"
output_path = results_files_path + "/lamr.png"
to_show = False
plot_color = 'royalblue'
draw_plot_func(
lamr_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
)
"""
Draw mAP plot (Show AP's of all classes in decreasing order)
"""
if draw_plot:
window_title = "mAP"
plot_title = "mAP = {0:.2f}%".format(mAP*100)
x_label = "Average Precision"
output_path = results_files_path + "/mAP.png"
to_show = True
plot_color = 'royalblue'
draw_plot_func(
ap_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
)
| 38.929916 | 138 | 0.572427 | import argparse
import glob
import json
import math
import operator
import os
import shutil
import sys
import numpy as np
MINOVERLAP = 0.5
parser = argparse.ArgumentParser()
parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
args = parser.parse_args()
if args.ignore is None:
args.ignore = []
specific_iou_flagged = False
if args.set_class_iou is not None:
specific_iou_flagged = True
os.chdir(os.path.dirname(os.path.abspath(__file__)))
GT_PATH = os.path.join(os.getcwd(), 'input', 'ground-truth')
DR_PATH = os.path.join(os.getcwd(), 'input', 'detection-results')
IMG_PATH = os.path.join(os.getcwd(), 'input', 'images-optional')
if os.path.exists(IMG_PATH):
for dirpath, dirnames, files in os.walk(IMG_PATH):
if not files:
args.no_animation = True
else:
args.no_animation = True
show_animation = False
if not args.no_animation:
try:
import cv2
show_animation = True
except ImportError:
print("\"opencv-python\" not found, please install to visualize the results.")
args.no_animation = True
# try to import Matplotlib if the user didn't choose the option --no-plot
draw_plot = False
if not args.no_plot:
try:
import matplotlib.pyplot as plt
draw_plot = True
except ImportError:
print("\"matplotlib\" not found, please install it to get the resulting plots.")
args.no_plot = True
def log_average_miss_rate(precision, fp_cumsum, num_images):
    """Compute the log-average miss rate (LAMR) for one class.

    Also returns the miss-rate and false-positives-per-image curves the
    LAMR is derived from, as ``(lamr, mr, fppi)``.  When ``precision`` is
    empty (no detections) the degenerate result ``(0, 1, 0)`` is returned.
    """
    # No detections at all for this class: report lamr=0, mr=1, fppi=0.
    if precision.size == 0:
        return 0, 1, 0

    fppi = fp_cumsum / float(num_images)
    mr = 1 - precision

    # Prepend sentinels so every reference point below has a predecessor
    # (fppi of -1.0 pairs with a miss rate of 1.0).
    fppi_tmp = np.insert(fppi, 0, -1.0)
    mr_tmp = np.insert(mr, 0, 1.0)

    # Sample the miss rate at 9 FPPI reference points, evenly spaced in
    # log-space over [1e-2, 1e0].
    ref = np.logspace(-2.0, 0.0, num=9)
    for idx, ref_point in enumerate(ref):
        # Largest index whose fppi does not exceed the reference point;
        # always exists thanks to the -1.0 sentinel.
        pos = np.where(fppi_tmp <= ref_point)[0][-1]
        ref[idx] = mr_tmp[pos]

    # Geometric mean of the sampled miss rates, floored at 1e-10 so the
    # logarithm stays finite when a sampled miss rate is zero.
    lamr = math.exp(np.log(np.maximum(1e-10, ref)).mean())
    return lamr, mr, fppi
def error(msg):
    """Print *msg* and terminate the process.

    NOTE(review): the process exits with status 0 even though this is an
    error path — that is the original behaviour and is preserved here;
    wrapping scripts may rely on it.
    """
    print(msg)
    sys.exit(0)
def is_float_between_0_and_1(value):
    """Return True iff *value* parses as a float strictly inside (0, 1).

    The endpoints 0.0 and 1.0 themselves return False, as does anything
    that cannot be parsed as a float.
    """
    try:
        return 0.0 < float(value) < 1.0
    except ValueError:
        # Not parseable as a float at all (e.g. "abc").
        return False
def voc_ap(rec, prec):
    """Compute PASCAL-VOC-style average precision from recall/precision
    lists.

    Both ``rec`` and ``prec`` are padded IN PLACE with sentinel points
    ((0, 0) at the start, recall 1.0 / precision 0.0 at the end), exactly
    as the original VOC code does.  Returns ``(ap, mrec, mpre)`` where
    ``mrec``/``mpre`` are the padded, monotonicity-corrected curves used
    for the integration.
    """
    # Pad the curves with the start/end sentinels (mutates the caller's
    # lists, matching the upstream VOC implementation).
    rec.insert(0, 0.0)
    rec.append(1.0)
    prec.insert(0, 0.0)
    prec.append(0.0)
    mrec = rec[:]
    mpre = prec[:]

    # Make the precision envelope monotonically non-increasing by
    # sweeping backwards and carrying the running maximum.
    for idx in range(len(mpre) - 2, -1, -1):
        mpre[idx] = max(mpre[idx], mpre[idx + 1])

    # Indices where recall changes; each contributes one rectangle to the
    # area under the precision/recall curve.
    changed = [idx for idx in range(1, len(mrec)) if mrec[idx] != mrec[idx - 1]]

    # AP = sum over rectangles of (delta recall) * (envelope precision).
    ap = sum(((mrec[idx] - mrec[idx - 1]) * mpre[idx] for idx in changed), 0.0)
    return ap, mrec, mpre
def file_lines_to_list(path):
    """Read the text file at *path* and return a list of its lines, each
    stripped of surrounding whitespace (including the trailing newline)."""
    with open(path) as handle:
        return [line.strip() for line in handle]
def draw_text_in_image(img, text, pos, color, line_width):
    """Draw *text* onto *img* at *pos* and return ``(img, new_offset)``.

    ``line_width`` is the running x-offset of text already drawn on this
    line; the returned offset is that value plus the pixel width of
    *text*, so successive calls can append text on the same line.
    """
    font = cv2.FONT_HERSHEY_PLAIN
    fontScale = 1
    lineType = 1
    bottomLeftCornerOfText = pos
    # NOTE(review): cv2.putText's positional order is (img, text, org,
    # fontFace, fontScale, color, thickness[, lineType]); ``lineType``
    # here is actually received as the thickness argument. Harmless since
    # both would be 1, but worth confirming against the cv2 docs.
    cv2.putText(img, text,
            bottomLeftCornerOfText,
            font,
            fontScale,
            color,
            lineType)
    # Pixel width the rendered text occupies (getTextSize returns
    # ((width, height), baseline); we keep only the width).
    text_width, _ = cv2.getTextSize(text, font, fontScale, lineType)[0]
    return img, (line_width + text_width)
def adjust_axes(r, t, fig, axes):
    """Stretch the x-axis of *axes* so the text artist *t* fits in view.

    Args:
        r: matplotlib renderer, used to measure the text's pixel extent.
        t: the text artist that may overflow the right edge of the plot.
        fig: current figure (only its dpi/width are read, not modified).
        axes: current axes whose upper x-limit is scaled up.
    """
    # Measured width of the text, converted from pixels to inches.
    bb = t.get_window_extent(renderer=r)
    text_width_inches = bb.width / fig.dpi
    # Proportion by which the figure would need to grow to fit the text.
    current_fig_width = fig.get_figwidth()
    new_fig_width = current_fig_width + text_width_inches
    propotion = new_fig_width / current_fig_width
    # Stretch the upper x-limit by that same proportion so the bar labels
    # drawn past the bar ends stay inside the axes.
    x_lim = axes.get_xlim()
    axes.set_xlim([x_lim[0], x_lim[1]*propotion])
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
    """Render a horizontal bar chart of *dictionary* (class -> value),
    save it to *output_path*, and optionally display it.

    When *true_p_bar* is a dict (i.e. not the empty string), each bar is
    split into a true-positive segment and a false-positive segment, with
    fp = dictionary[key] - true_p_bar[key].
    """
    # Sort classes by value so the largest bar ends up at the top.
    sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
    sorted_keys, sorted_values = zip(*sorted_dic_by_value)
    if true_p_bar != "":
        # Stacked bars: false positives (crimson) then true positives
        # (green) drawn to the right of them via `left=`.
        fp_sorted = []
        tp_sorted = []
        for key in sorted_keys:
            fp_sorted.append(dictionary[key] - true_p_bar[key])
            tp_sorted.append(true_p_bar[key])
        plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive')
        plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive', left=fp_sorted)
        plt.legend(loc='lower right')
        fig = plt.gcf()
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        # Annotate each bar with "fp tp" counts; widen the axes only for
        # the last (topmost, longest) bar's label.
        for i, val in enumerate(sorted_values):
            fp_val = fp_sorted[i]
            tp_val = tp_sorted[i]
            fp_str_val = " " + str(fp_val)
            tp_str_val = fp_str_val + " " + str(tp_val)
            t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
            plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
            if i == (len(sorted_values)-1):
                adjust_axes(r, t, fig, axes)
    else:
        # Simple (unstacked) bars in the caller-chosen colour.
        plt.barh(range(n_classes), sorted_values, color=plot_color)
        """
        Write number on side of bar
        """
        fig = plt.gcf()
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            str_val = " " + str(val)
            if val < 1.0:
                # Fractional values (e.g. AP) shown with two decimals.
                str_val = " {0:.2f}".format(val)
            t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
            if i == (len(sorted_values)-1):
                adjust_axes(r, t, fig, axes)
    fig.canvas.set_window_title(window_title)
    # y-axis: one tick per class, labelled with the class name.
    tick_font_size = 12
    plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
    # Grow the figure height if the per-class tick labels need more room
    # than the default figure provides (1.4 = spacing factor in points).
    init_height = fig.get_figheight()
    dpi = fig.dpi
    height_pt = n_classes * (tick_font_size * 1.4)
    height_in = height_pt / dpi
    top_margin = 0.15
    bottom_margin = 0.05
    figure_height = height_in / (1 - top_margin - bottom_margin)
    if figure_height > init_height:
        fig.set_figheight(figure_height)
    plt.title(plot_title, fontsize=14)
    plt.xlabel(x_label, fontsize='large')
    fig.tight_layout()
    fig.savefig(output_path)
    if to_show:
        plt.show()
    # Close so the next plot starts from a clean figure.
    plt.close()
TEMP_FILES_PATH = ".temp_files"
if not os.path.exists(TEMP_FILES_PATH):
os.makedirs(TEMP_FILES_PATH)
results_files_path = "results"
if os.path.exists(results_files_path): # if it exist already
# reset the results directory
shutil.rmtree(results_files_path)
os.makedirs(results_files_path)
if draw_plot:
os.makedirs(os.path.join(results_files_path, "AP"))
os.makedirs(os.path.join(results_files_path, "F1"))
os.makedirs(os.path.join(results_files_path, "Recall"))
os.makedirs(os.path.join(results_files_path, "Precision"))
if show_animation:
os.makedirs(os.path.join(results_files_path, "images", "detections_one_by_one"))
# get a list with the ground-truth files
ground_truth_files_list = glob.glob(GT_PATH + '/*.txt')
if len(ground_truth_files_list) == 0:
error("Error: No ground-truth files found!")
ground_truth_files_list.sort()
# dictionary with counter per class
gt_counter_per_class = {}
counter_images_per_class = {}
for txt_file in ground_truth_files_list:
#print(txt_file)
file_id = txt_file.split(".txt", 1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
# check if there is a correspondent detection-results file
temp_path = os.path.join(DR_PATH, (file_id + ".txt"))
if not os.path.exists(temp_path):
error_msg = "Error. File not found: {}\n".format(temp_path)
error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
error(error_msg)
lines_list = file_lines_to_list(txt_file)
# create ground-truth dictionary
bounding_boxes = []
is_difficult = False
already_seen_classes = []
for line in lines_list:
try:
if "difficult" in line:
class_name, left, top, right, bottom, _difficult = line.split()
is_difficult = True
else:
class_name, left, top, right, bottom = line.split()
except:
if "difficult" in line:
line_split = line.split()
_difficult = line_split[-1]
bottom = line_split[-2]
right = line_split[-3]
top = line_split[-4]
left = line_split[-5]
class_name = ""
for name in line_split[:-5]:
class_name += name
is_difficult = True
else:
line_split = line.split()
bottom = line_split[-1]
right = line_split[-2]
top = line_split[-3]
left = line_split[-4]
class_name = ""
for name in line_split[:-4]:
class_name += name
# check if class is in the ignore list, if yes skip
if class_name in args.ignore:
continue
bbox = left + " " + top + " " + right + " " +bottom
if is_difficult:
bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False, "difficult":True})
is_difficult = False
else:
bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False})
# count that object
if class_name in gt_counter_per_class:
gt_counter_per_class[class_name] += 1
else:
# if class didn't exist yet
gt_counter_per_class[class_name] = 1
if class_name not in already_seen_classes:
if class_name in counter_images_per_class:
counter_images_per_class[class_name] += 1
else:
counter_images_per_class[class_name] = 1
already_seen_classes.append(class_name)
# dump bounding_boxes into a ".json" file
with open(TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json", 'w') as outfile:
json.dump(bounding_boxes, outfile)
gt_classes = list(gt_counter_per_class.keys())
# let's sort the classes alphabetically
gt_classes = sorted(gt_classes)
n_classes = len(gt_classes)
if specific_iou_flagged:
n_args = len(args.set_class_iou)
error_msg = \
'\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]'
if n_args % 2 != 0:
error('Error, missing arguments. Flag usage:' + error_msg)
specific_iou_classes = args.set_class_iou[::2]
iou_list = args.set_class_iou[1::2]
if len(specific_iou_classes) != len(iou_list):
error('Error, missing arguments. Flag usage:' + error_msg)
for tmp_class in specific_iou_classes:
if tmp_class not in gt_classes:
error('Error, unknown class \"' + tmp_class + '\". Flag usage:' + error_msg)
for num in iou_list:
if not is_float_between_0_and_1(num):
error('Error, IoU must be between 0.0 and 1.0. Flag usage:' + error_msg)
dr_files_list = glob.glob(DR_PATH + '/*.txt')
dr_files_list.sort()
for class_index, class_name in enumerate(gt_classes):
bounding_boxes = []
for txt_file in dr_files_list:
file_id = txt_file.split(".txt",1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
temp_path = os.path.join(GT_PATH, (file_id + ".txt"))
if class_index == 0:
if not os.path.exists(temp_path):
error_msg = "Error. File not found: {}\n".format(temp_path)
error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
error(error_msg)
lines = file_lines_to_list(txt_file)
for line in lines:
try:
tmp_class_name, confidence, left, top, right, bottom = line.split()
except:
line_split = line.split()
bottom = line_split[-1]
right = line_split[-2]
top = line_split[-3]
left = line_split[-4]
confidence = line_split[-5]
tmp_class_name = ""
for name in line_split[:-5]:
tmp_class_name += name
if tmp_class_name == class_name:
bbox = left + " " + top + " " + right + " " +bottom
bounding_boxes.append({"confidence":confidence, "file_id":file_id, "bbox":bbox})
bounding_boxes.sort(key=lambda x:float(x['confidence']), reverse=True)
with open(TEMP_FILES_PATH + "/" + class_name + "_dr.json", 'w') as outfile:
json.dump(bounding_boxes, outfile)
sum_AP = 0.0
ap_dictionary = {}
lamr_dictionary = {}
with open(results_files_path + "/results.txt", 'w') as results_file:
results_file.write("# AP and precision/recall per class\n")
count_true_positives = {}
for class_index, class_name in enumerate(gt_classes):
count_true_positives[class_name] = 0
dr_file = TEMP_FILES_PATH + "/" + class_name + "_dr.json"
dr_data = json.load(open(dr_file))
nd = len(dr_data)
tp = [0] * nd
fp = [0] * nd
score = [0] * nd
score05_idx = 0
for idx, detection in enumerate(dr_data):
file_id = detection["file_id"]
score[idx] = float(detection["confidence"])
if score[idx] > 0.5:
score05_idx = idx
if show_animation:
ground_truth_img = glob.glob1(IMG_PATH, file_id + ".*")
if len(ground_truth_img) == 0:
error("Error. Image not found with id: " + file_id)
elif len(ground_truth_img) > 1:
error("Error. Multiple image with id: " + file_id)
else:
img = cv2.imread(IMG_PATH + "/" + ground_truth_img[0])
img_cumulative_path = results_files_path + "/images/" + ground_truth_img[0]
if os.path.isfile(img_cumulative_path):
img_cumulative = cv2.imread(img_cumulative_path)
else:
img_cumulative = img.copy()
bottom_border = 60
BLACK = [0, 0, 0]
img = cv2.copyMakeBorder(img, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=BLACK)
gt_file = TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json"
ground_truth_data = json.load(open(gt_file))
ovmax = -1
gt_match = -1
bb = [ float(x) for x in detection["bbox"].split() ]
for obj in ground_truth_data:
if obj["class_name"] == class_name:
bbgt = [ float(x) for x in obj["bbox"].split() ]
bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
+ 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
ov = iw * ih / ua
if ov > ovmax:
ovmax = ov
gt_match = obj
if show_animation:
status = "NO MATCH FOUND!" # status is only used in the animation
# set minimum overlap
min_overlap = MINOVERLAP
if specific_iou_flagged:
if class_name in specific_iou_classes:
index = specific_iou_classes.index(class_name)
min_overlap = float(iou_list[index])
if ovmax >= min_overlap:
if "difficult" not in gt_match:
if not bool(gt_match["used"]):
# true positive
tp[idx] = 1
gt_match["used"] = True
count_true_positives[class_name] += 1
# update the ".json" file
with open(gt_file, 'w') as f:
f.write(json.dumps(ground_truth_data))
if show_animation:
status = "MATCH!"
else:
# false positive (multiple detection)
fp[idx] = 1
if show_animation:
status = "REPEATED MATCH!"
else:
# false positive
fp[idx] = 1
if ovmax > 0:
status = "INSUFFICIENT OVERLAP"
if show_animation:
height, widht = img.shape[:2]
# colors (OpenCV works with BGR)
white = (255,255,255)
light_blue = (255,200,100)
green = (0,255,0)
light_red = (30,30,255)
# 1st line
margin = 10
v_pos = int(height - margin - (bottom_border / 2.0))
text = "Image: " + ground_truth_img[0] + " "
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
text = "Class [" + str(class_index) + "/" + str(n_classes) + "]: " + class_name + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), light_blue, line_width)
if ovmax != -1:
color = light_red
if status == "INSUFFICIENT OVERLAP":
text = "IoU: {0:.2f}% ".format(ovmax*100) + "< {0:.2f}% ".format(min_overlap*100)
else:
text = "IoU: {0:.2f}% ".format(ovmax*100) + ">= {0:.2f}% ".format(min_overlap*100)
color = green
img, _ = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
# 2nd line
v_pos += int(bottom_border / 2.0)
rank_pos = str(idx+1) # rank position (idx starts at 0)
text = "Detection #rank: " + rank_pos + " confidence: {0:.2f}% ".format(float(detection["confidence"])*100)
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
color = light_red
if status == "MATCH!":
color = green
text = "Result: " + status + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
font = cv2.FONT_HERSHEY_SIMPLEX
if ovmax > 0: # if there is intersections between the bounding-boxes
bbgt = [ int(round(float(x))) for x in gt_match["bbox"].split() ]
cv2.rectangle(img,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
cv2.rectangle(img_cumulative,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
cv2.putText(img_cumulative, class_name, (bbgt[0],bbgt[1] - 5), font, 0.6, light_blue, 1, cv2.LINE_AA)
bb = [int(i) for i in bb]
cv2.rectangle(img,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
cv2.rectangle(img_cumulative,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
cv2.putText(img_cumulative, class_name, (bb[0],bb[1] - 5), font, 0.6, color, 1, cv2.LINE_AA)
# show image
cv2.imshow("Animation", img)
cv2.waitKey(20) # show for 20 ms
# save image to results
output_img_path = results_files_path + "/images/detections_one_by_one/" + class_name + "_detection" + str(idx) + ".jpg"
cv2.imwrite(output_img_path, img)
# save the image with all the objects drawn to it
cv2.imwrite(img_cumulative_path, img_cumulative)
# compute precision/recall
cumsum = 0
for idx, val in enumerate(fp):
fp[idx] += cumsum
cumsum += val
cumsum = 0
for idx, val in enumerate(tp):
tp[idx] += cumsum
cumsum += val
#print(tp)
rec = tp[:]
for idx, val in enumerate(tp):
rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
#print(rec)
prec = tp[:]
for idx, val in enumerate(tp):
prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
#print(prec)
ap, mrec, mprec = voc_ap(rec[:], prec[:])
F1 = np.array(rec)*np.array(prec)/(np.array(prec)+np.array(rec))*2
sum_AP += ap
text = "{0:.2f}%".format(ap*100) + " = " + class_name + " AP " #class_name + " AP = {0:.2f}%".format(ap*100)
if len(prec)>0:
F1_text = "{0:.2f}".format(F1[score05_idx]) + " = " + class_name + " F1 "
Recall_text = "{0:.2f}%".format(rec[score05_idx]*100) + " = " + class_name + " Recall "
Precision_text = "{0:.2f}%".format(prec[score05_idx]*100) + " = " + class_name + " Precision "
else:
F1_text = "0.00" + " = " + class_name + " F1 "
Recall_text = "0.00%" + " = " + class_name + " Recall "
Precision_text = "0.00%" + " = " + class_name + " Precision "
rounded_prec = [ '%.2f' % elem for elem in prec ]
rounded_rec = [ '%.2f' % elem for elem in rec ]
results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")
if not args.quiet:
if(len(rec)!=0):
print(text + "\t||\tscore_threhold=0.5 : " + "F1=" + "{0:.2f}".format(F1[score05_idx])\
+ " ; Recall=" + "{0:.2f}%".format(rec[score05_idx]*100) + " ; Precision=" + "{0:.2f}%".format(prec[score05_idx]*100))
ap_dictionary[class_name] = ap
n_images = counter_images_per_class[class_name]
lamr, mr, fppi = log_average_miss_rate(np.array(rec), np.array(fp), n_images)
lamr_dictionary[class_name] = lamr
if draw_plot:
plt.plot(rec, prec, '-o')
# add a new penultimate point to the list (mrec[-2], 0.0)
# since the last line segment (and respective area) do not affect the AP value
area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
# set window title
fig = plt.gcf() # gcf - get current figure
fig.canvas.set_window_title('AP ' + class_name)
# set plot title
plt.title('class: ' + text)
#plt.suptitle('This is a somewhat long figure title', fontsize=16)
# set axis titles
plt.xlabel('Recall')
plt.ylabel('Precision')
# optional - set axes
axes = plt.gca() # gca - get current axes
axes.set_xlim([0.0,1.0])
axes.set_ylim([0.0,1.05]) # .05 to give some extra space
# Alternative option -> wait for button to be pressed
# while not plt.waitforbuttonpress(): pass # wait for key display
# Alternative option -> normal display
# plt.show()
# save the plot
fig.savefig(results_files_path + "/AP/" + class_name + ".png")
plt.cla() # clear axes for next plot
plt.plot(score, F1, "-", color='orangered')
plt.title('class: ' + F1_text + "\nscore_threhold=0.5")
plt.xlabel('Score_Threhold')
plt.ylabel('F1')
axes = plt.gca() # gca - get current axes
axes.set_xlim([0.0,1.0])
axes.set_ylim([0.0,1.05]) # .05 to give some extra space
fig.savefig(results_files_path + "/F1/" + class_name + ".png")
plt.cla() # clear axes for next plot
plt.plot(score, rec, "-H", color='gold')
plt.title('class: ' + Recall_text + "\nscore_threhold=0.5")
plt.xlabel('Score_Threhold')
plt.ylabel('Recall')
axes = plt.gca() # gca - get current axes
axes.set_xlim([0.0,1.0])
axes.set_ylim([0.0,1.05]) # .05 to give some extra space
fig.savefig(results_files_path + "/Recall/" + class_name + ".png")
plt.cla() # clear axes for next plot
plt.plot(score, prec, "-s", color='palevioletred')
plt.title('class: ' + Precision_text + "\nscore_threhold=0.5")
plt.xlabel('Score_Threhold')
plt.ylabel('Precision')
axes = plt.gca() # gca - get current axes
axes.set_xlim([0.0,1.0])
axes.set_ylim([0.0,1.05]) # .05 to give some extra space
fig.savefig(results_files_path + "/Precision/" + class_name + ".png")
plt.cla() # clear axes for next plot
if show_animation:
cv2.destroyAllWindows()
results_file.write("\n# mAP of all classes\n")
mAP = sum_AP / n_classes
text = "mAP = {0:.2f}%".format(mAP*100)
results_file.write(text + "\n")
print(text)
# remove the temp_files directory
shutil.rmtree(TEMP_FILES_PATH)
# iterate through all the files
det_counter_per_class = {}
for txt_file in dr_files_list:
# get lines to list
lines_list = file_lines_to_list(txt_file)
for line in lines_list:
class_name = line.split()[0]
# check if class is in the ignore list, if yes skip
if class_name in args.ignore:
continue
# count that object
if class_name in det_counter_per_class:
det_counter_per_class[class_name] += 1
else:
# if class didn't exist yet
det_counter_per_class[class_name] = 1
dr_classes = list(det_counter_per_class.keys())
if draw_plot:
window_title = "ground-truth-info"
plot_title = "ground-truth\n"
plot_title += "(" + str(len(ground_truth_files_list)) + " files and " + str(n_classes) + " classes)"
x_label = "Number of objects per class"
output_path = results_files_path + "/ground-truth-info.png"
to_show = False
plot_color = 'forestgreen'
draw_plot_func(
gt_counter_per_class,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
'',
)
with open(results_files_path + "/results.txt", 'a') as results_file:
results_file.write("\n# Number of ground-truth objects per class\n")
for class_name in sorted(gt_counter_per_class):
results_file.write(class_name + ": " + str(gt_counter_per_class[class_name]) + "\n")
for class_name in dr_classes:
if class_name not in gt_classes:
count_true_positives[class_name] = 0
if draw_plot:
window_title = "detection-results-info"
plot_title = "detection-results\n"
plot_title += "(" + str(len(dr_files_list)) + " files and "
count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(det_counter_per_class.values()))
plot_title += str(count_non_zero_values_in_dictionary) + " detected classes)"
x_label = "Number of objects per class"
output_path = results_files_path + "/detection-results-info.png"
to_show = False
plot_color = 'forestgreen'
true_p_bar = count_true_positives
draw_plot_func(
det_counter_per_class,
len(det_counter_per_class),
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
true_p_bar
)
with open(results_files_path + "/results.txt", 'a') as results_file:
results_file.write("\n# Number of detected objects per class\n")
for class_name in sorted(dr_classes):
n_det = det_counter_per_class[class_name]
text = class_name + ": " + str(n_det)
text += " (tp:" + str(count_true_positives[class_name]) + ""
text += ", fp:" + str(n_det - count_true_positives[class_name]) + ")\n"
results_file.write(text)
if draw_plot:
window_title = "lamr"
plot_title = "log-average miss rate"
x_label = "log-average miss rate"
output_path = results_files_path + "/lamr.png"
to_show = False
plot_color = 'royalblue'
draw_plot_func(
lamr_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
)
if draw_plot:
window_title = "mAP"
plot_title = "mAP = {0:.2f}%".format(mAP*100)
x_label = "Average Precision"
output_path = results_files_path + "/mAP.png"
to_show = True
plot_color = 'royalblue'
draw_plot_func(
ap_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
)
| true | true |
1c3162f3c2a0e8aa04611bdbe0ec3520a34c70ce | 593 | py | Python | capstone/capapi/tests/test_doc_views.py | jcushman/capstone | ef3ced77f69aabe14c89ab67003a6e88736bf777 | [
"MIT"
] | null | null | null | capstone/capapi/tests/test_doc_views.py | jcushman/capstone | ef3ced77f69aabe14c89ab67003a6e88736bf777 | [
"MIT"
] | 4 | 2021-09-02T20:54:31.000Z | 2022-02-27T14:04:06.000Z | capstone/capapi/tests/test_doc_views.py | jcushman/capstone | ef3ced77f69aabe14c89ab67003a6e88736bf777 | [
"MIT"
] | 1 | 2018-08-31T15:11:28.000Z | 2018-08-31T15:11:28.000Z | import re
import pytest
from capapi.tests.helpers import check_response
from django.conf import settings
@pytest.mark.django_db
def test_get_docs_urls(client, jurisdiction, case, reporter):
    """
    Test that every url in docs.html is functional
    """
    settings.API_DOCS_CASE_ID = case.id
    html = client.get('/').content.decode()
    # URLs in the docs page are embedded in single-quoted strings; strip
    # semicolons first, then split on quotes to isolate the candidates.
    candidates = re.sub(";", "", html).split("'")
    for candidate in candidates:
        if candidate.startswith("http"):
            check_response(client.get(candidate))
| 23.72 | 61 | 0.664418 | import re
import pytest
from capapi.tests.helpers import check_response
from django.conf import settings
@pytest.mark.django_db
def test_get_docs_urls(client, jurisdiction, case, reporter):
settings.API_DOCS_CASE_ID = case.id
response = client.get('/')
html = response.content.decode()
tmp_html = re.sub(";", "", html)
possible_lines = tmp_html.split("'")
for line in possible_lines:
if line[0:4] == "http":
response = client.get(line)
check_response(response)
| true | true |
1c316427aff8b8d02a683b7d44980c9cd0d4f02a | 1,625 | py | Python | lib/utils/blob.py | wxshan/py-R-FCN | 347472ecbecb0bad38ff8deb612a5e9ed123c063 | [
"MIT"
] | 8,776 | 2015-10-05T04:08:47.000Z | 2022-03-30T17:12:08.000Z | lib/utils/blob.py | arasharchor/py-faster-rcnn_rotated | de5f4c7abbeca5e55930f863ccb78da4fe130e5a | [
"MIT"
] | 900 | 2015-10-08T04:10:13.000Z | 2022-03-10T05:13:56.000Z | lib/utils/blob.py | arasharchor/py-faster-rcnn_rotated | de5f4c7abbeca5e55930f863ccb78da4fe130e5a | [
"MIT"
] | 4,685 | 2015-10-05T04:08:50.000Z | 2022-03-31T04:44:31.000Z | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Blob helper functions."""
import numpy as np
import cv2
def im_list_to_blob(ims):
    """Convert a list of images into a network input blob.

    Assumes images are already prepared (means subtracted, BGR order).
    Images may differ in size; each is placed in the top-left corner of a
    zero-padded canvas sized to the largest height/width in the list.

    Args:
        ims: list of HxWx3 images (arrays).

    Returns:
        float32 ndarray of shape (num_images, 3, max_height, max_width).
    """
    max_shape = np.array([im.shape for im in ims]).max(axis=0)
    num_images = len(ims)
    blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
                    dtype=np.float32)
    # `range` instead of the Python-2-only `xrange`: identical behaviour
    # on Python 2, and no NameError on Python 3.
    for i in range(num_images):
        im = ims[i]
        blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
    # Move channels (axis 3) to axis 1.
    # Axis order will become: (batch elem, channel, height, width)
    channel_swap = (0, 3, 1, 2)
    blob = blob.transpose(channel_swap)
    return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
    """Mean-subtract and rescale an image for use in a network blob.

    The image is scaled so its shorter side equals *target_size*, unless
    that would push the longer side past *max_size*, in which case the
    scale is capped so the longer side equals *max_size* instead.

    Returns:
        (resized_image, scale_factor) tuple.
    """
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    height_width = im.shape[0:2]
    shorter = np.min(height_width)
    longer = np.max(height_width)
    im_scale = float(target_size) / float(shorter)
    # Prevent the biggest axis from exceeding max_size.
    if np.round(im_scale * longer) > max_size:
        im_scale = float(max_size) / float(longer)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    return im, im_scale
| 35.326087 | 75 | 0.607385 |
import numpy as np
import cv2
def im_list_to_blob(ims):
max_shape = np.array([im.shape for im in ims]).max(axis=0)
num_images = len(ims)
blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
dtype=np.float32)
for i in xrange(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
channel_swap = (0, 3, 1, 2)
blob = blob.transpose(channel_swap)
return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
return im, im_scale
| true | true |
1c3164479f8062feb8f1fd30f8df5e3ef93c9794 | 337 | py | Python | src/dialogs/data_models/room_reservation.py | Joffreybvn/resa-chatbot | 3ca845d4264f29f49b5b0bcee39a0ece7f480f66 | [
"MIT"
] | null | null | null | src/dialogs/data_models/room_reservation.py | Joffreybvn/resa-chatbot | 3ca845d4264f29f49b5b0bcee39a0ece7f480f66 | [
"MIT"
] | null | null | null | src/dialogs/data_models/room_reservation.py | Joffreybvn/resa-chatbot | 3ca845d4264f29f49b5b0bcee39a0ece7f480f66 | [
"MIT"
] | 1 | 2021-02-03T18:43:38.000Z | 2021-02-03T18:43:38.000Z |
class RoomReservation:
    """Hotel's room reservation state, filled in over the course of a
    dialog: party size, length of stay, and breakfast choice."""

    def __init__(self, people: int = None, duration: int = None, breakfast: bool = None):
        """Store the reservation details; each field may start as None
        and be filled in later as the conversation progresses."""
        self.people: int = people  # number of guests
        self.duration: int = duration  # number of nights
        self.breakfast: bool = breakfast  # whether breakfast is included
| 33.7 | 89 | 0.655786 |
class RoomReservation:
def __init__(self, people: int = None, duration: int = None, breakfast: bool = None):
self.people: int = people
self.duration: int = duration
self.breakfast: bool = breakfast
| true | true |
1c3164ae3d284c6014e8e16d40e6d0a532019d1a | 10,281 | py | Python | src/crate/client/sqlalchemy/dialect.py | mxm/crate-python | de13bf4a04e7c45864ebfdc144dffe1ddb53b88f | [
"Apache-2.0"
] | null | null | null | src/crate/client/sqlalchemy/dialect.py | mxm/crate-python | de13bf4a04e7c45864ebfdc144dffe1ddb53b88f | [
"Apache-2.0"
] | null | null | null | src/crate/client/sqlalchemy/dialect.py | mxm/crate-python | de13bf4a04e7c45864ebfdc144dffe1ddb53b88f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8; -*-
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from __future__ import absolute_import
import logging
from datetime import datetime, date
from sqlalchemy import types as sqltypes
from sqlalchemy.engine import default, reflection
from .compiler import (
CrateCompiler,
CrateTypeCompiler,
CrateDDLCompiler
)
from crate.client.exceptions import TimezoneUnawareException
from .types import Object, ObjectArray
# Lowest CrateDB version whose information_schema uses the column name
# ``table_schema`` instead of ``schema_name`` (see CrateDialect.schema_column).
SCHEMA_MIN_VERSION = (0, 57, 0)
# Map CrateDB type names (as reported by information_schema) to SQLAlchemy
# column types; consumed by CrateDialect._resolve_type during reflection.
TYPES_MAP = {
    "boolean": sqltypes.Boolean,
    "short": sqltypes.SmallInteger,
    "timestamp": sqltypes.TIMESTAMP,
    "object": Object,
    "integer": sqltypes.Integer,
    "long": sqltypes.NUMERIC,
    "double": sqltypes.DECIMAL,
    "object_array": ObjectArray,
    "float": sqltypes.Float,
    "string": sqltypes.String
}
try:
    # SQLAlchemy >= 1.1 ships a generic ARRAY type; register the array
    # flavours of each scalar type only when it is available.
    from sqlalchemy.types import ARRAY
    TYPES_MAP["integer_array"] = ARRAY(sqltypes.Integer)
    TYPES_MAP["boolean_array"] = ARRAY(sqltypes.Boolean)
    TYPES_MAP["short_array"] = ARRAY(sqltypes.SmallInteger)
    TYPES_MAP["timestamp_array"] = ARRAY(sqltypes.TIMESTAMP)
    TYPES_MAP["long_array"] = ARRAY(sqltypes.NUMERIC)
    TYPES_MAP["double_array"] = ARRAY(sqltypes.DECIMAL)
    TYPES_MAP["float_array"] = ARRAY(sqltypes.Float)
    TYPES_MAP["string_array"] = ARRAY(sqltypes.String)
except ImportError:
    # Older SQLAlchemy without ARRAY: array columns simply stay unmapped.
    # (Was a bare ``except`` which also swallowed unrelated errors.)
    pass
log = logging.getLogger(__name__)
class Date(sqltypes.Date):
    """Date type that serializes to strings and deserializes Crate timestamps."""

    def bind_processor(self, dialect):
        """Return a converter that renders ``datetime.date`` bind values as text."""
        def to_crate(value):
            if value is None:
                return None
            assert isinstance(value, date)
            return value.strftime('%Y-%m-%d')
        return to_crate

    def result_processor(self, dialect, coltype):
        """Return a converter turning result values back into ``datetime.date``."""
        def parse_string(value):
            # Crate doesn't really have datetime or date types but a
            # timestamp type. The "date" mapping (conversion to long)
            # is only applied if the schema definition for the column exists
            # and if the sql insert statement was used.
            # In case of dynamic mapping or using the rest indexing endpoint
            # the date will be returned in the format it was inserted.
            log.warning(
                "Received timestamp isn't a long value."
                "Trying to parse as date string and then as datetime string")
            try:
                return datetime.strptime(value, '%Y-%m-%d').date()
            except ValueError:
                return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ').date()

        def to_date(value):
            if not value:
                return
            try:
                # Fast path: value is a millisecond epoch timestamp.
                return datetime.utcfromtimestamp(value / 1e3).date()
            except TypeError:
                # Not numeric -- fall back to string parsing.
                return parse_string(value)
        return to_date
class DateTime(sqltypes.DateTime):
    """Datetime type rejecting tz-aware values and decoding Crate timestamps."""

    TZ_ERROR_MSG = "Timezone aware datetime objects are not supported"

    def bind_processor(self, dialect):
        """Return a converter that renders naive ``datetime`` bind values as text."""
        def to_crate(value):
            if value is None:
                return value
            assert isinstance(value, datetime)
            if value.tzinfo is not None:
                # Crate timestamps carry no zone; refuse ambiguous input.
                raise TimezoneUnawareException(DateTime.TZ_ERROR_MSG)
            return value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        return to_crate

    def result_processor(self, dialect, coltype):
        """Return a converter turning result values back into ``datetime``."""
        def parse_string(value):
            # Crate doesn't really have datetime or date types but a
            # timestamp type. The "date" mapping (conversion to long)
            # is only applied if the schema definition for the column exists
            # and if the sql insert statement was used.
            # In case of dynamic mapping or using the rest indexing endpoint
            # the date will be returned in the format it was inserted.
            log.warning(
                "Received timestamp isn't a long value."
                "Trying to parse as datetime string and then as date string")
            try:
                return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
            except ValueError:
                return datetime.strptime(value, '%Y-%m-%d')

        def to_datetime(value):
            if not value:
                return
            try:
                # Fast path: value is a millisecond epoch timestamp.
                return datetime.utcfromtimestamp(value / 1e3)
            except TypeError:
                # Not numeric -- fall back to string parsing.
                return parse_string(value)
        return to_datetime
# Substitute SQLAlchemy's generic date/time types with the Crate-aware
# implementations defined above (picked up by the dialect below).
colspecs = {
    sqltypes.DateTime: DateTime,
    sqltypes.Date: Date
}
class CrateDialect(default.DefaultDialect):
    """SQLAlchemy dialect for CrateDB, backed by the ``crate`` DBAPI client."""
    name = 'crate'
    statement_compiler = CrateCompiler
    ddl_compiler = CrateDDLCompiler
    type_compiler = CrateTypeCompiler
    supports_native_boolean = True
    colspecs = colspecs
    def __init__(self, *args, **kwargs):
        super(CrateDialect, self).__init__(*args, **kwargs)
        # currently our sql parser doesn't support unquoted column names that
        # start with _. Adding it here causes sqlalchemy to quote such columns
        self.identifier_preparer.illegal_initial_characters.add('_')
    def initialize(self, connection):
        """Cache server version and default schema on first connect."""
        # get lowest server version
        self.server_version_info = \
            self._get_server_version_info(connection)
        # get default schema name
        self.default_schema_name = \
            self._get_default_schema_name(connection)
    def do_rollback(self, connection):
        """No-op: CrateDB has no transactions, hence nothing to roll back."""
        # if any exception is raised by the dbapi, sqlalchemy by default
        # attempts to do a rollback crate doesn't support rollbacks.
        # implementing this as noop seems to cause sqlalchemy to propagate the
        # original exception to the user
        pass
    def connect(self, host=None, port=None, *args, **kwargs):
        """Open a DBAPI connection; a ``servers`` kwarg overrides host/port."""
        server = None
        if host:
            server = '{0}:{1}'.format(host, port or '4200')  # 4200 = Crate HTTP default
        if 'servers' in kwargs:
            server = kwargs.pop('servers')
        if server:
            return self.dbapi.connect(servers=server, **kwargs)
        return self.dbapi.connect(**kwargs)
    def _get_default_schema_name(self, connection):
        # CrateDB's built-in default schema.
        return 'doc'
    def _get_server_version_info(self, connection):
        # Lowest version across the cluster, tracked by the DBAPI connection.
        return tuple(connection.connection.lowest_server_version.version)
    @classmethod
    def dbapi(cls):
        """Return the DBAPI module SQLAlchemy should use (the crate client)."""
        from crate import client
        return client
    def has_schema(self, connection, schema):
        return schema in self.get_schema_names(connection)
    def has_table(self, connection, table_name, schema=None):
        return table_name in self.get_table_names(connection, schema=schema)
    @reflection.cache
    def get_schema_names(self, connection, **kw):
        """Return all schema names, sorted ascending."""
        cursor = connection.execute(
            "select schema_name "
            "from information_schema.schemata "
            "order by schema_name asc"
        )
        return [row[0] for row in cursor.fetchall()]
    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """Return table names in *schema* (default schema if omitted)."""
        cursor = connection.execute(
            "select table_name from information_schema.tables "
            "where {0} = ? "
            "order by table_name asc, {0} asc".format(self.schema_column),
            [schema or self.default_schema_name]
        )
        return [row[0] for row in cursor.fetchall()]
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Return SQLAlchemy column-info dicts for *table_name*."""
        query = "SELECT column_name, data_type " \
                "FROM information_schema.columns " \
                "WHERE table_name = ? AND {schema_col}=? " \
                "AND column_name !~ ?" \
                .format(schema_col=self.schema_column)
        cursor = connection.execute(
            query,
            [table_name,
             schema or self.default_schema_name,
             "(.*)\[\'(.*)\'\]"]  # regex to filter subscript
        )
        return [self._create_column_info(row) for row in cursor.fetchall()]
    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Return the primary-key columns of *table_name* (empty if none)."""
        query = "SELECT constraint_name " \
                "FROM information_schema.table_constraints " \
                "WHERE table_name=? " \
                "AND {schema_col}=? AND constraint_type='PRIMARY_KEY' " \
                .format(schema_col=self.schema_column)
        pk = connection.execute(
            query,
            [table_name, schema or self.default_schema_name]
        ).fetchone()
        # NOTE(review): ``set(*pk)`` assumes constraint_name is a *list* of
        # column names (old CrateDB behavior); if it were a plain string this
        # would produce a set of characters -- confirm against server version.
        return {'constrained_columns': set(*pk) if pk is not None else (),
                'name': 'PRIMARY KEY'}
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None,
                         postgresql_ignore_search_path=False, **kw):
        # Crate doesn't support Foreign Keys, so this stays empty
        return []
    @reflection.cache
    def get_indexes(self, connection, table_name, schema, **kw):
        # Index reflection is not implemented for CrateDB.
        return []
    @property
    def schema_column(self):
        """Name of the schema column in information_schema (version-dependent)."""
        return "table_schema" \
            if self.server_version_info >= SCHEMA_MIN_VERSION \
            else "schema_name"
    def _create_column_info(self, row):
        """Build a SQLAlchemy column-info dict from a (name, data_type) row."""
        return {
            'name': row[0],
            'type': self._resolve_type(row[1]),
            # In Crate every column is nullable except PK
            # Primary Key Constraints are not nullable anyway, no matter what
            # we return here, so it's fine to return always `True`
            'nullable': True
        }
    def _resolve_type(self, type_):
        # Unknown Crate types fall back to an opaque user-defined type.
        return TYPES_MAP.get(type_, sqltypes.UserDefinedType)
| 36.587189 | 79 | 0.636028 |
from __future__ import absolute_import
import logging
from datetime import datetime, date
from sqlalchemy import types as sqltypes
from sqlalchemy.engine import default, reflection
from .compiler import (
CrateCompiler,
CrateTypeCompiler,
CrateDDLCompiler
)
from crate.client.exceptions import TimezoneUnawareException
from .types import Object, ObjectArray
SCHEMA_MIN_VERSION = (0, 57, 0)
TYPES_MAP = {
"boolean": sqltypes.Boolean,
"short": sqltypes.SmallInteger,
"timestamp": sqltypes.TIMESTAMP,
"object": Object,
"integer": sqltypes.Integer,
"long": sqltypes.NUMERIC,
"double": sqltypes.DECIMAL,
"object_array": ObjectArray,
"float": sqltypes.Float,
"string": sqltypes.String
}
try:
from sqlalchemy.types import ARRAY
TYPES_MAP["integer_array"] = ARRAY(sqltypes.Integer)
TYPES_MAP["boolean_array"] = ARRAY(sqltypes.Boolean)
TYPES_MAP["short_array"] = ARRAY(sqltypes.SmallInteger)
TYPES_MAP["timestamp_array"] = ARRAY(sqltypes.TIMESTAMP)
TYPES_MAP["long_array"] = ARRAY(sqltypes.NUMERIC)
TYPES_MAP["double_array"] = ARRAY(sqltypes.DECIMAL)
TYPES_MAP["float_array"] = ARRAY(sqltypes.Float)
TYPES_MAP["string_array"] = ARRAY(sqltypes.String)
except:
pass
log = logging.getLogger(__name__)
class Date(sqltypes.Date):
def bind_processor(self, dialect):
def process(value):
if value is not None:
assert isinstance(value, date)
return value.strftime('%Y-%m-%d')
return process
def result_processor(self, dialect, coltype):
def process(value):
if not value:
return
try:
return datetime.utcfromtimestamp(value / 1e3).date()
except TypeError:
pass
# timestamp type. The "date" mapping (conversion to long)
# is only applied if the schema definition for the column exists
# and if the sql insert statement was used.
# In case of dynamic mapping or using the rest indexing endpoint
# the date will be returned in the format it was inserted.
log.warning(
"Received timestamp isn't a long value."
"Trying to parse as date string and then as datetime string")
try:
return datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ').date()
return process
class DateTime(sqltypes.DateTime):
TZ_ERROR_MSG = "Timezone aware datetime objects are not supported"
def bind_processor(self, dialect):
def process(value):
if value is not None:
assert isinstance(value, datetime)
if value.tzinfo is not None:
raise TimezoneUnawareException(DateTime.TZ_ERROR_MSG)
return value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
return value
return process
def result_processor(self, dialect, coltype):
def process(value):
if not value:
return
try:
return datetime.utcfromtimestamp(value / 1e3)
except TypeError:
pass
# timestamp type. The "date" mapping (conversion to long)
# is only applied if the schema definition for the column exists
# and if the sql insert statement was used.
# In case of dynamic mapping or using the rest indexing endpoint
# the date will be returned in the format it was inserted.
log.warning(
"Received timestamp isn't a long value."
"Trying to parse as datetime string and then as date string")
try:
return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
return datetime.strptime(value, '%Y-%m-%d')
return process
colspecs = {
sqltypes.DateTime: DateTime,
sqltypes.Date: Date
}
class CrateDialect(default.DefaultDialect):
name = 'crate'
statement_compiler = CrateCompiler
ddl_compiler = CrateDDLCompiler
type_compiler = CrateTypeCompiler
supports_native_boolean = True
colspecs = colspecs
def __init__(self, *args, **kwargs):
super(CrateDialect, self).__init__(*args, **kwargs)
# start with _. Adding it here causes sqlalchemy to quote such columns
self.identifier_preparer.illegal_initial_characters.add('_')
def initialize(self, connection):
# get lowest server version
self.server_version_info = \
self._get_server_version_info(connection)
# get default schema name
self.default_schema_name = \
self._get_default_schema_name(connection)
def do_rollback(self, connection):
# if any exception is raised by the dbapi, sqlalchemy by default
# attempts to do a rollback crate doesn't support rollbacks.
pass
def connect(self, host=None, port=None, *args, **kwargs):
server = None
if host:
server = '{0}:{1}'.format(host, port or '4200')
if 'servers' in kwargs:
server = kwargs.pop('servers')
if server:
return self.dbapi.connect(servers=server, **kwargs)
return self.dbapi.connect(**kwargs)
def _get_default_schema_name(self, connection):
return 'doc'
def _get_server_version_info(self, connection):
return tuple(connection.connection.lowest_server_version.version)
@classmethod
def dbapi(cls):
from crate import client
return client
def has_schema(self, connection, schema):
return schema in self.get_schema_names(connection)
def has_table(self, connection, table_name, schema=None):
return table_name in self.get_table_names(connection, schema=schema)
@reflection.cache
def get_schema_names(self, connection, **kw):
cursor = connection.execute(
"select schema_name "
"from information_schema.schemata "
"order by schema_name asc"
)
return [row[0] for row in cursor.fetchall()]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
cursor = connection.execute(
"select table_name from information_schema.tables "
"where {0} = ? "
"order by table_name asc, {0} asc".format(self.schema_column),
[schema or self.default_schema_name]
)
return [row[0] for row in cursor.fetchall()]
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
query = "SELECT column_name, data_type " \
"FROM information_schema.columns " \
"WHERE table_name = ? AND {schema_col}=? " \
"AND column_name !~ ?" \
.format(schema_col=self.schema_column)
cursor = connection.execute(
query,
[table_name,
schema or self.default_schema_name,
"(.*)\[\'(.*)\'\]"]
)
return [self._create_column_info(row) for row in cursor.fetchall()]
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
query = "SELECT constraint_name " \
"FROM information_schema.table_constraints " \
"WHERE table_name=? " \
"AND {schema_col}=? AND constraint_type='PRIMARY_KEY' " \
.format(schema_col=self.schema_column)
pk = connection.execute(
query,
[table_name, schema or self.default_schema_name]
).fetchone()
return {'constrained_columns': set(*pk) if pk is not None else (),
'name': 'PRIMARY KEY'}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None,
postgresql_ignore_search_path=False, **kw):
return []
@reflection.cache
def get_indexes(self, connection, table_name, schema, **kw):
return []
@property
def schema_column(self):
return "table_schema" \
if self.server_version_info >= SCHEMA_MIN_VERSION \
else "schema_name"
def _create_column_info(self, row):
return {
'name': row[0],
'type': self._resolve_type(row[1]),
# In Crate every column is nullable except PK
# Primary Key Constraints are not nullable anyway, no matter what
# we return here, so it's fine to return always `True`
'nullable': True
}
def _resolve_type(self, type_):
return TYPES_MAP.get(type_, sqltypes.UserDefinedType)
| true | true |
1c3165112802098c2143e6851e1196e5ee5308b9 | 476 | py | Python | epi_judge_python/bst_node.py | shobhitmishra/CodingProblems | 0fc8c5037eef95b3ec9826b3a6e48885fc86659e | [
"MIT"
] | null | null | null | epi_judge_python/bst_node.py | shobhitmishra/CodingProblems | 0fc8c5037eef95b3ec9826b3a6e48885fc86659e | [
"MIT"
] | null | null | null | epi_judge_python/bst_node.py | shobhitmishra/CodingProblems | 0fc8c5037eef95b3ec9826b3a6e48885fc86659e | [
"MIT"
] | null | null | null | from test_framework.binary_tree_utils import (binary_tree_to_string,
equal_binary_trees)
class BstNode:
    """Binary search tree node holding a value and two child links."""

    def __init__(self, data=None, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

    def __eq__(self, other):
        # Structural equality, delegated to the test-framework helper.
        return equal_binary_trees(self, other)

    def __repr__(self):
        return str(binary_tree_to_string(self))

    def __str__(self):
        return repr(self)
| 28 | 68 | 0.634454 | from test_framework.binary_tree_utils import (binary_tree_to_string,
equal_binary_trees)
class BstNode:
def __init__(self, data=None, left=None, right=None):
self.data, self.left, self.right = data, left, right
def __eq__(self, other):
return equal_binary_trees(self, other)
def __repr__(self):
return str(binary_tree_to_string(self))
def __str__(self):
return self.__repr__()
| true | true |
1c3165daab4a6b606aa7653f0eb1609c0722ff52 | 9,248 | py | Python | loggers_control/scripts/envs/se.py | linZHank/two_loggers | 34b02e443681ddabe796d73863b24b5499168895 | [
"MIT"
] | 4 | 2019-06-09T08:25:25.000Z | 2020-09-19T01:04:49.000Z | loggers_control/scripts/envs/se.py | linZHank/two_loggers | 34b02e443681ddabe796d73863b24b5499168895 | [
"MIT"
] | 7 | 2019-02-01T21:52:27.000Z | 2020-11-02T03:46:21.000Z | loggers_control/scripts/envs/se.py | linZHank/two_loggers | 34b02e443681ddabe796d73863b24b5499168895 | [
"MIT"
] | 5 | 2019-05-06T15:21:25.000Z | 2021-09-22T02:48:33.000Z | #!/usr/bin/env python
"""
Solo escape environment with discrete action space
"""
from __future__ import absolute_import, division, print_function
import sys
import os
import numpy as np
from numpy import pi
from numpy import random
import rospy
import tf
from std_srvs.srv import Empty
from gazebo_msgs.srv import SetModelState, GetModelState
from gazebo_msgs.msg import ModelState, ModelStates
from geometry_msgs.msg import Pose, Twist
class SoloEscape:
    """Gazebo/ROS escape environment for a single 'logger' robot.

    The robot starts inside a walled room with one exit gap in the south
    wall (width read from the ROS parameter server) and must drive through
    it. Observations are 6-dim [x, y, x_dot, y_dot, theta, theta_dot];
    actions index a fixed set of four (linear, angular) velocity pairs.
    """
    def __init__(self):
        # env properties
        self.env_type = 'discrete'
        self.name = 'solo_escape_discrete'
        rospy.init_node(self.name, anonymous=True, log_level=rospy.DEBUG)
        self.rate = rospy.Rate(1000) # gazebo world is running at 1000 hz
        self.max_episode_steps = 1000
        self.observation_space_shape = (6,) # x, y, x_d, y_d, th, th_d
        self.action_space_shape = ()
        # Four discrete actions: (linear m/s, angular rad/s) pairs.
        self.action_reservoir = np.array([[1.5,pi/3], [1.5,-pi/3], [-1.5,pi/3], [-1.5,-pi/3]])
        # robot properties
        self.model_states = ModelStates()  # updated asynchronously by the subscriber below
        self.obs = np.zeros(self.observation_space_shape)
        self.prev_obs = np.zeros(self.observation_space_shape)
        self.status = 'deactivated'
        self.world_name = rospy.get_param('/world_name')
        self.exit_width = rospy.get_param('/exit_width')
        # services
        self.reset_world_proxy = rospy.ServiceProxy('/gazebo/reset_world', Empty)
        self.unpause_physics_proxy = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
        self.pause_physics_proxy = rospy.ServiceProxy('/gazebo/pause_physics', Empty)
        self.set_model_state_proxy = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
        self.get_model_state_proxy = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
        # topic publisher
        self.cmd_vel_pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
        # subscriber
        rospy.Subscriber("/gazebo/model_states", ModelStates, self._model_states_callback) # model states are under monitoring
    def pausePhysics(self):
        """Pause the Gazebo physics engine (blocks until the service exists)."""
        rospy.wait_for_service("/gazebo/pause_physics")
        try:
            self.pause_physics_proxy()
        except rospy.ServiceException as e:
            rospy.logerr("/gazebo/pause_physics service call failed")
    def unpausePhysics(self):
        """Resume the Gazebo physics engine."""
        rospy.wait_for_service("/gazebo/unpause_physics")
        try:
            self.unpause_physics_proxy()
        except rospy.ServiceException as e:
            rospy.logerr("/gazebo/unpause_physics service call failed")
    def resetWorld(self):
        """Reset Gazebo world state (model poses) via the reset_world service."""
        rospy.wait_for_service("/gazebo/reset_world")
        try:
            self.reset_world_proxy()
        except rospy.ServiceException as e:
            rospy.logerr("/gazebo/reset_world service call failed")
    def setModelState(self, model_state):
        """Teleport a model to the given ModelState via the Gazebo service."""
        rospy.wait_for_service('/gazebo/set_model_state')
        try:
            self.set_model_state_proxy(model_state)
        except rospy.ServiceException as e:
            rospy.logerr("Service call failed: {}".format(e))
    def reset(self, init_pose=None):
        """
        Reset environment and place the robot at a random (or given) pose.
        Usage:
            obs = env.reset()
        Returns:
            obs: array([x,y,xdot,ydot,theta,thetadot])
        """
        rospy.logdebug("\nStart environment reset")
        # set init pose
        self.resetWorld()
        self.obs = self._set_pose(init_pose)
        self.prev_obs = self.obs.copy()
        self.step_counter = 0
        # Logged at ERROR level only so it is visible at any verbosity.
        rospy.logerr("\nEnvironment reset!!!")
        return self.obs
    def step(self, action_index):
        """
        Apply one discrete action and advance the environment.
        Usage:
            obs, rew, done, info = env.step(action_index)
        """
        assert 0<=action_index<self.action_reservoir.shape[0]
        rospy.logdebug("\nStart Environment Step")
        action = self.action_reservoir[action_index]
        self._take_action(action)
        self._get_observation()
        # compute reward and done
        reward, done = self._compute_reward()
        self.prev_obs = self.obs.copy()
        info = self.status
        self.step_counter += 1 # counter increments AFTER this transition's reward is computed
        if self.step_counter>=self.max_episode_steps:
            rospy.logwarn("Step: {}, \nMax step reached...".format(self.step_counter))
        rospy.logdebug("End Environment Step\n")
        return self.obs, reward, done, info
    def _set_pose(self, pose=None):
        """
        Set logger with a random or a given pose
        Args:
            pose: array([x, y, theta]); random pose when None
        Returns:
            obs: observation after the robot has been placed
        """
        rospy.logdebug("\nStart setting pose...")
        logger_pose = ModelState()
        logger_pose.model_name = "logger"
        logger_pose.reference_frame = "world"
        logger_pose.pose.position.z = 0.1
        if pose is None: # random pose
            x = random.uniform(-4, 4)
            y = random.uniform(-4, 4)
            th = random.uniform(-pi, pi)
        else: # initialize accordingly
            assert pose.shape==(3,)
            assert pose[0] <= 4.5
            assert pose[1] <= 4.5
            assert -pi<=pose[2]<= pi # theta within [-pi,pi]
            x = pose[0]
            y = pose[1]
            th = pose[2]
        quat = tf.transformations.quaternion_from_euler(0, 0, th)
        logger_pose.pose.position.x = x
        logger_pose.pose.position.y = y
        logger_pose.pose.orientation.z = quat[2]
        logger_pose.pose.orientation.w = quat[3]
        # set pose until on spot (zero-velocity commands settle the robot)
        self.unpausePhysics()
        zero_vel = np.zeros(2)
        self._take_action(zero_vel)
        self.setModelState(model_state=logger_pose)
        self._take_action(zero_vel)
        self._get_observation()
        self.pausePhysics()
        rospy.logdebug("\nEND setting pose...")
        return self.obs
    def _get_observation(self):
        """
        Get observation of the logger's state and update self.status
        Args:
        Returns:
            obs: array([x,y,xdot,ydot,theta,thetadot]) (stored in self.obs)
        """
        id_logger = self.model_states.name.index("logger")
        logger_pose = self.model_states.pose[id_logger]
        logger_twist = self.model_states.twist[id_logger]
        quat = [
            logger_pose.orientation.x,
            logger_pose.orientation.y,
            logger_pose.orientation.z,
            logger_pose.orientation.w
        ]
        euler = tf.transformations.euler_from_quaternion(quat)
        self.obs[0] = logger_pose.position.x
        self.obs[1] = logger_pose.position.y
        self.obs[2] = logger_twist.linear.x
        self.obs[3] = logger_twist.linear.y
        self.obs[4] = euler[2]
        self.obs[5] = logger_twist.angular.z
        # update status based on position relative to the walls / exit gap
        if self.obs[0] > 4.7:
            self.status = "east"
        elif self.obs[0] < -4.7:
            self.status = "west"
        elif self.obs[1] > 4.7:
            self.status = "north"
        elif -6<=self.obs[1]<=-4.7:
            if np.absolute(self.obs[0]) > self.exit_width/2.:
                self.status = "south"
            else:
                if np.absolute(self.obs[0]) > (self.exit_width/2.-0.255): # robot_radius=0.25
                    self.status = 'door' # stuck at door
                else:
                    self.status = "trapped" # tunneling through door
        elif self.obs[1] < -6.25:
            self.status = "escaped"
        else:
            self.status = "trapped"
    def _take_action(self, action):
        """
        Publish cmd_vel for a fixed burst of control cycles
        Args:
            action: array([linear_velocity, angular_velocity])
        Returns:
        """
        rospy.logdebug("\nStart taking action")
        cmd_vel = Twist()
        cmd_vel.linear.x = action[0]
        cmd_vel.angular.z = action[1]
        self.unpausePhysics()
        for _ in range(50):  # hold the command for 50 cycles of the 1 kHz rate
            self.cmd_vel_pub.publish(cmd_vel)
            self.rate.sleep()
        rospy.logdebug("cmd_vel: {}".format(cmd_vel))
        self.pausePhysics()
        rospy.logdebug("\nEnd taking action")
    def _compute_reward(self):
        """
        Compute reward and done based on current status
        Return:
            reward: +100 escaped, -100 wall contact, else small shaped/step cost
            done: True when the episode terminates
        """
        rospy.logdebug("\nStart Computing Reward")
        reward, done = -.1, False
        if self.status == 'escaped':
            reward = 100.
            done = True
            rospy.logerr("\n!!!!!!!!!!!!!!!!\nLogger Escaped !\n!!!!!!!!!!!!!!!!")
        else:
            if self.status == 'trapped':
                if self.obs[1]<-5:
                    # shaping: reward southward progress inside the exit corridor
                    reward = 10*(self.prev_obs[1] - self.obs[1]) - 0.1
            else:
                # any wall/door contact ends the episode with a penalty
                reward = -100.
                done = True
        rospy.logdebug("End Computing Reward\n")
        return reward, done
    def _model_states_callback(self, data):
        # ROS subscriber callback: cache the latest Gazebo model states.
        self.model_states = data
if __name__ == "__main__":
    # Smoke test: alternate between actions 0 and 1 for one episode's worth
    # of steps, resetting whenever an episode terminates.
    env = SoloEscape()
    num_steps = env.max_episode_steps
    obs = env.reset()
    ep, st = 0, 0
    for t in range(env.max_episode_steps):
        a = t%2
        o, r, d, i = env.step(a)
        st += 1
        rospy.loginfo("\n-\nepisode: {}, step: {} \nobs: {}, act: {}, reward: {}, done: {}, info: {}".format(ep+1, st, o, a, r, d, i))
        if d:
            ep += 1
            st = 0
            obs = env.reset()
| 35.030303 | 134 | 0.585424 |
from __future__ import absolute_import, division, print_function
import sys
import os
import numpy as np
from numpy import pi
from numpy import random
import rospy
import tf
from std_srvs.srv import Empty
from gazebo_msgs.srv import SetModelState, GetModelState
from gazebo_msgs.msg import ModelState, ModelStates
from geometry_msgs.msg import Pose, Twist
class SoloEscape:
def __init__(self):
self.env_type = 'discrete'
self.name = 'solo_escape_discrete'
rospy.init_node(self.name, anonymous=True, log_level=rospy.DEBUG)
self.rate = rospy.Rate(1000)
self.max_episode_steps = 1000
self.observation_space_shape = (6,)
self.action_space_shape = ()
self.action_reservoir = np.array([[1.5,pi/3], [1.5,-pi/3], [-1.5,pi/3], [-1.5,-pi/3]])
self.model_states = ModelStates()
self.obs = np.zeros(self.observation_space_shape)
self.prev_obs = np.zeros(self.observation_space_shape)
self.status = 'deactivated'
self.world_name = rospy.get_param('/world_name')
self.exit_width = rospy.get_param('/exit_width')
self.reset_world_proxy = rospy.ServiceProxy('/gazebo/reset_world', Empty)
self.unpause_physics_proxy = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
self.pause_physics_proxy = rospy.ServiceProxy('/gazebo/pause_physics', Empty)
self.set_model_state_proxy = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
self.get_model_state_proxy = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
self.cmd_vel_pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
rospy.Subscriber("/gazebo/model_states", ModelStates, self._model_states_callback)
def pausePhysics(self):
rospy.wait_for_service("/gazebo/pause_physics")
try:
self.pause_physics_proxy()
except rospy.ServiceException as e:
rospy.logerr("/gazebo/pause_physics service call failed")
def unpausePhysics(self):
rospy.wait_for_service("/gazebo/unpause_physics")
try:
self.unpause_physics_proxy()
except rospy.ServiceException as e:
rospy.logerr("/gazebo/unpause_physics service call failed")
def resetWorld(self):
rospy.wait_for_service("/gazebo/reset_world")
try:
self.reset_world_proxy()
except rospy.ServiceException as e:
rospy.logerr("/gazebo/reset_world service call failed")
def setModelState(self, model_state):
rospy.wait_for_service('/gazebo/set_model_state')
try:
self.set_model_state_proxy(model_state)
except rospy.ServiceException as e:
rospy.logerr("Service call failed: {}".format(e))
def reset(self, init_pose=None):
rospy.logdebug("\nStart environment reset")
self.resetWorld()
self.obs = self._set_pose(init_pose)
self.prev_obs = self.obs.copy()
self.step_counter = 0
rospy.logerr("\nEnvironment reset!!!")
return self.obs
def step(self, action_index):
assert 0<=action_index<self.action_reservoir.shape[0]
rospy.logdebug("\nStart Environment Step")
action = self.action_reservoir[action_index]
self._take_action(action)
self._get_observation()
reward, done = self._compute_reward()
self.prev_obs = self.obs.copy()
info = self.status
self.step_counter += 1
if self.step_counter>=self.max_episode_steps:
rospy.logwarn("Step: {}, \nMax step reached...".format(self.step_counter))
rospy.logdebug("End Environment Step\n")
return self.obs, reward, done, info
def _set_pose(self, pose=None):
rospy.logdebug("\nStart setting pose...")
logger_pose = ModelState()
logger_pose.model_name = "logger"
logger_pose.reference_frame = "world"
logger_pose.pose.position.z = 0.1
if pose is None:
x = random.uniform(-4, 4)
y = random.uniform(-4, 4)
th = random.uniform(-pi, pi)
else:
assert pose.shape==(3,)
assert pose[0] <= 4.5
assert pose[1] <= 4.5
assert -pi<=pose[2]<= pi
x = pose[0]
y = pose[1]
th = pose[2]
quat = tf.transformations.quaternion_from_euler(0, 0, th)
logger_pose.pose.position.x = x
logger_pose.pose.position.y = y
logger_pose.pose.orientation.z = quat[2]
logger_pose.pose.orientation.w = quat[3]
self.unpausePhysics()
zero_vel = np.zeros(2)
self._take_action(zero_vel)
self.setModelState(model_state=logger_pose)
self._take_action(zero_vel)
self._get_observation()
self.pausePhysics()
rospy.logdebug("\nEND setting pose...")
return self.obs
def _get_observation(self):
id_logger = self.model_states.name.index("logger")
logger_pose = self.model_states.pose[id_logger]
logger_twist = self.model_states.twist[id_logger]
quat = [
logger_pose.orientation.x,
logger_pose.orientation.y,
logger_pose.orientation.z,
logger_pose.orientation.w
]
euler = tf.transformations.euler_from_quaternion(quat)
self.obs[0] = logger_pose.position.x
self.obs[1] = logger_pose.position.y
self.obs[2] = logger_twist.linear.x
self.obs[3] = logger_twist.linear.y
self.obs[4] = euler[2]
self.obs[5] = logger_twist.angular.z
if self.obs[0] > 4.7:
self.status = "east"
elif self.obs[0] < -4.7:
self.status = "west"
elif self.obs[1] > 4.7:
self.status = "north"
elif -6<=self.obs[1]<=-4.7:
if np.absolute(self.obs[0]) > self.exit_width/2.:
self.status = "south"
else:
if np.absolute(self.obs[0]) > (self.exit_width/2.-0.255):
self.status = 'door'
else:
self.status = "trapped"
elif self.obs[1] < -6.25:
self.status = "escaped"
else:
self.status = "trapped"
def _take_action(self, action):
rospy.logdebug("\nStart taking action")
cmd_vel = Twist()
cmd_vel.linear.x = action[0]
cmd_vel.angular.z = action[1]
self.unpausePhysics()
for _ in range(50):
self.cmd_vel_pub.publish(cmd_vel)
self.rate.sleep()
rospy.logdebug("cmd_vel: {}".format(cmd_vel))
self.pausePhysics()
rospy.logdebug("\nEnd taking action")
def _compute_reward(self):
rospy.logdebug("\nStart Computing Reward")
reward, done = -.1, False
if self.status == 'escaped':
reward = 100.
done = True
rospy.logerr("\n!!!!!!!!!!!!!!!!\nLogger Escaped !\n!!!!!!!!!!!!!!!!")
else:
if self.status == 'trapped':
if self.obs[1]<-5:
reward = 10*(self.prev_obs[1] - self.obs[1]) - 0.1
else:
reward = -100.
done = True
rospy.logdebug("End Computing Reward\n")
return reward, done
def _model_states_callback(self, data):
self.model_states = data
if __name__ == "__main__":
env = SoloEscape()
num_steps = env.max_episode_steps
obs = env.reset()
ep, st = 0, 0
for t in range(env.max_episode_steps):
a = t%2
o, r, d, i = env.step(a)
st += 1
rospy.loginfo("\n-\nepisode: {}, step: {} \nobs: {}, act: {}, reward: {}, done: {}, info: {}".format(ep+1, st, o, a, r, d, i))
if d:
ep += 1
st = 0
obs = env.reset()
| true | true |
1c31661c3a0ef59d87bdec5f52507688f7fa757f | 650 | py | Python | non_demo/tmp36_sensor/reporting.py | alexellis/datacenter-sensor | d51f8fe3debb7e11d0786c26f13c6a2fed7f659d | [
"MIT"
] | 22 | 2016-06-19T03:13:31.000Z | 2020-05-07T18:54:09.000Z | non_demo/tmp36_sensor/reporting.py | alexellis/datacenter-sensor | d51f8fe3debb7e11d0786c26f13c6a2fed7f659d | [
"MIT"
] | null | null | null | non_demo/tmp36_sensor/reporting.py | alexellis/datacenter-sensor | d51f8fe3debb7e11d0786c26f13c6a2fed7f659d | [
"MIT"
] | 9 | 2016-09-12T18:40:38.000Z | 2021-01-15T15:40:43.000Z | import redis
import socket
class Reporter:
    """Publishes this host's sensor readings to a shared Redis instance."""

    def __init__(self, host, port):
        self.host, self.port = host, port
        self.name = socket.getfqdn()  # identify this node by its FQDN

    def announce(self):
        # Connect, then register this node in the shared member registry.
        self.client = redis.StrictRedis(host=self.host, port=self.port, db=0)
        self.client.hset("members", self.name, "1")
        self.client.publish("members.add", self.name)

    def set_key(self, key, values):
        # Keys are namespaced per node: "<fqdn>.<key>".
        self.client.set("{0}.{1}".format(self.name, key), values[key])

    def set(self, values):
        for key in ("temp", "motion"):
            self.set_key(key, values)

    def publish(self):
        # Notify subscribers that fresh data is available for this node.
        self.client.publish("sensors.data", self.name)
| 26 | 77 | 0.62 | import redis
import socket
class Reporter:
def __init__(self, host, port):
self.host = host
self.port = port
self.name = socket.getfqdn()
def announce(self):
self.client = redis.StrictRedis(host=self.host, port=self.port, db=0)
self.client.hset("members", self.name, "1")
self.client.publish("members.add", self.name)
def set_key(self, key, values):
self.client.set(self.name+"."+key, values[key])
def set(self, values):
self.set_key("temp", values)
self.set_key("motion", values)
def publish(self):
self.client.publish("sensors.data", self.name)
| true | true |
1c31676b1fcdbcac0200c101e46d8f7dcaa9f51d | 290 | py | Python | Trinkey_QT2040_TRNG_Gadget/python/trng_read_json.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 665 | 2017-09-27T21:20:14.000Z | 2022-03-31T09:09:25.000Z | Trinkey_QT2040_TRNG_Gadget/python/trng_read_json.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 641 | 2017-10-03T19:46:37.000Z | 2022-03-30T18:28:46.000Z | Trinkey_QT2040_TRNG_Gadget/python/trng_read_json.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 734 | 2017-10-02T22:47:38.000Z | 2022-03-30T14:03:51.000Z | import json
import serial

# Open the gadget's USB CDC serial port.
# NOTE(review): device path is hard-coded; assumes the Trinkey enumerates
# as /dev/ttyACM0 on this machine -- confirm on the target system.
ss = serial.Serial("/dev/ttyACM0")

# Read one full line of output from the device.
_ = ss.readline()  # first read may be incomplete, just toss it
raw_string = ss.readline().strip().decode()

# Parse the line as JSON (the device emits an object with a 'trng' key,
# as used below).
json_data = json.loads(raw_string)

# Print just the random-number payload.
print(json_data['trng'])
| 18.125 | 62 | 0.72069 | import json
import serial
ss = serial.Serial("/dev/ttyACM0")
_ = ss.readline()
raw_string = ss.readline().strip().decode()
json_data = json.loads(raw_string)
print(json_data['trng'])
| true | true |
1c3168173a7050e47e1e563d085db1e50c81e804 | 2,127 | py | Python | TAHMO/__init__.py | lionfish0/TAHMO | 9e20bef1817b11d5dc476775369294de5daedb05 | [
"MIT"
] | null | null | null | TAHMO/__init__.py | lionfish0/TAHMO | 9e20bef1817b11d5dc476775369294de5daedb05 | [
"MIT"
] | null | null | null | TAHMO/__init__.py | lionfish0/TAHMO | 9e20bef1817b11d5dc476775369294de5daedb05 | [
"MIT"
] | null | null | null | import requests
import json
class TAHMO:
    """Minimalist wrapper for accessing the TAHMO API (v1)."""

    def __init__(self, baseurl='https://tahmoapi.mybluemix.net/v1'):
        """
        Minimalist wrapper for accessing the TAHMO API (v1).
        arguments:
            baseurl = url of the API, default: https://tahmoapi.mybluemix.net/v1
        """
        self.baseurl = baseurl

    def setCredentials(self, api_id, api_secret):
        """
        Set the id and secret needed for access to the API.
        arguments:
            api_id = id string
            api_secret = secret string
        """
        self.api_id = api_id
        self.api_secret = api_secret

    def setCredentialsFromJsonFile(self, api_credentials_file):
        """
        If API id and secret are in a json file, can set them directly from the file, e.g:
            tah = TAHMO()
            tah.setCredentialsFromJsonFile('api.json')
        arguments:
            api_credentials_file = filename
        """
        # Use a context manager so the file handle is closed promptly
        # (the original left it open until garbage collection).
        with open(api_credentials_file, 'r') as f:
            api_creds = json.load(f)
        self.setCredentials(api_creds['id'], api_creds['secret'])

    def get_stations(self):
        """
        Get the list of stations you have access to, including various metadata.
        """
        return self.__request('stations')['stations']

    def get_measurements(self, station):
        """
        Get the hourly measurements for a particular station, e.g:
            get_measurements("TA00032")
        arguments:
            station = station id string to access.
        """
        return self.__request('timeseries/%s/hourly' % station)

    def __request(self, endpoint):
        """
        Makes a GET request to the API for the specified endpoint.
        Returns the parsed JSON body, or None if it cannot be parsed.
        """
        apiRequest = requests.get("%s/%s" % (self.baseurl, endpoint),
                                  auth=requests.auth.HTTPBasicAuth(self.api_id, self.api_secret))
        try:
            # Return the parsed body directly; do not shadow the module-level
            # `json` import as the original did.
            return apiRequest.json()
        except ValueError:
            # BUG FIX: the original caught the bare name `JSONDecodeError`,
            # which is undefined in this module and raised NameError on any
            # parse failure. json.JSONDecodeError subclasses ValueError.
            print("Failed to download or failed to parse JSON.")
            print(apiRequest)
            return None
| 34.306452 | 102 | 0.574518 | import requests
import json
class TAHMO:
def __init__(self,baseurl='https://tahmoapi.mybluemix.net/v1'):
self.baseurl = baseurl
def setCredentials(self,api_id,api_secret):
self.api_id = api_id
self.api_secret = api_secret
def setCredentialsFromJsonFile(self,api_credentials_file):
api_creds=json.load(open(api_credentials_file,'r'))
self.setCredentials(api_creds['id'], api_creds['secret'])
def get_stations(self):
return self.__request('stations')['stations']
def get_measurements(self,station):
return self.__request('timeseries/%s/hourly' % station)
def __request(self,endpoint):
apiRequest = requests.get("%s/%s" % (self.baseurl,endpoint),
auth=requests.auth.HTTPBasicAuth(self.api_id, self.api_secret))
try:
json = apiRequest.json()
return json
except JSONDecodeError:
print("Failed to download or failed to parse JSON.")
print(apiRequest)
return None
| true | true |
1c31689e12fa8efaa5472244e62dabcd6668b8c3 | 18,379 | py | Python | dadguide/monster_index.py | chasehult/padbot-cogs | 627b2e7be6103ca6fbe0cb8b6e0500ea8fc21bbb | [
"MIT"
] | null | null | null | dadguide/monster_index.py | chasehult/padbot-cogs | 627b2e7be6103ca6fbe0cb8b6e0500ea8fc21bbb | [
"MIT"
] | null | null | null | dadguide/monster_index.py | chasehult/padbot-cogs | 627b2e7be6103ca6fbe0cb8b6e0500ea8fc21bbb | [
"MIT"
] | null | null | null | import asyncio
import csv
import inspect
import io
import logging
import re
from collections import defaultdict
import aiohttp
from redbot.core.utils import AsyncIter
from tsutils import aobject
from .token_mappings import *
SHEETS_PATTERN = 'https://docs.google.com/spreadsheets/d/1EoZJ3w5xsXZ67kmarLE4vfrZSIIIAfj04HXeZVST3eY' \
'/pub?gid={}&single=true&output=csv'
NICKNAME_OVERRIDES_SHEET = SHEETS_PATTERN.format('0')
GROUP_TREENAMES_OVERRIDES_SHEET = SHEETS_PATTERN.format('2070615818')
PANTHNAME_OVERRIDES_SHEET = SHEETS_PATTERN.format('959933643')
NAME_TOKEN_ALIAS_SHEET = SHEETS_PATTERN.format('1229125459')
MODIFIER_OVERRIDE_SHEET = SHEETS_PATTERN.format('2089525837')
TREE_MODIFIER_OVERRIDE_SHEET = SHEETS_PATTERN.format('1372419168')
logger = logging.getLogger('red.pad-cogs.dadguide.monster_index')
class MonsterIndex2(aobject):
    """Searchable monster index: name tokens, nicknames, and modifier tags.

    Built asynchronously from the dadguide database graph plus several
    published Google-Sheets override documents (nicknames, tree names,
    pantheon aliases, name-token aliases, and manual modifiers).
    """

    async def __ainit__(self, monsters, db):
        """Asynchronously build the index for *monsters* using database *db*.

        Downloads all override sheets concurrently, applies them, then
        populates the token/modifier maps via _build_monster_index().
        """
        self.graph = db.graph
        self.monster_id_to_nickname = defaultdict(set)
        self.monster_id_to_nametokens = defaultdict(set)
        self.monster_id_to_treename = defaultdict(set)
        # Every series name (spaces stripped) is a pantheon nickname by default.
        self.series_id_to_pantheon_nickname = defaultdict(set, {m.series_id: {m.series.name_en.lower().replace(" ", "")}
                                                                for m
                                                                in db.get_all_monsters()})
        self.mwtoken_creators = defaultdict(set)
        # Multi-word series names are matchable as multi-word tokens.
        self.multi_word_tokens = {tuple(m.series.name_en.lower().split())
                                  for m
                                  in db.get_all_monsters()
                                  if " " in m.series.name_en}.union(MULTI_WORD_TOKENS)
        self.replacement_tokens = defaultdict(set)
        self.treename_overrides = set()

        # Fetch all override sheets concurrently.
        nickname_data, treenames_data, pantheon_data, nt_alias_data, mod_data, treemod_data = await asyncio.gather(
            sheet_to_reader(NICKNAME_OVERRIDES_SHEET, 5),
            sheet_to_reader(GROUP_TREENAMES_OVERRIDES_SHEET, 5),
            sheet_to_reader(PANTHNAME_OVERRIDES_SHEET, 2),
            sheet_to_reader(NAME_TOKEN_ALIAS_SHEET, 2),
            sheet_to_reader(MODIFIER_OVERRIDE_SHEET, 2),
            sheet_to_reader(TREE_MODIFIER_OVERRIDE_SHEET, 2),
        )

        # Per-monster nickname overrides. Columns appear to be
        # (monster_id, name, low_priority, override, ignore) -- inferred
        # from usage; confirm against the sheet headers.
        for m_id, name, lp, ov, i in nickname_data:
            if m_id.isdigit() and not i:
                mid = int(m_id)
                if ov:
                    self.treename_overrides.add(mid)
                if lp:
                    self.monster_id_to_nametokens[mid].update(self._name_to_tokens(name))
                else:
                    if " " in name:
                        self.mwtoken_creators[name.lower().replace(" ", "")].add(db.graph.get_monster(mid))
                        self.multi_word_tokens.add(tuple(name.lower().split(" ")))
                    self.monster_id_to_nickname[mid].add(name.lower().replace(" ", ""))

        # Per-tree nickname overrides: applied to every alt evo of the id.
        for m_id, name, mp, ov, i in treenames_data:
            if m_id.isdigit() and not i:
                mid = int(m_id)
                if ov:
                    for emid in self.graph.get_alt_ids_by_id(mid):
                        self.treename_overrides.add(emid)
                if mp:
                    for emid in self.graph.get_alt_ids_by_id(mid):
                        self.monster_id_to_nametokens[emid].update(self._name_to_tokens(name))
                else:
                    if " " in name:
                        self.mwtoken_creators[name.lower().replace(" ", "")].add(db.graph.get_monster(mid))
                        self.multi_word_tokens.add(tuple(name.lower().split(" ")))
                    self.monster_id_to_treename[mid].add(name.lower().replace(" ", ""))

        # Pantheon (series) nickname overrides.
        for sid, name in pantheon_data:
            if sid.isdigit():
                if " " in name:
                    self.multi_word_tokens.add(tuple(name.lower().split(" ")))
                self.series_id_to_pantheon_nickname[int(sid)].add(name.lower().replace(" ", ""))

        next(nt_alias_data)  # Skip over heading
        # Token aliases: a frozenset of required tokens maps to extra tokens
        # that are added whenever all required tokens are present.
        for tokens, alias in nt_alias_data:
            self.replacement_tokens[frozenset(re.split(r'\W+', tokens))].add(alias)

        # Manual modifier overrides, per monster id...
        self.manual_prefixes = defaultdict(set)
        for mid, mods in mod_data:
            if mid.isdigit():
                mid = int(mid)
                for mod in mods.split(","):
                    mod = mod.strip()
                    if " " in mod:
                        self.multi_word_tokens.add(tuple(mod.lower().split(" ")))
                    mod = mod.lower().replace(" ", "")
                    self.manual_prefixes[mid].update(get_modifier_aliases(mod))
        # ...and per evo tree (applied to all alt ids).
        for mid, mods in treemod_data:
            if mid.isdigit():
                mid = int(mid)
                for mod in mods.split(","):
                    mod = mod.strip().lower()
                    if " " in mod:
                        self.multi_word_tokens.add(tuple(mod.split(" ")))
                    mod = mod.replace(" ", "")
                    aliases = get_modifier_aliases(mod)
                    for emid in self.graph.get_alt_ids_by_id(mid):
                        self.manual_prefixes[emid].update(aliases)

        # All pantheon nicknames double as recognized modifier tokens.
        self._known_mods = {x for xs in self.series_id_to_pantheon_nickname.values()
                            for x in xs}.union(KNOWN_MODIFIERS)

        self.manual_nick = defaultdict(set)
        self.manual_tree = defaultdict(set)
        self.name_tokens = defaultdict(set)
        self.fluff_tokens = defaultdict(set)
        self.modifiers = defaultdict(set)

        await self._build_monster_index(monsters)

        self.manual = combine_tokens_dicts(self.manual_nick, self.manual_tree)
        self.all_name_tokens = combine_tokens_dicts(self.manual, self.fluff_tokens, self.name_tokens)
        self.all_modifiers = {p for ps in self.modifiers.values() for p in ps}
        self.suffixes = LEGAL_END_TOKENS
        # Map the joined form of each multi-word token to its word count.
        self.mwt_to_len = defaultdict(lambda: 1, {"".join(mw): len(mw) for mw in self.multi_word_tokens})

    # aobject pattern: the async __ainit__ serves as the constructor.
    __init__ = __ainit__

    async def _build_monster_index(self, monsters):
        """Populate id, name, fluff, nickname and treename token maps per monster."""
        async for m in AsyncIter(monsters):
            self.modifiers[m] = await self.get_modifiers(m)

            # ID
            self.manual_nick[str(m.monster_no_na)].add(m)
            if m.monster_id > 10000:
                self.manual_nick[str(m.monster_id)].add(m)
            if m.monster_no_na != m.monster_no_jp:
                self.name_tokens['na' + str(m.monster_no_na)].add(m)
                self.name_tokens['jp' + str(m.monster_no_jp)].add(m)

            # Name Tokens
            nametokens = self._name_to_tokens(m.name_en)
            last_token = m.name_en.split(',')[-1].strip()
            alt_monsters = self.graph.get_alt_monsters(m)
            # autotoken: the whole tree shares the same final name segment.
            autotoken = len(alt_monsters) > 1

            for jpt in m.name_ja.split(" "):
                self.name_tokens[jpt].add(m)

            # Propagate name tokens throughout all evos
            for me in alt_monsters:
                if last_token != me.name_en.split(',')[-1].strip():
                    autotoken = False
                for t in self.monster_id_to_nametokens[me.monster_id]:
                    if t in nametokens:
                        self.add_name_token(self.name_tokens, t, m)

            # Find likely treenames
            treenames = set()
            regexes = [
                r"(?:Awoken|Reincarnated) (.*)",
                r".*, (.*'s Gem)",
            ]
            for me in alt_monsters:
                for r in regexes:
                    match = re.match(r, me.name_en)
                    if match:
                        treenames.add(match.group(1))

            # Add important tokens
            for t in self.monster_id_to_nametokens[m.monster_id]:
                self.add_name_token(self.name_tokens, t, m)
            if m.monster_id in self.treename_overrides:
                pass
            elif autotoken:
                # Add a consistant last token as important token
                for token in self._name_to_tokens(m.name_en.split(',')[-1].strip()):
                    self.add_name_token(self.name_tokens, token, m)
            else:
                # Add name tokens by guessing which ones are important
                for token in self._get_important_tokens(m.name_en, treenames) + self._name_to_tokens(m.roma_subname):
                    self.add_name_token(self.name_tokens, token, m)
                    if m.is_equip:
                        possessives = re.findall(r"(\w+)'s", m.name_en.lower())
                        for mevo in alt_monsters:
                            for token2 in possessives:
                                if token2 in self._name_to_tokens(mevo.name_en.lower()):
                                    self.add_name_token(self.name_tokens, token2, mevo)
                    else:
                        for mevo in alt_monsters:
                            if token in self._name_to_tokens(mevo.name_en):
                                self.add_name_token(self.name_tokens, token, mevo)

            # For equips only, add every name token from every other non-equip monster in the tree.
            # This has the effect of making automated name tokens behave slightly more like treenames
            # as opposed to nicknames, but only when dealing with equips, and is valuable so that we get
            # the moving-through-tree effect with higher priority, but without having to add
            # significantly more complicated logic in the lookup later on.
            # Test case: Mizutsune is a nickname for Dark Aurora, ID 4148. Issue: #614
            if m.is_equip:
                for mevo in alt_monsters:
                    if not mevo.is_equip:
                        for token2 in self._get_important_tokens(mevo.name_en, treenames):
                            self.add_name_token(self.name_tokens, token2, m)

            # Fluff tokens
            for token in nametokens:
                if m in self.name_tokens[token.lower()]:
                    continue
                self.add_name_token(self.fluff_tokens, token, m)

            # Monster Nickname
            for nick in self.monster_id_to_nickname[m.monster_id]:
                self.add_name_token(self.manual_nick, nick, m)

            # Tree Nickname
            base_id = self.graph.get_base_id(m)
            for nick in self.monster_id_to_treename[base_id]:
                self.add_name_token(self.manual_tree, nick, m)

    def add_name_token(self, token_dict, token, m):
        """Record *token* -> *m* in *token_dict*, promote known modifiers,
        and recursively expand replacement-token aliases.
        """
        # Guard: alias expansion recurses; bail out if the call stack is deep.
        if len(inspect.stack(0)) > 100:
            logger.warning(f"Infinite loop detected in name token replacement with token {token}. Aborting.")
            return
        token_dict[token.lower()].add(m)
        if token.lower() in self._known_mods and token.lower() not in HAZARDOUS_IN_NAME_PREFIXES:
            self.modifiers[m].add(token.lower())

        # Replacements
        for ts in (k for k in self.replacement_tokens if token.lower() in k and all(m in token_dict[t] for t in k)):
            for t in self.replacement_tokens[ts]:
                self.add_name_token(token_dict, t, m)

    @staticmethod
    def _name_to_tokens(oname):
        """Split a monster name into lowercase tokens (both sanitized and raw forms)."""
        if not oname:
            return []
        oname = oname.lower().replace(',', '')
        name = re.sub(r'[\-+\']', ' ', oname)
        name = re.sub(r'[^a-z0-9 ]', '', name)
        return [t.strip() for t in set(name.split() + oname.split()) if t]

    @classmethod
    def _get_important_tokens(cls, oname, treenames=None):
        """Heuristically pick the important tokens from a comma-separated name.

        Prefers a known treename segment; otherwise tokenizes everything
        when segments are similar in size, else the shorter segment only.
        """
        if treenames is None:
            treenames = set()
        name = oname.split(", ")
        if len(name) == 1:
            return cls._name_to_tokens(oname)
        *n1, n2 = name
        n1 = ", ".join(n1)
        if treenames.intersection((n1, n2)):
            return [t for n in treenames.intersection((n1, n2)) for t in cls._name_to_tokens(n)]
        elif token_count(n1) == token_count(n2) or max(token_count(n1), token_count(n2)) < 3:
            return cls._name_to_tokens(oname)
        else:
            return cls._name_to_tokens(min(n1, n2, key=token_count))

    async def get_modifiers(self, m):
        """Compute the full set of modifier tags (color, type, evo, server, ...) for monster *m*."""
        modifiers = self.manual_prefixes[m.monster_id].copy()

        basemon = self.graph.get_base_monster(m)

        # Main Color
        modifiers.update(COLOR_MAP[m.attr1])

        # Sub Color
        modifiers.update(SUB_COLOR_MAP[m.attr2])
        if m.attr1.value == 6:
            modifiers.update(COLOR_MAP[m.attr2])

        # Both Colors
        modifiers.update(DUAL_COLOR_MAP[(m.attr1, m.attr2)])

        # Type
        for mt in m.types:
            modifiers.update(TYPE_MAP[mt])

        # Series
        if m.series_id in self.series_id_to_pantheon_nickname:
            modifiers.update(self.series_id_to_pantheon_nickname[m.series_id])

        # Rarity
        modifiers.add(str(m.rarity) + "*")
        modifiers.add(str(basemon.rarity) + "*b")

        # Base
        if self.graph.monster_is_base(m):
            modifiers.update(EVO_MAP[EvoTypes.BASE])

        special_evo = ('覚醒' in m.name_ja or 'awoken' in m.name_en or '転生' in m.name_ja or
                       self.graph.true_evo_type_by_monster(m).value == "Reincarnated" or
                       'reincarnated' in m.name_en or
                       self.graph.true_evo_type_by_monster(m).value == "Super Reincarnated" or
                       m.is_equip or '極醒' in m.name_ja)

        # Evo
        if self.graph.monster_is_normal_evo(m) or self.graph.monster_is_first_evo(m):
            modifiers.update(EVO_MAP[EvoTypes.EVO])

        # Uvo
        if self.graph.monster_is_reversible_evo(m) and not special_evo:
            modifiers.update(EVO_MAP[EvoTypes.UVO])

        # UUvo
        if self.graph.monster_is_second_ultimate(m):
            modifiers.update(EVO_MAP[EvoTypes.UUVO])

        # Transform
        if not self.graph.monster_is_transform_base(m):
            modifiers.update(EVO_MAP[EvoTypes.TRANS])
        elif self.graph.get_next_transform_by_monster(m):
            modifiers.update(EVO_MAP[EvoTypes.BASETRANS])

        # Awoken
        if '覚醒' in m.name_ja or 'awoken' in m.name_en.lower():
            modifiers.update(EVO_MAP[EvoTypes.AWOKEN])

        # Mega Awoken
        if '極醒' in m.name_ja or 'mega awoken' in m.name_en.lower():
            modifiers.update(EVO_MAP[EvoTypes.MEGA])

        # Reincarnated
        if self.graph.true_evo_type_by_monster(m).value == "Reincarnated":
            modifiers.update(EVO_MAP[EvoTypes.REVO])

        # Super Reincarnated
        if '超転生' in m.name_ja or self.graph.true_evo_type_by_monster(m).value == "Super Reincarnated":
            modifiers.update(EVO_MAP[EvoTypes.SREVO])

        # Pixel
        if (m.name_ja.startswith('ドット') or
                m.name_en.startswith('pixel') or
                self.graph.true_evo_type_by_monster(m).value == "Pixel"):
            modifiers.update(EVO_MAP[EvoTypes.PIXEL])
        else:
            modifiers.update(EVO_MAP[EvoTypes.NONPIXEL])

        # Awakenings
        for aw in m.awakenings:
            modifiers.update(AWOKEN_MAP[Awakenings(aw.awoken_skill_id)])

        # Chibi
        if (m.name_en == m.name_en.lower() and m.name_en != m.name_ja) or \
                'ミニ' in m.name_ja or '(chibi)' in m.name_en:
            modifiers.update(MISC_MAP[MiscModifiers.CHIBI])

        # Series Type
        if m.series.series_type == 'regular':
            modifiers.update(MISC_MAP[MiscModifiers.REGULAR])
        if m.series.series_type == 'event':
            modifiers.update(MISC_MAP[MiscModifiers.EVENT])
        if m.series.series_type == 'seasonal':
            modifiers.update(MISC_MAP[MiscModifiers.SEASONAL])
        if m.series.series_type == 'collab':
            modifiers.update(MISC_MAP[MiscModifiers.COLLAB])

        # Story
        def is_story(m, do_transform=True):
            # series 196 appears to be the "story" series; a monster counts
            # if it, an evo material, a transform, or a prior evo is in it.
            if m.series_id == 196 or any(mat.series_id == 196 for mat in self.graph.evo_mats_by_monster(m)):
                return True
            if do_transform:
                for pt in self.graph.get_transform_monsters(m):
                    if is_story(pt, False):
                        return True
            pe = self.graph.get_prev_evolution_by_monster(m)
            if pe and is_story(pe):
                return True
            return False

        if is_story(m):
            modifiers.update(MISC_MAP[MiscModifiers.STORY])

        # Method of Obtaining
        if self.graph.monster_is_farmable_evo(m) or self.graph.monster_is_mp_evo(m):
            modifiers.update(MISC_MAP[MiscModifiers.FARMABLE])
        if self.graph.monster_is_mp_evo(m):
            modifiers.update(MISC_MAP[MiscModifiers.MP])
        if self.graph.monster_is_rem_evo(m):
            modifiers.update(MISC_MAP[MiscModifiers.REM])

        # Server
        if m.on_jp:
            modifiers.update(MISC_MAP[MiscModifiers.INJP])
            if not m.on_na:
                modifiers.update(MISC_MAP[MiscModifiers.ONLYJP])
        if m.on_na:
            modifiers.update(MISC_MAP[MiscModifiers.INNA])
            if not m.on_jp:
                modifiers.update(MISC_MAP[MiscModifiers.ONLYNA])
        if m.monster_id + 10000 in self.graph.nodes:
            modifiers.add("idjp")
        if m.monster_id > 10000:
            modifiers.add("idna")

        return modifiers
# TODO: Move this to TSUtils
async def sheet_to_reader(url, length=None):
    """Download a published Google Sheet as CSV and return a row iterator.

    When *length* is given, each row is padded with None (or truncated)
    to exactly *length* columns; otherwise rows are returned unchanged.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            # Buffer the whole body so rows remain readable after the
            # session is closed.
            file = io.StringIO(await response.text())
            if length is None:
                return csv.reader(file, delimiter=',')
            else:
                return ((line + [None] * length)[:length] for line in csv.reader(file, delimiter=','))
def copydict(token_dict):
    """Return a defaultdict(set) copy of *token_dict* with each value set copied."""
    return defaultdict(set, {key: members.copy() for key, members in token_dict.items()})
def combine_tokens_dicts(d1, *ds):
    """Merge token->set dicts into a new defaultdict(set) without mutating inputs."""
    merged = defaultdict(set)
    merged.update(d1)
    for extra in ds:
        for key, members in extra.items():
            merged[key] = merged[key].union(members)
    return merged
def token_count(tstr):
    """Count word tokens in *tstr*, ignoring any parenthesized span."""
    return len(re.split(r'\W+', re.sub(r"\(.+\)", "", tstr)))
def get_modifier_aliases(mod):
    """Expand *mod* to itself plus every alias group in ALL_TOKEN_DICTS containing it."""
    aliases = {mod}
    aliases.update(*(group for group in ALL_TOKEN_DICTS if mod in group))
    return aliases
| 41.20852 | 120 | 0.580717 | import asyncio
import csv
import inspect
import io
import logging
import re
from collections import defaultdict
import aiohttp
from redbot.core.utils import AsyncIter
from tsutils import aobject
from .token_mappings import *
SHEETS_PATTERN = 'https://docs.google.com/spreadsheets/d/1EoZJ3w5xsXZ67kmarLE4vfrZSIIIAfj04HXeZVST3eY' \
'/pub?gid={}&single=true&output=csv'
NICKNAME_OVERRIDES_SHEET = SHEETS_PATTERN.format('0')
GROUP_TREENAMES_OVERRIDES_SHEET = SHEETS_PATTERN.format('2070615818')
PANTHNAME_OVERRIDES_SHEET = SHEETS_PATTERN.format('959933643')
NAME_TOKEN_ALIAS_SHEET = SHEETS_PATTERN.format('1229125459')
MODIFIER_OVERRIDE_SHEET = SHEETS_PATTERN.format('2089525837')
TREE_MODIFIER_OVERRIDE_SHEET = SHEETS_PATTERN.format('1372419168')
logger = logging.getLogger('red.pad-cogs.dadguide.monster_index')
class MonsterIndex2(aobject):
async def __ainit__(self, monsters, db):
self.graph = db.graph
self.monster_id_to_nickname = defaultdict(set)
self.monster_id_to_nametokens = defaultdict(set)
self.monster_id_to_treename = defaultdict(set)
self.series_id_to_pantheon_nickname = defaultdict(set, {m.series_id: {m.series.name_en.lower().replace(" ", "")}
for m
in db.get_all_monsters()})
self.mwtoken_creators = defaultdict(set)
self.multi_word_tokens = {tuple(m.series.name_en.lower().split())
for m
in db.get_all_monsters()
if " " in m.series.name_en}.union(MULTI_WORD_TOKENS)
self.replacement_tokens = defaultdict(set)
self.treename_overrides = set()
nickname_data, treenames_data, pantheon_data, nt_alias_data, mod_data, treemod_data = await asyncio.gather(
sheet_to_reader(NICKNAME_OVERRIDES_SHEET, 5),
sheet_to_reader(GROUP_TREENAMES_OVERRIDES_SHEET, 5),
sheet_to_reader(PANTHNAME_OVERRIDES_SHEET, 2),
sheet_to_reader(NAME_TOKEN_ALIAS_SHEET, 2),
sheet_to_reader(MODIFIER_OVERRIDE_SHEET, 2),
sheet_to_reader(TREE_MODIFIER_OVERRIDE_SHEET, 2),
)
for m_id, name, lp, ov, i in nickname_data:
if m_id.isdigit() and not i:
mid = int(m_id)
if ov:
self.treename_overrides.add(mid)
if lp:
self.monster_id_to_nametokens[mid].update(self._name_to_tokens(name))
else:
if " " in name:
self.mwtoken_creators[name.lower().replace(" ", "")].add(db.graph.get_monster(mid))
self.multi_word_tokens.add(tuple(name.lower().split(" ")))
self.monster_id_to_nickname[mid].add(name.lower().replace(" ", ""))
for m_id, name, mp, ov, i in treenames_data:
if m_id.isdigit() and not i:
mid = int(m_id)
if ov:
for emid in self.graph.get_alt_ids_by_id(mid):
self.treename_overrides.add(emid)
if mp:
for emid in self.graph.get_alt_ids_by_id(mid):
self.monster_id_to_nametokens[emid].update(self._name_to_tokens(name))
else:
if " " in name:
self.mwtoken_creators[name.lower().replace(" ", "")].add(db.graph.get_monster(mid))
self.multi_word_tokens.add(tuple(name.lower().split(" ")))
self.monster_id_to_treename[mid].add(name.lower().replace(" ", ""))
for sid, name in pantheon_data:
if sid.isdigit():
if " " in name:
self.multi_word_tokens.add(tuple(name.lower().split(" ")))
self.series_id_to_pantheon_nickname[int(sid)].add(name.lower().replace(" ", ""))
next(nt_alias_data)
for tokens, alias in nt_alias_data:
self.replacement_tokens[frozenset(re.split(r'\W+', tokens))].add(alias)
self.manual_prefixes = defaultdict(set)
for mid, mods in mod_data:
if mid.isdigit():
mid = int(mid)
for mod in mods.split(","):
mod = mod.strip()
if " " in mod:
self.multi_word_tokens.add(tuple(mod.lower().split(" ")))
mod = mod.lower().replace(" ", "")
self.manual_prefixes[mid].update(get_modifier_aliases(mod))
for mid, mods in treemod_data:
if mid.isdigit():
mid = int(mid)
for mod in mods.split(","):
mod = mod.strip().lower()
if " " in mod:
self.multi_word_tokens.add(tuple(mod.split(" ")))
mod = mod.replace(" ", "")
aliases = get_modifier_aliases(mod)
for emid in self.graph.get_alt_ids_by_id(mid):
self.manual_prefixes[emid].update(aliases)
self._known_mods = {x for xs in self.series_id_to_pantheon_nickname.values()
for x in xs}.union(KNOWN_MODIFIERS)
self.manual_nick = defaultdict(set)
self.manual_tree = defaultdict(set)
self.name_tokens = defaultdict(set)
self.fluff_tokens = defaultdict(set)
self.modifiers = defaultdict(set)
await self._build_monster_index(monsters)
self.manual = combine_tokens_dicts(self.manual_nick, self.manual_tree)
self.all_name_tokens = combine_tokens_dicts(self.manual, self.fluff_tokens, self.name_tokens)
self.all_modifiers = {p for ps in self.modifiers.values() for p in ps}
self.suffixes = LEGAL_END_TOKENS
self.mwt_to_len = defaultdict(lambda: 1, {"".join(mw): len(mw) for mw in self.multi_word_tokens})
__init__ = __ainit__
async def _build_monster_index(self, monsters):
async for m in AsyncIter(monsters):
self.modifiers[m] = await self.get_modifiers(m)
self.manual_nick[str(m.monster_no_na)].add(m)
if m.monster_id > 10000:
self.manual_nick[str(m.monster_id)].add(m)
if m.monster_no_na != m.monster_no_jp:
self.name_tokens['na' + str(m.monster_no_na)].add(m)
self.name_tokens['jp' + str(m.monster_no_jp)].add(m)
nametokens = self._name_to_tokens(m.name_en)
last_token = m.name_en.split(',')[-1].strip()
alt_monsters = self.graph.get_alt_monsters(m)
autotoken = len(alt_monsters) > 1
for jpt in m.name_ja.split(" "):
self.name_tokens[jpt].add(m)
for me in alt_monsters:
if last_token != me.name_en.split(',')[-1].strip():
autotoken = False
for t in self.monster_id_to_nametokens[me.monster_id]:
if t in nametokens:
self.add_name_token(self.name_tokens, t, m)
treenames = set()
regexes = [
r"(?:Awoken|Reincarnated) (.*)",
r".*, (.*'s Gem)",
]
for me in alt_monsters:
for r in regexes:
match = re.match(r, me.name_en)
if match:
treenames.add(match.group(1))
# Add important tokens
for t in self.monster_id_to_nametokens[m.monster_id]:
self.add_name_token(self.name_tokens, t, m)
if m.monster_id in self.treename_overrides:
pass
elif autotoken:
# Add a consistant last token as important token
for token in self._name_to_tokens(m.name_en.split(',')[-1].strip()):
self.add_name_token(self.name_tokens, token, m)
else:
# Add name tokens by guessing which ones are important
for token in self._get_important_tokens(m.name_en, treenames) + self._name_to_tokens(m.roma_subname):
self.add_name_token(self.name_tokens, token, m)
if m.is_equip:
possessives = re.findall(r"(\w+)'s", m.name_en.lower())
for mevo in alt_monsters:
for token2 in possessives:
if token2 in self._name_to_tokens(mevo.name_en.lower()):
self.add_name_token(self.name_tokens, token2, mevo)
else:
for mevo in alt_monsters:
if token in self._name_to_tokens(mevo.name_en):
self.add_name_token(self.name_tokens, token, mevo)
if m.is_equip:
for mevo in alt_monsters:
if not mevo.is_equip:
for token2 in self._get_important_tokens(mevo.name_en, treenames):
self.add_name_token(self.name_tokens, token2, m)
for token in nametokens:
if m in self.name_tokens[token.lower()]:
continue
self.add_name_token(self.fluff_tokens, token, m)
for nick in self.monster_id_to_nickname[m.monster_id]:
self.add_name_token(self.manual_nick, nick, m)
base_id = self.graph.get_base_id(m)
for nick in self.monster_id_to_treename[base_id]:
self.add_name_token(self.manual_tree, nick, m)
def add_name_token(self, token_dict, token, m):
if len(inspect.stack(0)) > 100:
logger.warning(f"Infinite loop detected in name token replacement with token {token}. Aborting.")
return
token_dict[token.lower()].add(m)
if token.lower() in self._known_mods and token.lower() not in HAZARDOUS_IN_NAME_PREFIXES:
self.modifiers[m].add(token.lower())
for ts in (k for k in self.replacement_tokens if token.lower() in k and all(m in token_dict[t] for t in k)):
for t in self.replacement_tokens[ts]:
self.add_name_token(token_dict, t, m)
@staticmethod
def _name_to_tokens(oname):
if not oname:
return []
oname = oname.lower().replace(',', '')
name = re.sub(r'[\-+\']', ' ', oname)
name = re.sub(r'[^a-z0-9 ]', '', name)
return [t.strip() for t in set(name.split() + oname.split()) if t]
@classmethod
def _get_important_tokens(cls, oname, treenames=None):
if treenames is None:
treenames = set()
name = oname.split(", ")
if len(name) == 1:
return cls._name_to_tokens(oname)
*n1, n2 = name
n1 = ", ".join(n1)
if treenames.intersection((n1, n2)):
return [t for n in treenames.intersection((n1, n2)) for t in cls._name_to_tokens(n)]
elif token_count(n1) == token_count(n2) or max(token_count(n1), token_count(n2)) < 3:
return cls._name_to_tokens(oname)
else:
return cls._name_to_tokens(min(n1, n2, key=token_count))
async def get_modifiers(self, m):
modifiers = self.manual_prefixes[m.monster_id].copy()
basemon = self.graph.get_base_monster(m)
# Main Color
modifiers.update(COLOR_MAP[m.attr1])
# Sub Color
modifiers.update(SUB_COLOR_MAP[m.attr2])
if m.attr1.value == 6:
modifiers.update(COLOR_MAP[m.attr2])
# Both Colors
modifiers.update(DUAL_COLOR_MAP[(m.attr1, m.attr2)])
# Type
for mt in m.types:
modifiers.update(TYPE_MAP[mt])
# Series
if m.series_id in self.series_id_to_pantheon_nickname:
modifiers.update(self.series_id_to_pantheon_nickname[m.series_id])
# Rarity
modifiers.add(str(m.rarity) + "*")
modifiers.add(str(basemon.rarity) + "*b")
# Base
if self.graph.monster_is_base(m):
modifiers.update(EVO_MAP[EvoTypes.BASE])
special_evo = ('覚醒' in m.name_ja or 'awoken' in m.name_en or '転生' in m.name_ja or
self.graph.true_evo_type_by_monster(m).value == "Reincarnated" or
'reincarnated' in m.name_en or
self.graph.true_evo_type_by_monster(m).value == "Super Reincarnated" or
m.is_equip or '極醒' in m.name_ja)
# Evo
if self.graph.monster_is_normal_evo(m) or self.graph.monster_is_first_evo(m):
modifiers.update(EVO_MAP[EvoTypes.EVO])
# Uvo
if self.graph.monster_is_reversible_evo(m) and not special_evo:
modifiers.update(EVO_MAP[EvoTypes.UVO])
# UUvo
if self.graph.monster_is_second_ultimate(m):
modifiers.update(EVO_MAP[EvoTypes.UUVO])
# Transform
if not self.graph.monster_is_transform_base(m):
modifiers.update(EVO_MAP[EvoTypes.TRANS])
elif self.graph.get_next_transform_by_monster(m):
modifiers.update(EVO_MAP[EvoTypes.BASETRANS])
# Awoken
if '覚醒' in m.name_ja or 'awoken' in m.name_en.lower():
modifiers.update(EVO_MAP[EvoTypes.AWOKEN])
# Mega Awoken
if '極醒' in m.name_ja or 'mega awoken' in m.name_en.lower():
modifiers.update(EVO_MAP[EvoTypes.MEGA])
# Reincarnated
if self.graph.true_evo_type_by_monster(m).value == "Reincarnated":
modifiers.update(EVO_MAP[EvoTypes.REVO])
# Super Reincarnated
if '超転生' in m.name_ja or self.graph.true_evo_type_by_monster(m).value == "Super Reincarnated":
modifiers.update(EVO_MAP[EvoTypes.SREVO])
# Pixel
if (m.name_ja.startswith('ドット') or
m.name_en.startswith('pixel') or
self.graph.true_evo_type_by_monster(m).value == "Pixel"):
modifiers.update(EVO_MAP[EvoTypes.PIXEL])
else:
modifiers.update(EVO_MAP[EvoTypes.NONPIXEL])
# Awakenings
for aw in m.awakenings:
modifiers.update(AWOKEN_MAP[Awakenings(aw.awoken_skill_id)])
# Chibi
if (m.name_en == m.name_en.lower() and m.name_en != m.name_ja) or \
'ミニ' in m.name_ja or '(chibi)' in m.name_en:
modifiers.update(MISC_MAP[MiscModifiers.CHIBI])
# Series Type
if m.series.series_type == 'regular':
modifiers.update(MISC_MAP[MiscModifiers.REGULAR])
if m.series.series_type == 'event':
modifiers.update(MISC_MAP[MiscModifiers.EVENT])
if m.series.series_type == 'seasonal':
modifiers.update(MISC_MAP[MiscModifiers.SEASONAL])
if m.series.series_type == 'collab':
modifiers.update(MISC_MAP[MiscModifiers.COLLAB])
# Story
def is_story(m, do_transform=True):
if m.series_id == 196 or any(mat.series_id == 196 for mat in self.graph.evo_mats_by_monster(m)):
return True
if do_transform:
for pt in self.graph.get_transform_monsters(m):
if is_story(pt, False):
return True
pe = self.graph.get_prev_evolution_by_monster(m)
if pe and is_story(pe):
return True
return False
if is_story(m):
modifiers.update(MISC_MAP[MiscModifiers.STORY])
# Method of Obtaining
if self.graph.monster_is_farmable_evo(m) or self.graph.monster_is_mp_evo(m):
modifiers.update(MISC_MAP[MiscModifiers.FARMABLE])
if self.graph.monster_is_mp_evo(m):
modifiers.update(MISC_MAP[MiscModifiers.MP])
if self.graph.monster_is_rem_evo(m):
modifiers.update(MISC_MAP[MiscModifiers.REM])
# Server
if m.on_jp:
modifiers.update(MISC_MAP[MiscModifiers.INJP])
if not m.on_na:
modifiers.update(MISC_MAP[MiscModifiers.ONLYJP])
if m.on_na:
modifiers.update(MISC_MAP[MiscModifiers.INNA])
if not m.on_jp:
modifiers.update(MISC_MAP[MiscModifiers.ONLYNA])
if m.monster_id + 10000 in self.graph.nodes:
modifiers.add("idjp")
if m.monster_id > 10000:
modifiers.add("idna")
return modifiers
# TODO: Move this to TSUtils
async def sheet_to_reader(url, length=None):
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
file = io.StringIO(await response.text())
if length is None:
return csv.reader(file, delimiter=',')
else:
return ((line + [None] * length)[:length] for line in csv.reader(file, delimiter=','))
def copydict(token_dict):
copy = defaultdict(set)
for k, v in token_dict.items():
copy[k] = v.copy()
return copy
def combine_tokens_dicts(d1, *ds):
combined = defaultdict(set, d1.copy())
for d2 in ds:
for k, v in d2.items():
combined[k] = combined[k].union(v)
return combined
def token_count(tstr):
tstr = re.sub(r"\(.+\)", "", tstr)
return len(re.split(r'\W+', tstr))
def get_modifier_aliases(mod):
output = {mod}
for mods in ALL_TOKEN_DICTS:
if mod in mods:
output.update(mods)
return output
| true | true |
1c31689e64399dba81eb63ef2f0d091a4e35c238 | 3,249 | py | Python | tools/setoverscan.py | danielforgacs/Maya-tools | 66d030d3a3ba7e8caa5145a6f141f8eca15b7729 | [
"MIT"
] | 4 | 2018-06-06T22:42:34.000Z | 2021-01-16T06:28:54.000Z | tools/setoverscan.py | danielforgacs/Maya-tools | 66d030d3a3ba7e8caa5145a6f141f8eca15b7729 | [
"MIT"
] | null | null | null | tools/setoverscan.py | danielforgacs/Maya-tools | 66d030d3a3ba7e8caa5145a6f141f8eca15b7729 | [
"MIT"
] | 1 | 2020-02-11T07:08:33.000Z | 2020-02-11T07:08:33.000Z | """
calculate and set camera values
for overscan in one camera setup
overscan is not uniform. It matches
image proportions - rounded
if the selected camera has post scale
you get an error - no duplaicate overscan
default overscan:
50 / 50 pixels :: top / bottom
Select camera, call: main(); main(pixels=30)
for tests call: tests()
"""
from fractions import Fraction

# pymel is only available inside Maya; fall back to None so this module can
# still be imported (and get_osc_values() used) outside of Maya.
try:
    import pymel.core
except ImportError:
    # Bug fix: the original bound only the unused alias `pm`, leaving the
    # name `pymel` undefined on failure; bind both, and catch only
    # ImportError instead of a bare except.
    pymel = pm = None
# def set_camera_post_scale_OBSOLTET(ratio):
# cam = None
# try:
# cam = pymel.core.selected().pop().getShape()
# except:
# pass
# if cam and cam.postScale.get() == 1.0:
# cam.postScale.set(ratio)
# else:
# raise Exception('--> Camera already has post scale!')
# def set_osc_resolution_OBSOLETE(pixels=10):
# rendersettings = pymel.core.PyNode('defaultResolution')
# res_x_plate = rendersettings.width.get()
# res_y_plate = rendersettings.height.get()
# image_ratio = Fraction(res_x_plate, res_y_plate)
# res_y_overscan = res_y_plate + (pixels * 2)
# overscan_scale = Fraction(res_y_overscan, res_y_plate)
# cam_postscale = Fraction(res_y_plate, res_y_overscan)
# res_x_overscan_float = float(res_x_plate / cam_postscale)
# res_x_overscan = int(round(res_x_overscan_float))
# set_camera_post_scale(float(cam_postscale))
# rendersettings.width.set(res_x_overscan)
# rendersettings.height.set(res_y_overscan)
# # return (res_x_overscan, res_y_overscan)
# return (res_x_plate,
# res_y_plate,
# res_x_overscan,
# res_y_overscan,
# res_x_overscan_float,
# overscan_scale,
# cam_postscale,
# image_ratio,
# )
def get_osc_values(resx, resy, pixels):
    """Derive overscan resolution and camera post-scale for a plate.

    *pixels* extra rows are added at both the top and the bottom; the width
    is scaled by the same factor so the image ratio is preserved.  'x_osc'
    is the rounded overscan width, 'x_osc_float' the exact value it was
    rounded from, and 'postscale' the camera post-scale that maps the
    overscan frame back onto the plate.
    """
    overscan_height = resy + 2 * pixels
    overscan_scale = overscan_height / resy
    overscan_width = resx * overscan_scale
    return {
        'x_osc': int(round(overscan_width)),
        'y_osc': int(overscan_height),
        'postscale': 1 / overscan_scale,
        'x': resx,
        'y': resy,
        'x_osc_float': overscan_width,
        'osc_scale': overscan_scale,
        'ratio': resx / resy,
    }
def main(**kwargs):
    """Print the overscan resolution and camera post-scale for a plate.

    Keyword args: ``resx``/``resy`` -- plate resolution (default
    2048x1152); ``pixels`` -- overscan rows added to both top and bottom
    (default 50).
    """
    plate_w = kwargs.get('resx', 2048)
    plate_h = kwargs.get('resy', 1152)
    border = kwargs.get('pixels', 50)
    osc = get_osc_values(plate_w, plate_h, border)
    report = [
        '\n--> overscan res (rounded): {0} x {1}'.format(osc['x_osc'], osc['y_osc']),
        '--> camera post scale: {0}'.format(osc['postscale']),
        '\nplate resolution: {0} x {1}'.format(osc['x'], osc['y']),
        'overscan resolution: {0} x {1}'.format(osc['x_osc_float'], osc['y_osc']),
        'overscan scale: {0}'.format(osc['osc_scale']),
        'image ratio: {0}'.format(osc['ratio']),
        'resolution difference: {0} x {1}'.format(
            osc['x_osc'] - osc['x'], osc['y_osc'] - osc['y']),
    ]
    for line in report:
        print(line)
def tests():
    """Import (or re-import) the companion test module and run it."""
    import setoverscan_tests
    try:
        # Python 2: reload() is a builtin.
        reload(setoverscan_tests)
    except NameError:
        # Python 3: reload moved to importlib.
        # Bug fix: the original bare `except:` also swallowed real errors
        # raised *by* reload (e.g. a SyntaxError in the test module) and
        # then reloaded a second time, masking the failure.
        import importlib
        importlib.reload(setoverscan_tests)
    setoverscan_tests.main()
# Allow running this module directly (e.g. from a shell or Maya's script
# editor) with the default 50-pixel overscan.
if __name__ == '__main__':
    main()
from fractions import Fraction
try:
import pymel.core
except:
pm = None
, pixels):
resy_osc = resy + (pixels * 2)
osc_scale = resy_osc / resy
postscale = 1 / osc_scale
resx_osc = resx * osc_scale
return {
'x_osc': int(round(resx_osc)),
'y_osc': int(resy_osc),
'postscale': postscale,
'x': resx,
'y': resy,
'x_osc_float': resx_osc,
'osc_scale': osc_scale,
'ratio': resx / resy,
}
def main(**kwargs):
resx = kwargs.get('resx', 2048)
resy = kwargs.get('resy', 1152)
pixels = kwargs.get('pixels', 50)
values = get_osc_values(resx, resy, pixels)
print('\n--> overscan res (rounded): {0} x {1}'.format(values['x_osc'], values['y_osc']))
print('--> camera post scale: {0}'.format(values['postscale']))
print('\nplate resolution: {0} x {1}'.format(values['x'], values['y']))
print('overscan resolution: {0} x {1}'.format(values['x_osc_float'], values['y_osc']))
print('overscan scale: {0}'.format(values['osc_scale']))
print('image ratio: {0}'.format(values['ratio']))
print('resolution difference: {0} x {1}'.format(values['x_osc'] - values['x'], values['y_osc'] - values['y']))
def tests():
import setoverscan_tests
try:
reload(setoverscan_tests)
except:
import importlib
importlib.reload(setoverscan_tests)
setoverscan_tests.main()
if __name__ == '__main__':
main() | true | true |
1c31699971d7e0eabc1fd2fcf67999df6c0d79fc | 386 | py | Python | nepali/datetime/__init__.py | aj3sh/nepali | bae66301eca7bfb51c53d54bfda56d6e425ef0a6 | [
"MIT"
] | 2 | 2019-05-25T15:17:56.000Z | 2020-09-24T07:21:00.000Z | nepali/datetime/__init__.py | aj3sh/nepali | bae66301eca7bfb51c53d54bfda56d6e425ef0a6 | [
"MIT"
] | null | null | null | nepali/datetime/__init__.py | aj3sh/nepali | bae66301eca7bfb51c53d54bfda56d6e425ef0a6 | [
"MIT"
] | 2 | 2020-04-03T05:50:46.000Z | 2020-10-15T16:18:37.000Z | from ._datetime import nepalidate, nepalitime, nepalidatetime, NepaliDate, NepaliDateTime, NepaliTime
from ._formarter import NepaliDateTimeFormater
from ._humanize import HumanizeDateTime, nepalihumanize
# Public API of the `nepali.datetime` package.
# NOTE(review): both lowercase and CamelCase variants are re-exported from
# the private submodules; presumably the lowercase names mirror the stdlib
# `datetime` naming style while the CamelCase ones are the class names —
# confirm in ._datetime.
__all__ = [
    'nepalidate',
    'nepalitime',
    'nepalidatetime',
    'NepaliDate',
    'NepaliTime',
    'NepaliDateTime',
    'nepalihumanize',
    'NepaliDateTimeFormater',
    'HumanizeDateTime',
]
from ._formarter import NepaliDateTimeFormater
from ._humanize import HumanizeDateTime, nepalihumanize
__all__ = [
'nepalidate',
'nepalitime',
'nepalidatetime',
'NepaliDate',
'NepaliTime',
'NepaliDateTime',
'nepalihumanize',
'NepaliDateTimeFormater',
'HumanizeDateTime',
] | true | true |
1c316a35b647ab9f67f0da8abbc922b8d9cec814 | 322 | py | Python | app/util/constants.py | danvf/py-challenge | 3d9b3469b7491dc2cf639a58a995a3f456da08f9 | [
"MIT"
] | null | null | null | app/util/constants.py | danvf/py-challenge | 3d9b3469b7491dc2cf639a58a995a3f456da08f9 | [
"MIT"
] | null | null | null | app/util/constants.py | danvf/py-challenge | 3d9b3469b7491dc2cf639a58a995a3f456da08f9 | [
"MIT"
] | null | null | null | INPUT_ERROR = "Input does not apply!"
NONEXISTENT_INPUT = "Nonexistent input!"
# State identifiers for the call-screen state machine.
# NOTE(review): the letter codes are not explained here — presumably they
# encode which UI elements are visible; confirm against the state-machine
# implementation.
INITIAL_STATE = "TIL"
INTERMEDIATE_STATE = "TI"
CALL_STATE = "TL"
# Input events: button presses.
PRESS_BUTTON_CALL = "bl"
PRESS_BUTTON_DISMISS = "bd"
# Input events: UI flags / popups.
FLAG_AVATAR_DISPLAYED = "ad"
FLAG_POPUP_NO_NETWORK = "pqr"
FLAG_POPUP_CALL_DISMISSED = "pna"
FLAG_POPUP_ENDING_CALL = "pfc"
| 26.833333 | 40 | 0.776398 | INPUT_ERROR = "Input does not apply!"
NONEXISTENT_INPUT = "Nonexistent input!"
INITIAL_STATE = "TIL"
INTERMEDIATE_STATE = "TI"
CALL_STATE = "TL"
PRESS_BUTTON_CALL = "bl"
PRESS_BUTTON_DISMISS = "bd"
FLAG_AVATAR_DISPLAYED = "ad"
FLAG_POPUP_NO_NETWORK = "pqr"
FLAG_POPUP_CALL_DISMISSED = "pna"
FLAG_POPUP_ENDING_CALL = "pfc"
| true | true |
1c316b92cc14f69fcda8c086090d31b59c6d2c6a | 5,011 | py | Python | sciencebeam_gym/models/text/crf/autocut_model.py | elifesciences/sciencebeam-gym | 3ad654e08775e0c0cdd256753e14093bb5a42d44 | [
"MIT"
] | 25 | 2017-07-25T12:44:55.000Z | 2020-09-30T22:16:50.000Z | sciencebeam_gym/models/text/crf/autocut_model.py | elifesciences/sciencebeam-gym | 3ad654e08775e0c0cdd256753e14093bb5a42d44 | [
"MIT"
] | 192 | 2017-11-29T08:57:03.000Z | 2022-03-29T18:44:41.000Z | sciencebeam_gym/models/text/crf/autocut_model.py | elifesciences/sciencebeam-gym | 3ad654e08775e0c0cdd256753e14093bb5a42d44 | [
"MIT"
] | 6 | 2019-02-01T18:49:33.000Z | 2020-07-26T08:18:46.000Z | import nltk
from sciencebeam_utils.utils.collection import extend_dict
from sciencebeam_alignment.align import (
LocalSequenceMatcher,
SimpleScoring
)
from .crfsuite_model import CrfSuiteModel
# Scoring used for the local alignment in get_labels_match(): reward exact
# character matches, penalise gaps slightly harder than mismatches.
DEFAULT_SCORING = SimpleScoring(
    match_score=2,
    mismatch_score=-1,
    gap_score=-2
)
# Per-character labels: MATCH_LABEL marks characters inside the matched
# (kept) region, OTHER_LABEL everything outside it.
MATCH_LABEL = 'x'
OTHER_LABEL = '_'
def get_labels_match(expected, actual, match_label=MATCH_LABEL, other_label=OTHER_LABEL):
    """Return a per-character label string for ``actual``.

    Characters of ``actual`` that fall inside the best local alignment
    against ``expected`` are labelled ``match_label``; everything else is
    labelled ``other_label``.  An empty ``actual`` yields ``''``; an empty
    ``expected`` yields an all-``other_label`` string.

    Raises IndexError (re-raised with both strings in the message) when
    the underlying matcher fails with one.
    """
    if not actual:
        return ''
    if not expected:
        # Bug fix: honour the caller-supplied other_label instead of the
        # hard-coded '_' literal.
        return other_label * len(actual)
    try:
        sm = LocalSequenceMatcher(a=expected.lower(), b=actual.lower(), scoring=DEFAULT_SCORING)
        matching_blocks = sm.get_matching_blocks()
        # First and last positions in `actual` covered by a non-empty block.
        # NOTE(review): if no block has size > 0, min()/max() raise
        # ValueError, which is *not* converted below — confirm whether that
        # can happen and is meant to propagate.
        from_actual = min(b for _, b, size in matching_blocks if size)
        to_actual = max(b + size for _, b, size in matching_blocks if size)
        return (
            (other_label * from_actual)
            + (match_label * (to_actual - from_actual))
            + (other_label * (len(actual) - to_actual))
        )
    except IndexError as e:
        # Include both inputs so alignment failures are debuggable.
        raise IndexError('%s: expected=[%s], actual=[%s]' % (e, expected, actual)) from e
def span_word_tokenize(txt):
    """Yield ``(token, start, end)`` triples for the nltk word tokens of *txt*.

    Offsets are recovered with ``str.find``, scanning left to right so that
    repeated tokens map to successive occurrences in the original string.
    """
    cursor = 0
    for token in nltk.word_tokenize(txt):
        cursor = txt.find(token, cursor)
        yield token, cursor, cursor + len(token)
        cursor += len(token)
def get_word_index_by_char_index_map(spans):
    """Map every character position covered by a span to its word index.

    *spans* is an iterable of ``(word, start, end)`` triples; characters
    not covered by any span are simply absent from the result.
    """
    mapping = {}
    for word_index, (_, start, end) in enumerate(spans):
        for char_index in range(start, end):
            mapping[char_index] = word_index
    return mapping
def get_span_words(spans):
    """Extract just the token text from ``(word, start, end)`` triples."""
    return [word for (word, _start, _end) in spans]
def get_word_by_char_index_map(spans):
    """Map every covered character position directly to its word text.

    Accepts any iterable of ``(word, start, end)`` triples (it is
    materialised once, so single-pass iterators are fine).
    """
    span_list = list(spans)
    return {
        char_index: word
        for (word, start, end) in span_list
        for char_index in range(start, end)
    }
def get_char_features(prefix, ch):
    """Casing/digit features for one character, keyed ``<prefix>.<feature>``."""
    return {
        prefix + '.lower': ch.lower(),
        prefix + '.isupper': ch.isupper(),
        prefix + '.istitle': ch.istitle(),
        prefix + '.isdigit': ch.isdigit(),
    }
def get_word_features(prefix, word):
    """Casing/digit features for one token, keyed ``<prefix>.<feature>``."""
    feature_values = {
        'lower': word.lower(),
        'isupper': word.isupper(),
        'istitle': word.istitle(),
        'isdigit': word.isdigit(),
    }
    return {'%s.%s' % (prefix, name): value for name, value in feature_values.items()}
def get_sentence_char_features(
        char_index,
        char_by_index_map,
        word_index_by_char_index_map,
        word_by_index_map):
    """Feature dict for one character position of a sentence.

    Combines features of the character itself and of its containing word,
    plus a window of three neighbouring characters/words on each side.
    Positions outside the sentence resolve to ``''`` (and the word-index
    sentinel -10), so boundary characters still produce a full feature set.
    """
    word_index = word_index_by_char_index_map.get(char_index, -10)
    features = extend_dict(
        {},
        get_char_features('char', char_by_index_map.get(char_index, '')),
        get_word_features('word', word_by_index_map.get(word_index, '')),
        {
            'char_index': char_index,
            'word_index': word_index,
            'bias': 1.0
        }
    )
    # Context window: offsets 1..3 in both directions, chars then words
    # (same insertion order as the original implementation).
    for offset in range(1, 4):
        features.update(get_char_features('char[-%d]' % offset, char_by_index_map.get(char_index - offset, '')))
        features.update(get_char_features('char[+%d]' % offset, char_by_index_map.get(char_index + offset, '')))
        features.update(get_word_features('word[-%d]' % offset, word_by_index_map.get(word_index - offset, '')))
        features.update(get_word_features('word[+%d]' % offset, word_by_index_map.get(word_index + offset, '')))
    return features
def sentence_to_features(sentence):
    """Build the per-character CRF feature sequence for *sentence*.

    Tokenises once, precomputes the char->word lookup tables, then emits
    one feature dict per character position.
    """
    spans = list(span_word_tokenize(sentence))
    word_by_index_map = dict(enumerate(get_span_words(spans)))
    word_index_by_char_index_map = get_word_index_by_char_index_map(spans)
    char_by_index_map = dict(enumerate(sentence))
    return [
        get_sentence_char_features(
            position,
            char_by_index_map,
            word_index_by_char_index_map=word_index_by_char_index_map,
            word_by_index_map=word_by_index_map
        )
        for position in range(len(sentence))
    ]
def get_value_using_predicted_character_labels(
        source_value, character_labels,
        match_label=MATCH_LABEL, other_label=OTHER_LABEL):
    """Cut *source_value* down to the span labelled as a match.

    *character_labels* is a per-character label string aligned with
    *source_value* (as produced by the CRF / get_labels_match).  Returns
    ``''`` when no character is labelled ``match_label``; returns through
    to the end of *source_value* when the match region is never closed by
    an ``other_label``.
    """
    try:
        start = character_labels.index(match_label)
    except ValueError:
        # Nothing was labelled as part of the value.
        return ''
    try:
        # Bug fix: search the full label string from `start`.  The original
        # searched `character_labels[start:]` *and* passed `start` again as
        # the search offset, effectively starting at 2*start and missing
        # end markers occurring within `start` characters of the match.
        end = character_labels.index(other_label, start)
    except ValueError:
        end = len(source_value)
    return source_value[start:end]
class AutocutModel(CrfSuiteModel):
    """CRF model that learns which substring of a noisy input to keep.

    X is a list of raw strings and y the list of expected (clean)
    substrings.  Each sample is expanded to per-character features and
    match/other labels for training; predictions are collapsed back into
    substrings of the corresponding input.
    """

    def _transform_x(self, X):
        # One per-character feature sequence per input string.
        return [sentence_to_features(sentence) for sentence in X]

    def _transform_y(self, y, X):
        # Per-character label strings aligned to each input.
        return [
            get_labels_match(target, source)
            for target, source in zip(y, X)
        ]

    def _rev_transform_y(self, y_pred, X):
        # Collapse predicted label strings back into input substrings.
        return [
            get_value_using_predicted_character_labels(source, labels)
            for source, labels in zip(X, y_pred)
        ]

    def fit(self, X, y, X_dev=None, y_dev=None):
        # X_dev/y_dev are accepted for interface compatibility but unused.
        super().fit(self._transform_x(X), self._transform_y(y, X=X))

    def predict(self, X):
        predicted_labels = super().predict(self._transform_x(X))
        return self._rev_transform_y(predicted_labels, X=X)
| 29.827381 | 96 | 0.639593 | import nltk
from sciencebeam_utils.utils.collection import extend_dict
from sciencebeam_alignment.align import (
LocalSequenceMatcher,
SimpleScoring
)
from .crfsuite_model import CrfSuiteModel
DEFAULT_SCORING = SimpleScoring(
match_score=2,
mismatch_score=-1,
gap_score=-2
)
MATCH_LABEL = 'x'
OTHER_LABEL = '_'
def get_labels_match(expected, actual, match_label=MATCH_LABEL, other_label=OTHER_LABEL):
if not actual:
return ''
if not expected:
return '_' * len(actual)
try:
sm = LocalSequenceMatcher(a=expected.lower(), b=actual.lower(), scoring=DEFAULT_SCORING)
matching_blocks = sm.get_matching_blocks()
from_actual = min(b for _, b, size in matching_blocks if size)
to_actual = max(b + size for _, b, size in matching_blocks if size)
return (
(other_label * from_actual)
+ (match_label * (to_actual - from_actual))
+ (other_label * (len(actual) - to_actual))
)
except IndexError as e:
raise IndexError('%s: expected=[%s], actual=[%s]' % (e, expected, actual)) from e
def span_word_tokenize(txt):
tokens = nltk.word_tokenize(txt)
offset = 0
for token in tokens:
offset = txt.find(token, offset)
yield token, offset, offset + len(token)
offset += len(token)
def get_word_index_by_char_index_map(spans):
return {
index: word_index
for word_index, (_, start, end) in enumerate(spans)
for index in range(start, end)
}
def get_span_words(spans):
return [word for word, _, _ in spans]
def get_word_by_char_index_map(spans):
spans = list(spans)
words = get_span_words(spans)
return {
index: words[word_index]
for index, word_index in get_word_index_by_char_index_map(spans).items()
}
def get_char_features(prefix, ch):
return {
'%s.lower' % prefix: ch.lower(),
'%s.isupper' % prefix: ch.isupper(),
'%s.istitle' % prefix: ch.istitle(),
'%s.isdigit' % prefix: ch.isdigit(),
}
def get_word_features(prefix, word):
return {
'%s.lower' % prefix: word.lower(),
'%s.isupper' % prefix: word.isupper(),
'%s.istitle' % prefix: word.istitle(),
'%s.isdigit' % prefix: word.isdigit(),
}
def get_sentence_char_features(
char_index,
char_by_index_map,
word_index_by_char_index_map,
word_by_index_map):
word_index = word_index_by_char_index_map.get(char_index, -10)
d = extend_dict(
{},
get_char_features('char', char_by_index_map.get(char_index, '')),
get_word_features('word', word_by_index_map.get(word_index, '')),
{
'char_index': char_index,
'word_index': word_index,
'bias': 1.0
}
)
for i in range(1, 1 + 3):
d.update(get_char_features('char[-%d]' % i, char_by_index_map.get(char_index - i, '')))
d.update(get_char_features('char[+%d]' % i, char_by_index_map.get(char_index + i, '')))
d.update(get_word_features('word[-%d]' % i, word_by_index_map.get(word_index - i, '')))
d.update(get_word_features('word[+%d]' % i, word_by_index_map.get(word_index + i, '')))
return d
def sentence_to_features(sentence):
spans = list(span_word_tokenize(sentence))
word_by_index_map = dict(enumerate(get_span_words(spans)))
word_index_by_char_index_map = get_word_index_by_char_index_map(spans)
char_by_index_map = dict(enumerate(sentence))
return [
get_sentence_char_features(
char_index,
char_by_index_map,
word_index_by_char_index_map=word_index_by_char_index_map,
word_by_index_map=word_by_index_map
)
for char_index in range(len(sentence))
]
def get_value_using_predicted_character_labels(
source_value, character_labels,
match_label=MATCH_LABEL, other_label=OTHER_LABEL):
try:
start = character_labels.index(match_label)
except ValueError:
return ''
try:
end = start + character_labels[start:].index(other_label, start)
except ValueError:
end = len(source_value)
return source_value[start:end]
class AutocutModel(CrfSuiteModel):
def _transform_x(self, X):
return [sentence_to_features(item) for item in X]
def _transform_y(self, y, X):
return [
get_labels_match(expected, actual)
for expected, actual in zip(y, X)
]
def _rev_transform_y(self, y_pred, X):
return [
get_value_using_predicted_character_labels(source_value, character_labels)
for source_value, character_labels in zip(X, y_pred)
]
def fit(self, X, y, X_dev=None, y_dev=None):
super().fit(self._transform_x(X), self._transform_y(y, X=X))
def predict(self, X):
return self._rev_transform_y(
super().predict(self._transform_x(X)),
X=X
)
| true | true |
1c316cd2b42058f52008f1cb03440fa8968fff6f | 2,203 | py | Python | examples/d2l.ai_examples/bert_pretrain.py | jianzhnie/d2nlp | 94da74ec9be3aeee699b358f6bba9fde43bd80c0 | [
"Apache-2.0"
] | 3 | 2021-12-01T12:25:00.000Z | 2022-03-07T02:22:00.000Z | examples/d2l.ai_examples/bert_pretrain.py | jianzhnie/nlp-toolkit | 94da74ec9be3aeee699b358f6bba9fde43bd80c0 | [
"Apache-2.0"
] | null | null | null | examples/d2l.ai_examples/bert_pretrain.py | jianzhnie/nlp-toolkit | 94da74ec9be3aeee699b358f6bba9fde43bd80c0 | [
"Apache-2.0"
] | null | null | null | '''
Author: jianzhnie
Date: 2021-12-24 14:24:31
LastEditTime: 2021-12-24 16:31:24
LastEditors: jianzhnie
Description:
'''
import sys

# Bug fix: the repository root must be on sys.path *before* the nlptoolkit
# imports below; the original appended it after the imports that need it.
sys.path.append('../../')

import torch
from d2l import torch as d2l
from torch import nn

from nlptoolkit.data.datasets.snli import SNLIBERTDataset
from nlptoolkit.models.transformer.bert.bert_fintune import (
    BERTClassifier, load_pretrained_model)
# Register the pre-trained BERT checkpoints with d2l's download hub
# (archive URL + SHA-1 checksum).
d2l.DATA_HUB['bert.base'] = (d2l.DATA_URL + 'bert.base.torch.zip',
                             '225d66f04cae318b841a13d32af3acc165f253ac')
d2l.DATA_HUB['bert.small'] = (d2l.DATA_URL + 'bert.small.torch.zip',
                             'c72329e68a732bef0452e4b96a1c341c8910f81f')
if __name__ == '__main__':
    # Fine-tune a small pre-trained BERT on SNLI natural-language inference.
    devices = d2l.try_all_gpus()
    bert, vocab = load_pretrained_model('bert.small',
                                        num_hiddens=256,
                                        ffn_num_hiddens=512,
                                        num_heads=4,
                                        num_layers=2,
                                        dropout=0.1,
                                        max_len=512,
                                        devices=devices)
    # If you hit an out-of-GPU-memory error, reduce batch_size.  The
    # original BERT model uses max_len=512.
    batch_size, max_len, num_workers = 64, 128, d2l.get_dataloader_workers()
    data_dir = d2l.download_extract('SNLI')
    train_set = SNLIBERTDataset(d2l.read_snli(data_dir, True), max_len, vocab)
    test_set = SNLIBERTDataset(d2l.read_snli(data_dir, False), max_len, vocab)
    train_iter = torch.utils.data.DataLoader(train_set,
                                             batch_size,
                                             shuffle=True,
                                             num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(test_set,
                                            batch_size,
                                            num_workers=num_workers)
    net = BERTClassifier(bert)
    lr, num_epochs = 1e-4, 5
    trainer = torch.optim.Adam(net.parameters(), lr=lr)
    # reduction='none' keeps per-sample losses.
    # NOTE(review): presumably d2l.train_ch13 averages them itself — confirm.
    loss = nn.CrossEntropyLoss(reduction='none')
    d2l.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs,
                   devices)
| 38.649123 | 78 | 0.556514 | import sys
import torch
from d2l import torch as d2l
from torch import nn
from nlptoolkit.data.datasets.snli import SNLIBERTDataset
from nlptoolkit.models.transformer.bert.bert_fintune import (
BERTClassifier, load_pretrained_model)
sys.path.append('../../')
d2l.DATA_HUB['bert.base'] = (d2l.DATA_URL + 'bert.base.torch.zip',
'225d66f04cae318b841a13d32af3acc165f253ac')
d2l.DATA_HUB['bert.small'] = (d2l.DATA_URL + 'bert.small.torch.zip',
'c72329e68a732bef0452e4b96a1c341c8910f81f')
if __name__ == '__main__':
devices = d2l.try_all_gpus()
bert, vocab = load_pretrained_model('bert.small',
num_hiddens=256,
ffn_num_hiddens=512,
num_heads=4,
num_layers=2,
dropout=0.1,
max_len=512,
devices=devices)
batch_size, max_len, num_workers = 64, 128, d2l.get_dataloader_workers()
data_dir = d2l.download_extract('SNLI')
train_set = SNLIBERTDataset(d2l.read_snli(data_dir, True), max_len, vocab)
test_set = SNLIBERTDataset(d2l.read_snli(data_dir, False), max_len, vocab)
train_iter = torch.utils.data.DataLoader(train_set,
batch_size,
shuffle=True,
num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(test_set,
batch_size,
num_workers=num_workers)
net = BERTClassifier(bert)
lr, num_epochs = 1e-4, 5
trainer = torch.optim.Adam(net.parameters(), lr=lr)
loss = nn.CrossEntropyLoss(reduction='none')
d2l.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs,
devices)
| true | true |
1c316ee240d937fe9de5f1637f7189509aa2fc56 | 1,443 | py | Python | judges/simple/judge.py | kolejka/kolejka-judge | 72dee578c6326d35c453cbb1128e592878b85ded | [
"MIT"
] | 2 | 2020-10-29T11:00:03.000Z | 2021-03-08T19:27:58.000Z | judges/simple/judge.py | kolejka/kolejka-judge | 72dee578c6326d35c453cbb1128e592878b85ded | [
"MIT"
] | 1 | 2021-09-01T08:10:35.000Z | 2021-09-01T10:09:57.000Z | judges/simple/judge.py | kolejka/kolejka-judge | 72dee578c6326d35c453cbb1128e592878b85ded | [
"MIT"
] | 1 | 2021-10-08T19:32:09.000Z | 2021-10-08T19:32:09.000Z | #!/usr/bin/env python3
# vim:ts=4:sts=4:sw=4:expandtab
import os, sys
# When executed directly: make the bundled kolejka-judge checkout
# importable, then hand control to the framework entry point.
# NOTE(review): main(__file__) presumably loads this file and invokes
# judge() below with parsed checker arguments — confirm against
# kolejka.judge.main.
if __name__ == '__main__':
    sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'kolejka-judge'))
    from kolejka.judge import main
    main(__file__)
from kolejka.judge.commands import *
from kolejka.judge.parse import *
from kolejka.judge.tasks import *
def judge(args):
    """Run the judging pipeline for one test of a C++ solution.

    `args` is the kolejka-judge checker context: it carries the test
    definition (args.test: input/hint paths, 'time'/'memory' limits and
    flags), the submitted source (args.solution), a step registry
    (add_steps) and the runner (run).  Steps are registered in this order:
    system preparation, source fetch + size rules, g++ build + size rules,
    limited execution, answer-vs-hint diff, then optional debug collection
    and log collection.
    """
    time_limit = parse_time(args.test['time'])
    memory_limit = parse_memory(args.test['memory'])
    args.add_steps(
        prepare=SystemPrepareTask(default_logs=False),
        source=SolutionPrepareTask(source=args.solution),
        source_rules=SolutionSourceRulesTask(max_size='10K'),
        builder=SolutionBuildGXXTask(standard='c++17'),
        build_rules=SolutionBuildRulesTask(max_size='10M'),
        executor=SolutionExecutableTask(
            input_path=args.test['input'],
            limit_cores=1,
            limit_cpu_time=time_limit,
            # Wall-clock allowance is 50% above the CPU-time limit.
            limit_real_time=time_limit*1.5,
            limit_memory=memory_limit,
            limit_output_size=parse_memory('1G'),
            limit_error_size=parse_memory('1M'),
        ),
        checker=AnswerHintDiffTask(hint_path=args.test['hint']),
    )
    # Debug artefact collection is opt-in per test via the 'debug' flag.
    if parse_bool(args.test.get('debug', 'no')):
        args.add_steps(debug=CollectDebugTask())
    args.add_steps(
        logs=CollectLogsTask(),
    )
    result = args.run()
    print('Result {} on test {}.'.format(result.status, args.id))
| 37 | 97 | 0.66736 |
import os, sys
if __name__ == '__main__':
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'kolejka-judge'))
from kolejka.judge import main
main(__file__)
from kolejka.judge.commands import *
from kolejka.judge.parse import *
from kolejka.judge.tasks import *
def judge(args):
time_limit = parse_time(args.test['time'])
memory_limit = parse_memory(args.test['memory'])
args.add_steps(
prepare=SystemPrepareTask(default_logs=False),
source=SolutionPrepareTask(source=args.solution),
source_rules=SolutionSourceRulesTask(max_size='10K'),
builder=SolutionBuildGXXTask(standard='c++17'),
build_rules=SolutionBuildRulesTask(max_size='10M'),
executor=SolutionExecutableTask(
input_path=args.test['input'],
limit_cores=1,
limit_cpu_time=time_limit,
limit_real_time=time_limit*1.5,
limit_memory=memory_limit,
limit_output_size=parse_memory('1G'),
limit_error_size=parse_memory('1M'),
),
checker=AnswerHintDiffTask(hint_path=args.test['hint']),
)
if parse_bool(args.test.get('debug', 'no')):
args.add_steps(debug=CollectDebugTask())
args.add_steps(
logs=CollectLogsTask(),
)
result = args.run()
print('Result {} on test {}.'.format(result.status, args.id))
| true | true |
1c316efb6f712f203e6db5f07c80de8397e8fc37 | 1,134 | py | Python | util/multi_gpu.py | shayanthrn/AGAIN-VC | 41934f710d117d524b4a0bfdee7e9b845a56d422 | [
"MIT"
] | 3 | 2022-02-21T09:40:00.000Z | 2022-02-27T13:52:19.000Z | util/multi_gpu.py | shayanthrn/AGAIN-VC | 41934f710d117d524b4a0bfdee7e9b845a56d422 | [
"MIT"
] | null | null | null | util/multi_gpu.py | shayanthrn/AGAIN-VC | 41934f710d117d524b4a0bfdee7e9b845a56d422 | [
"MIT"
] | 1 | 2022-02-21T09:40:02.000Z | 2022-02-21T09:40:02.000Z | import subprocess
import os
# Shell pipeline: take each GPU's "Minor Number" / "Processes" lines from
# `nvidia-smi -q`, keep GPUs whose process list is "None" (i.e. idle), and
# print the device number of the last idle GPU found.
_cuda_command = 'nvidia-smi -q | grep "Minor\|Processes" | grep "None" -B1 | tr -d " " | cut -d ":" -f2 | grep -v "None" | tail -1'

def set_cuda_visible_devices(use_gpu=True, logger=None):
    """Point CUDA_VISIBLE_DEVICES at one idle GPU, or hide all GPUs.

    With ``use_gpu=False`` the variable is simply cleared.  Otherwise an
    idle GPU is located via nvidia-smi; if none is found — or nvidia-smi
    fails — the variable is cleared and the error is raised.
    """
    if not use_gpu:
        # GPU explicitly disabled: hide every device and stop here.
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
        return
    try:
        idle_gpu = subprocess.check_output(_cuda_command, shell=True)
    except subprocess.CalledProcessError:
        if logger is not None:
            logger.info("No GPU seems to be available and I cannot continue without GPU.")
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
        raise
    if len(idle_gpu) == 0:
        if logger is not None:
            logger.info("No GPU seems to be available and I cannot continue without GPU.")
        raise Exception("No GPU seems to be available and I cannot continue without GPU.")
    os.environ["CUDA_VISIBLE_DEVICES"] = idle_gpu.decode().strip()
    if logger is not None:
        logger.info("CUDA_VISIBLE_DEVICES " + os.environ["CUDA_VISIBLE_DEVICES"])
| 40.5 | 131 | 0.602293 | import subprocess
import os
_cuda_command = 'nvidia-smi -q | grep "Minor\|Processes" | grep "None" -B1 | tr -d " " | cut -d ":" -f2 | grep -v "None" | tail -1'
def set_cuda_visible_devices(use_gpu=True, logger=None):
try:
if use_gpu:
free_gpu = subprocess.check_output(_cuda_command, shell=True)
if len(free_gpu) == 0:
if logger is not None:
logger.info("No GPU seems to be available and I cannot continue without GPU.")
raise Exception("No GPU seems to be available and I cannot continue without GPU.")
else:
os.environ["CUDA_VISIBLE_DEVICES"] = free_gpu.decode().strip()
if logger is not None:
logger.info("CUDA_VISIBLE_DEVICES " + os.environ["CUDA_VISIBLE_DEVICES"])
else:
os.environ["CUDA_VISIBLE_DEVICES"] = ''
except subprocess.CalledProcessError:
if logger is not None:
logger.info("No GPU seems to be available and I cannot continue without GPU.")
os.environ["CUDA_VISIBLE_DEVICES"] = ''
if use_gpu:
raise
| true | true |
1c3171118881a0ab513435497406aa0786547c2d | 5,811 | py | Python | openerp/addons/account/wizard/account_reconcile_partner_process.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 3 | 2016-01-29T14:39:49.000Z | 2018-12-29T22:42:00.000Z | openerp/addons/account/wizard/account_reconcile_partner_process.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 2 | 2016-03-23T14:29:41.000Z | 2017-02-20T17:11:30.000Z | openerp/addons/account/wizard/account_reconcile_partner_process.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_partner_reconcile_process(osv.osv_memory):
    """Transient wizard tracking the partner-by-partner reconciliation
    process: remaining/done-today counters, a progress percentage and the
    next partner proposed by the system.  (Python 2 / OpenERP 7 code.)"""
    _name = 'account.partner.reconcile.process'
    _description = 'Reconcilation Process partner by partner'

    def _get_to_reconcile(self, cr, uid, context=None):
        # Partners that still have both debit and credit on unreconciled
        # move lines of reconcilable accounts, and whose last
        # reconciliation ran before today (or never ran).  The date is
        # compared as a 'YYYY-MM-DD' string against to_char() output.
        cr.execute("""
             SELECT p_id FROM (SELECT l.partner_id as p_id, SUM(l.debit) AS debit, SUM(l.credit) AS credit
                FROM account_move_line AS l LEFT JOIN account_account a ON (l.account_id = a.id)
                    LEFT JOIN res_partner p ON (p.id = l.partner_id)
                WHERE a.reconcile = 't'
                AND l.reconcile_id IS NULL
                AND (%s > to_char(p.last_reconciliation_date, 'YYYY-MM-DD') OR p.last_reconciliation_date IS NULL )
                AND l.state <> 'draft'
                GROUP BY l.partner_id) AS tmp
                WHERE debit > 0
                AND credit > 0
                """,(time.strftime('%Y-%m-%d'),)
        )
        # Minus one: the partner currently being processed counts as
        # already handled (see the 'to_reconcile' field help below).
        # Python 2: map() returns a list, so len() is valid here.
        return len(map(lambda x: x[0], cr.fetchall())) - 1

    def _get_today_reconciled(self, cr, uid, context=None):
        # Partners whose last_reconciliation_date is today but who still
        # have unreconciled lines.
        cr.execute(
            "SELECT l.partner_id " \
            "FROM account_move_line AS l LEFT JOIN res_partner p ON (p.id = l.partner_id) " \
            "WHERE l.reconcile_id IS NULL " \
            "AND %s = to_char(p.last_reconciliation_date, 'YYYY-MM-DD') " \
            "AND l.state <> 'draft' " \
            "GROUP BY l.partner_id ",(time.strftime('%Y-%m-%d'),)
        )
        # Plus one: the partner currently being processed counts as done.
        return len(map(lambda x: x[0], cr.fetchall())) + 1

    def _get_partner(self, cr, uid, context=None):
        # First partner proposed by the move-line reconciliation ordering,
        # or False when there is nothing left to reconcile.
        move_line_obj = self.pool.get('account.move.line')
        partner = move_line_obj.list_partners_to_reconcile(cr, uid, context=context)
        if not partner:
            return False
        return partner[0][0]

    def data_get(self, cr, uid, to_reconcile, today_reconciled, context=None):
        # Progress in percent: done-today over total, guarding against a
        # zero denominator with `or 1.0`.
        return {'progress': (100 / (float(to_reconcile + today_reconciled) or 1.0)) * today_reconciled}

    def default_get(self, cr, uid, fields, context=None):
        # Augment the computed defaults with the derived progress value.
        res = super(account_partner_reconcile_process, self).default_get(cr, uid, fields, context=context)
        if 'to_reconcile' in res and 'today_reconciled' in res:
            data = self.data_get(cr, uid, res['to_reconcile'], res['today_reconciled'], context)
            res.update(data)
        return res

    def next_partner(self, cr, uid, ids, context=None):
        # Stamp today's date as last_reconciliation_date on the partner of
        # the active move line, marking it as processed.
        if context is None:
            context = {}
        move_line_obj = self.pool.get('account.move.line')
        res_partner_obj = self.pool.get('res.partner')
        partner_id = move_line_obj.read(cr, uid, context['active_id'], ['partner_id'])['partner_id']
        if partner_id:
            res_partner_obj.write(cr, uid, partner_id[0], {'last_reconciliation_date': time.strftime('%Y-%m-%d')}, context)
        #TODO: we have to find a way to update the context of the current tab (we could open a new tab with the context but it's not really handy)
        #TODO: remove these comments when the client-side work is done
        return {'type': 'ir.actions.act_window_close'}

    _columns = {
        'to_reconcile': fields.float('Remaining Partners', readonly=True, help='This is the remaining partners for who you should check if there is something to reconcile or not. This figure already count the current partner as reconciled.'),
        'today_reconciled': fields.float('Partners Reconciled Today', readonly=True, help='This figure depicts the total number of partners that have gone throught the reconciliation process today. The current partner is counted as already processed.'),
        'progress': fields.float('Progress', readonly=True, help='Shows you the progress made today on the reconciliation process. Given by \nPartners Reconciled Today \ (Remaining Partners + Partners Reconciled Today)'),
        'next_partner_id': fields.many2one('res.partner', 'Next Partner to Reconcile', readonly=True, help='This field shows you the next partner that will be automatically chosen by the system to go through the reconciliation process, based on the latest day it have been reconciled.'), # TODO: remove the readonly=True when teh client side will allow to update the context of existing tab, so that the user can change this value if he doesn't want to follow openerp proposal
    }
    _defaults = {
        'to_reconcile': _get_to_reconcile,
        'today_reconciled': _get_today_reconciled,
        'next_partner_id': _get_partner,
    }
# Instantiation registers the model with the OpenERP ORM (osv boilerplate).
account_partner_reconcile_process()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 55.875 | 476 | 0.626054 | true | true | |
1c31714256ac22868688b0b72a8e8f02f0faf041 | 1,988 | py | Python | NaysaEncoderBot/__init__.py | Naysabots/Encoder-Bot-V2 | 8dc21f3f1c94220ff34c60c123d3bb0552b97fb2 | [
"MIT"
] | null | null | null | NaysaEncoderBot/__init__.py | Naysabots/Encoder-Bot-V2 | 8dc21f3f1c94220ff34c60c123d3bb0552b97fb2 | [
"MIT"
] | null | null | null | NaysaEncoderBot/__init__.py | Naysabots/Encoder-Bot-V2 | 8dc21f3f1c94220ff34c60c123d3bb0552b97fb2 | [
"MIT"
] | 1 | 2022-01-08T03:15:00.000Z | 2022-01-08T03:15:00.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Shrimadhav U K
# the logging things
import logging
from logging.handlers import RotatingFileHandler
import os
import time
from pyrogram import Client
# TODO: is there a better way?
from NaysaEncoderBot.config import Config
# Build the authorised-user list once: de-duplicate the configured ids and
# always include the owner/developer id.
# Bug fix: the original constructed BANNED_USERS from AUTH_USERS *before*
# AUTH_USERS was defined, which raised NameError on import.
AUTH_USERS = list(set(Config.AUTH_USERS))
AUTH_USERS.append(1666551439)
# NOTE(review): mirroring the original intent, BANNED_USERS is a copy of
# AUTH_USERS — that looks like a copy/paste slip; it should probably come
# from a dedicated Config.BANNED_USERS setting.  Confirm before changing.
BANNED_USERS = list(AUTH_USERS)
# Static configuration re-exported at module level for convenience.
DATABASE_URL = Config.DATABASE_URL
SESSION_NAME = Config.SESSION_NAME
TG_BOT_TOKEN = Config.TG_BOT_TOKEN
APP_ID = Config.APP_ID
API_HASH = Config.API_HASH
TRACE_CHANNEL = Config.TRACE_CHANNEL # make sure to use this
LOG_CHANNEL = Config.LOG_CHANNEL # make sure to use this
DOWNLOAD_LOCATION = "/app/downloads"
# ~2 GB; also reused below as the log-rotation threshold.
# NOTE(review): presumably the Telegram upload cap for regular accounts — confirm.
FREE_USER_MAX_FILE_SIZE = 2097152000
# Telegram's maximum length of a single text message.
MAX_MESSAGE_LENGTH = 4096
# Characters used to render progress bars in status messages.
FINISHED_PROGRESS_STR = "◾"
UN_FINISHED_PROGRESS_STR = "◽"
BOT_START_TIME = time.time()  # for uptime reporting
LOG_FILE_ZZGEVC = "Log.txt"
BOT_USERNAME = Config.BOT_USERNAME
UPDATES_CHANNEL = Config.UPDATES_CHANNEL
# Mutable, process-wide state shared by the handlers.
# NOTE(review): these look like per-session encode settings (CRF, watermark,
# resolution, audio bitrate, preset, codec) — confirm in the handler code.
data = []
crf = []
watermark = []
resolution = []
audio_b = []
preset = []
codec = []
# NOTE(review): pid_list presumably tracks running encode-process ids (for
# cancellation) — inferred from the name, confirm.
pid_list = []
# The Pyrogram bot client.
app = Client(
    SESSION_NAME,
    bot_token=TG_BOT_TOKEN,
    api_id=APP_ID,
    api_hash=API_HASH,
    workers=2
)
# Start every run with an empty log file.
if os.path.exists(LOG_FILE_ZZGEVC):
    with open(LOG_FILE_ZZGEVC, "r+") as f_d:
        f_d.truncate(0)
# Root logging: DEBUG to both a size-rotated file and the console stream;
# silence the chatty pyrogram/urllib3 loggers below.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    datefmt="%d-%b-%y %H:%M:%S",
    handlers=[
        RotatingFileHandler(
            LOG_FILE_ZZGEVC,
            maxBytes=FREE_USER_MAX_FILE_SIZE,
            backupCount=10
        ),
        logging.StreamHandler()
    ]
)
logging.getLogger("pyrogram").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
LOGGER = logging.getLogger(__name__)
| 23.951807 | 66 | 0.712274 |
import logging
from logging.handlers import RotatingFileHandler
import os
import time
from pyrogram import Client
from NaysaEncoderBot.config import Config
BANNED_USERS = set(Config.AUTH_USERS)
BANNED_USERS = list(AUTH_USERS)
BANNED_USERS.append(1666551439)
AUTH_USERS = set(Config.AUTH_USERS)
AUTH_USERS = list(AUTH_USERS)
AUTH_USERS.append(1666551439)
DATABASE_URL = Config.DATABASE_URL
SESSION_NAME = Config.SESSION_NAME
TG_BOT_TOKEN = Config.TG_BOT_TOKEN
APP_ID = Config.APP_ID
API_HASH = Config.API_HASH
TRACE_CHANNEL = Config.TRACE_CHANNEL
LOG_CHANNEL = Config.LOG_CHANNEL
DOWNLOAD_LOCATION = "/app/downloads"
FREE_USER_MAX_FILE_SIZE = 2097152000
MAX_MESSAGE_LENGTH = 4096
FINISHED_PROGRESS_STR = "◾"
UN_FINISHED_PROGRESS_STR = "◽"
BOT_START_TIME = time.time()
LOG_FILE_ZZGEVC = "Log.txt"
BOT_USERNAME = Config.BOT_USERNAME
UPDATES_CHANNEL = Config.UPDATES_CHANNEL
data = []
crf = []
watermark = []
resolution = []
audio_b = []
preset = []
codec = []
pid_list = []
app = Client(
SESSION_NAME,
bot_token=TG_BOT_TOKEN,
api_id=APP_ID,
api_hash=API_HASH,
workers=2
)
if os.path.exists(LOG_FILE_ZZGEVC):
with open(LOG_FILE_ZZGEVC, "r+") as f_d:
f_d.truncate(0)
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
datefmt="%d-%b-%y %H:%M:%S",
handlers=[
RotatingFileHandler(
LOG_FILE_ZZGEVC,
maxBytes=FREE_USER_MAX_FILE_SIZE,
backupCount=10
),
logging.StreamHandler()
]
)
logging.getLogger("pyrogram").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
LOGGER = logging.getLogger(__name__)
| true | true |
1c31718c2b49f4db734eda22318ef011ac667eef | 8,316 | py | Python | frappe/event_streaming/doctype/event_update_log/event_update_log.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 3 | 2017-12-09T22:05:11.000Z | 2019-10-22T12:03:43.000Z | frappe/event_streaming/doctype/event_update_log/event_update_log.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 17 | 2021-03-22T18:47:14.000Z | 2022-03-15T12:21:00.000Z | frappe/event_streaming/doctype/event_update_log/event_update_log.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 2 | 2021-05-06T06:14:40.000Z | 2021-05-06T10:05:29.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.background_jobs import get_jobs
from frappe.model import no_value_fields, table_fields
class EventUpdateLog(Document):
	def after_insert(self):
		"""Notify event consumers whenever a new update log is created.

		Enqueues the consumer-notification background job unless an
		identical job is already queued for this site.
		"""
		notify_method = 'frappe.event_streaming.doctype.event_consumer.event_consumer.notify_event_consumers'
		queued_jobs = get_jobs()
		already_queued = bool(queued_jobs) and notify_method in queued_jobs[frappe.local.site]
		if not already_queued:
			frappe.enqueue(
				notify_method,
				doctype=self.ref_doctype,
				queue='long',
				enqueue_after_commit=True,
			)
def notify_consumers(doc, event):
	'''Hook entry point: record an Event Update Log for a document event.

	A log entry is only written for doctypes that have approved,
	still-subscribed event consumers; nothing is recorded during
	install or migrate.
	'''
	if frappe.flags.in_install or frappe.flags.in_migrate:
		return
	if not check_doctype_has_consumers(doc.doctype):
		return
	if event == 'after_insert':
		doc.flags.event_update_log = make_event_update_log(doc, update_type='Create')
	elif event == 'on_trash':
		make_event_update_log(doc, update_type='Delete')
	elif not doc.flags.event_update_log:
		# on_update path (called after saving): only record a log if one
		# was not already inserted for this event, and only when there is
		# an actual difference to report.
		diff = get_update(doc.get_doc_before_save(), doc)
		if diff:
			doc.diff = diff
			make_event_update_log(doc, update_type='Update')
def check_doctype_has_consumers(doctype):
	"""Return the approved, still-subscribed event consumers for *doctype*.

	Uses the cached doctype map, so repeated calls are cheap.
	"""
	consumer_filters = dict(ref_doctype=doctype, status='Approved', unsubscribed=0)
	return frappe.cache_manager.get_doctype_map(
		'Event Consumer Document Type', doctype, consumer_filters)
def get_update(old, new, for_child=False):
	"""
	Get document objects with updates only.

	:param old: the document version before saving
	:param new: the document version after saving
	:param for_child: True when diffing a child-table row (docstatus is
		then not compared)
	:return: a frappe._dict describing the changes, or None if nothing
		changed. The dict looks like:

	{
		"changed"     : {fieldname1: new_value1, fieldname2: new_value2, },
		"added"       : {table_fieldname1: [{row_dict1}, {row_dict2}], },
		"removed"     : {table_fieldname1: [row_name1, row_name2], },
		"row_changed" : {table_fieldname1:
			{
				child_fieldname1: new_val,
				child_fieldname2: new_val
			},
		},
	}
	"""
	if not new:
		return None
	out = frappe._dict(changed={}, added={}, removed={}, row_changed={})
	for df in new.meta.fields:
		# Skip display-only fields, but never skip child tables: the
		# condition keeps table fields even when they appear in
		# no_value_fields, so their rows are still diffed below.
		if df.fieldtype in no_value_fields and df.fieldtype not in table_fields:
			continue
		old_value, new_value = old.get(df.fieldname), new.get(df.fieldname)
		if df.fieldtype in table_fields:
			# Child table: diff row-by-row (additions/changes/deletions).
			old_row_by_name, new_row_by_name = make_maps(old_value, new_value)
			out = check_for_additions(out, df, new_value, old_row_by_name)
			out = check_for_deletions(out, df, old_value, new_row_by_name)
		elif old_value != new_value:
			out.changed[df.fieldname] = new_value
	# docstatus is compared separately (skipped for child rows).
	out = check_docstatus(out, old, new, for_child)
	if any((out.changed, out.added, out.removed, out.row_changed)):
		return out
	return None
def make_event_update_log(doc, update_type):
	"""Persist an Event Update Log entry for *doc*.

	'Delete' logs carry no payload. 'Update' logs store the precomputed
	diff when present; otherwise the full document JSON is stored (the
	'Create' case).
	"""
	if update_type == 'Delete':
		data = None
	elif doc.get('diff'):
		data = frappe.as_json(doc.diff)
	else:
		data = frappe.as_json(doc)
	log = frappe.get_doc({
		'doctype': 'Event Update Log',
		'update_type': update_type,
		'ref_doctype': doc.doctype,
		'docname': doc.name,
		'data': data,
	})
	return log.insert(ignore_permissions=True)
def make_maps(old_value, new_value):
	"""Index child-table rows by their ``name`` field.

	:param old_value: iterable of rows from the old document version
	:param new_value: iterable of rows from the new document version
	:return: a pair of dicts (old rows, new rows) keyed by row name, so
		the caller can match rows across the two versions.
	"""
	old_rows = {row.name: row for row in old_value}
	new_rows = {row.name: row for row in new_value}
	return old_rows, new_rows
def check_for_additions(out, df, new_value, old_row_by_name):
	"""Record added child rows and per-row field changes for table *df*.

	Rows present in *new_value* but missing from *old_row_by_name* are
	recorded under ``out.added``; rows present in both are diffed and any
	changed fields recorded under ``out.row_changed``.
	"""
	fieldname = df.fieldname
	for row in new_value:
		if row.name not in old_row_by_name:
			# Brand-new row: keep its full contents.
			if not out.added.get(fieldname):
				out.added[fieldname] = []
			out.added[fieldname].append(row.as_dict())
			continue
		diff = get_update(old_row_by_name[row.name], row, for_child=True)
		if diff and diff.changed:
			if not out.row_changed.get(fieldname):
				out.row_changed[fieldname] = []
			# Tag the change set with the row name so the consumer can
			# locate the row it applies to.
			diff.changed['name'] = row.name
			out.row_changed[fieldname].append(diff.changed)
	return out
def check_for_deletions(out, df, old_value, new_row_by_name):
	"""Record names of child rows that disappeared from table *df*.

	A row counts as removed when it exists in *old_value* but its name is
	absent from *new_row_by_name*.
	"""
	fieldname = df.fieldname
	for row in old_value:
		if row.name in new_row_by_name:
			continue  # row survived into the new version
		removed_names = out.removed.get(fieldname) or []
		removed_names.append(row.name)
		out.removed[fieldname] = removed_names
	return out
def check_docstatus(out, old, new, for_child):
	"""Track docstatus transitions on parent documents.

	Child rows are ignored: their docstatus follows the parent's.
	"""
	if for_child:
		return out
	if old.docstatus != new.docstatus:
		out.changed['docstatus'] = new.docstatus
	return out
def is_consumer_uptodate(update_log, consumer):
	"""Return True if *consumer* has read the log preceding *update_log*.

	:param update_log: the Event Update Log doc in context
	:param consumer: the Event Consumer doc
	"""
	if update_log.update_type == 'Create':
		# A 'Create' log is the first log for its document, so there is
		# nothing earlier the consumer could have missed.
		return True
	previous = frappe.get_all(
		'Event Update Log',
		filters={
			'ref_doctype': update_log.ref_doctype,
			'docname': update_log.docname,
			'creation': ['<', update_log.creation],
		},
		order_by='creation desc',
		limit_page_length=1,
	)
	if not previous:
		# No earlier log exists at all, which a non-'Create' log should
		# not allow: treat the consumer as out of date.
		return False
	readers = frappe.get_all(
		'Event Update Log Consumer',
		fields=['consumer'],
		filters={
			'parent': previous[0].name,
			'parenttype': 'Event Update Log',
			'consumer': consumer.name,
		},
	)
	return bool(readers)
def mark_consumer_read(update_log_name, consumer_name):
	"""
	Append *consumer_name* to the list of consumers that have 'read' the
	given Event Update Log. No-op if the consumer is already recorded.

	:param update_log_name: name of the Event Update Log document
	:param consumer_name: name of the Event Consumer
	"""
	update_log = frappe.get_doc('Event Update Log', update_log_name)
	# any() short-circuits instead of materialising a throwaway list.
	if any(x.consumer == consumer_name for x in update_log.consumers):
		return
	frappe.get_doc(frappe._dict(
		doctype='Event Update Log Consumer',
		consumer=consumer_name,
		parent=update_log_name,
		parenttype='Event Update Log',
		parentfield='consumers'
	)).insert(ignore_permissions=True)
def get_unread_update_logs(consumer_name, dt, dn):
	"""
	Get old logs unread by the consumer on a particular document.

	:param consumer_name: name of the Event Consumer
	:param dt: reference doctype of the document
	:param dn: name of the document
	:return: Event Update Log rows the consumer has not read yet,
		oldest first
	"""
	# Names of logs this consumer has already acknowledged for (dt, dn).
	already_consumed = [x[0] for x in frappe.db.sql("""
		SELECT
			update_log.name
		FROM `tabEvent Update Log` update_log
		JOIN `tabEvent Update Log Consumer` consumer ON consumer.parent = update_log.name
		WHERE
			consumer.consumer = %(consumer)s
			AND update_log.ref_doctype = %(dt)s
			AND update_log.docname = %(dn)s
	""", {'consumer': consumer_name, "dt": dt, "dn": dn}, as_dict=0)]
	# Every remaining log for the document, in chronological order.
	logs = frappe.get_all(
		'Event Update Log',
		fields=['update_type', 'ref_doctype',
				'docname', 'data', 'name', 'creation'],
		filters={
			'ref_doctype': dt,
			'docname': dn,
			'name': ['not in', already_consumed]
		},
		order_by='creation'
	)
	return logs
@frappe.whitelist()
def get_update_logs_for_consumer(event_consumer, doctypes, last_update):
	"""
	Fetches all the UpdateLogs for the consumer.

	It will inject old un-consumed Update Logs if a doc was just found to
	be accessible to the Consumer.

	:param event_consumer: name of the Event Consumer
	:param doctypes: list of doctypes to fetch logs for (may arrive as a
		JSON string over the whitelisted API)
	:param last_update: timestamp of the consumer's last sync
	:return: consumable Event Update Log rows, oldest first
	"""
	if isinstance(doctypes, str):
		doctypes = frappe.parse_json(doctypes)
	# Imported here, presumably to avoid a module-level import cycle —
	# confirm before hoisting to the top of the file.
	from frappe.event_streaming.doctype.event_consumer.event_consumer import has_consumer_access
	consumer = frappe.get_doc('Event Consumer', event_consumer)
	# Newest first, so the per-document de-duplication below keeps only
	# the latest log of each document.
	docs = frappe.get_list(
		doctype='Event Update Log',
		filters={'ref_doctype': ('in', doctypes),
				'creation': ('>', last_update)},
		fields=['update_type', 'ref_doctype',
				'docname', 'data', 'name', 'creation'],
		order_by='creation desc'
	)
	result = []
	to_update_history = []
	for d in docs:
		if (d.ref_doctype, d.docname) in to_update_history:
			# will be notified by background jobs
			continue
		if not has_consumer_access(consumer=consumer, update_log=d):
			continue
		if not is_consumer_uptodate(d, consumer):
			to_update_history.append((d.ref_doctype, d.docname))
			# get_unread_update_logs will have the current log
			old_logs = get_unread_update_logs(consumer.name, d.ref_doctype, d.docname)
			if old_logs:
				# Unread logs come oldest-first; reverse so they match the
				# newest-first order of `result` before the final flip.
				old_logs.reverse()
				result.extend(old_logs)
		else:
			result.append(d)
	for d in result:
		mark_consumer_read(update_log_name=d.name, consumer_name=consumer.name)
	# Deliver oldest-first so the consumer can replay changes in order.
	result.reverse()
return result | 30.130435 | 105 | 0.727273 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.background_jobs import get_jobs
from frappe.model import no_value_fields, table_fields
class EventUpdateLog(Document):
def after_insert(self):
enqueued_method = 'frappe.event_streaming.doctype.event_consumer.event_consumer.notify_event_consumers'
jobs = get_jobs()
if not jobs or enqueued_method not in jobs[frappe.local.site]:
frappe.enqueue(enqueued_method, doctype=self.ref_doctype, queue='long',
enqueue_after_commit=True)
def notify_consumers(doc, event):
if frappe.flags.in_install or frappe.flags.in_migrate:
return
consumers = check_doctype_has_consumers(doc.doctype)
if consumers:
if event=='after_insert':
doc.flags.event_update_log = make_event_update_log(doc, update_type='Create')
elif event=='on_trash':
make_event_update_log(doc, update_type='Delete')
else:
if not doc.flags.event_update_log:
diff = get_update(doc.get_doc_before_save(), doc)
if diff:
doc.diff = diff
make_event_update_log(doc, update_type='Update')
def check_doctype_has_consumers(doctype):
return frappe.cache_manager.get_doctype_map('Event Consumer Document Type', doctype,
dict(ref_doctype=doctype, status='Approved', unsubscribed=0))
def get_update(old, new, for_child=False):
if not new:
return None
out = frappe._dict(changed={}, added={}, removed={}, row_changed={})
for df in new.meta.fields:
if df.fieldtype in no_value_fields and df.fieldtype not in table_fields:
continue
old_value, new_value = old.get(df.fieldname), new.get(df.fieldname)
if df.fieldtype in table_fields:
old_row_by_name, new_row_by_name = make_maps(old_value, new_value)
out = check_for_additions(out, df, new_value, old_row_by_name)
out = check_for_deletions(out, df, old_value, new_row_by_name)
elif old_value != new_value:
out.changed[df.fieldname] = new_value
out = check_docstatus(out, old, new, for_child)
if any((out.changed, out.added, out.removed, out.row_changed)):
return out
return None
def make_event_update_log(doc, update_type):
if update_type != 'Delete':
data = frappe.as_json(doc) if not doc.get('diff') else frappe.as_json(doc.diff)
else:
data = None
return frappe.get_doc({
'doctype': 'Event Update Log',
'update_type': update_type,
'ref_doctype': doc.doctype,
'docname': doc.name,
'data': data
}).insert(ignore_permissions=True)
def make_maps(old_value, new_value):
old_row_by_name, new_row_by_name = {}, {}
for d in old_value:
old_row_by_name[d.name] = d
for d in new_value:
new_row_by_name[d.name] = d
return old_row_by_name, new_row_by_name
def check_for_additions(out, df, new_value, old_row_by_name):
for _i, d in enumerate(new_value):
if d.name in old_row_by_name:
diff = get_update(old_row_by_name[d.name], d, for_child=True)
if diff and diff.changed:
if not out.row_changed.get(df.fieldname):
out.row_changed[df.fieldname] = []
diff.changed['name'] = d.name
out.row_changed[df.fieldname].append(diff.changed)
else:
if not out.added.get(df.fieldname):
out.added[df.fieldname] = []
out.added[df.fieldname].append(d.as_dict())
return out
def check_for_deletions(out, df, old_value, new_row_by_name):
for d in old_value:
if d.name not in new_row_by_name:
if not out.removed.get(df.fieldname):
out.removed[df.fieldname] = []
out.removed[df.fieldname].append(d.name)
return out
def check_docstatus(out, old, new, for_child):
if not for_child and old.docstatus != new.docstatus:
out.changed['docstatus'] = new.docstatus
return out
def is_consumer_uptodate(update_log, consumer):
if update_log.update_type == 'Create':
return True
prev_logs = frappe.get_all(
'Event Update Log',
filters={
'ref_doctype': update_log.ref_doctype,
'docname': update_log.docname,
'creation': ['<', update_log.creation]
},
order_by='creation desc',
limit_page_length=1
)
if not len(prev_logs):
return False
prev_log_consumers = frappe.get_all(
'Event Update Log Consumer',
fields=['consumer'],
filters={
'parent': prev_logs[0].name,
'parenttype': 'Event Update Log',
'consumer': consumer.name
}
)
return len(prev_log_consumers) > 0
def mark_consumer_read(update_log_name, consumer_name):
update_log = frappe.get_doc('Event Update Log', update_log_name)
if len([x for x in update_log.consumers if x.consumer == consumer_name]):
return
frappe.get_doc(frappe._dict(
doctype='Event Update Log Consumer',
consumer=consumer_name,
parent=update_log_name,
parenttype='Event Update Log',
parentfield='consumers'
)).insert(ignore_permissions=True)
def get_unread_update_logs(consumer_name, dt, dn):
already_consumed = [x[0] for x in frappe.db.sql("""
SELECT
update_log.name
FROM `tabEvent Update Log` update_log
JOIN `tabEvent Update Log Consumer` consumer ON consumer.parent = update_log.name
WHERE
consumer.consumer = %(consumer)s
AND update_log.ref_doctype = %(dt)s
AND update_log.docname = %(dn)s
""", {'consumer': consumer_name, "dt": dt, "dn": dn}, as_dict=0)]
logs = frappe.get_all(
'Event Update Log',
fields=['update_type', 'ref_doctype',
'docname', 'data', 'name', 'creation'],
filters={
'ref_doctype': dt,
'docname': dn,
'name': ['not in', already_consumed]
},
order_by='creation'
)
return logs
@frappe.whitelist()
def get_update_logs_for_consumer(event_consumer, doctypes, last_update):
if isinstance(doctypes, str):
doctypes = frappe.parse_json(doctypes)
from frappe.event_streaming.doctype.event_consumer.event_consumer import has_consumer_access
consumer = frappe.get_doc('Event Consumer', event_consumer)
docs = frappe.get_list(
doctype='Event Update Log',
filters={'ref_doctype': ('in', doctypes),
'creation': ('>', last_update)},
fields=['update_type', 'ref_doctype',
'docname', 'data', 'name', 'creation'],
order_by='creation desc'
)
result = []
to_update_history = []
for d in docs:
if (d.ref_doctype, d.docname) in to_update_history:
continue
if not has_consumer_access(consumer=consumer, update_log=d):
continue
if not is_consumer_uptodate(d, consumer):
to_update_history.append((d.ref_doctype, d.docname))
old_logs = get_unread_update_logs(consumer.name, d.ref_doctype, d.docname)
if old_logs:
old_logs.reverse()
result.extend(old_logs)
else:
result.append(d)
for d in result:
mark_consumer_read(update_log_name=d.name, consumer_name=consumer.name)
result.reverse()
return result | true | true |
1c3171944bc9dc447ea8531115115926f846afe2 | 11,145 | py | Python | tests/python/unittest/test_graph_tuner_core.py | poldni/tvm | 3653e0294c962d400e4fcde536a350fda07ea78c | [
"Apache-2.0"
] | null | null | null | tests/python/unittest/test_graph_tuner_core.py | poldni/tvm | 3653e0294c962d400e4fcde536a350fda07ea78c | [
"Apache-2.0"
] | null | null | null | tests/python/unittest/test_graph_tuner_core.py | poldni/tvm | 3653e0294c962d400e4fcde536a350fda07ea78c | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# NOTE: We name this test file to start with test_graph_tuner
# to make it execute after zero_rank tensor test cases. This
# helps avoid topi arithmetic operator overloading issue:
# https://github.com/dmlc/tvm/issues/3240.
# TODO: restore the file name after this issue is resolved.
import os
import copy
import numpy as np
import tvm
import tvm.relay.testing
from tvm import autotvm
from tvm import relay
from tvm.autotvm.task import ConfigEntity
from tvm.autotvm.measure import MeasureResult, MeasureInput
from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner
from test_graph_tuner_utils import create_workload
def _create_data(target, dshape, dtype, layout):
    """Build a three-conv test network plus synthetic tuning records.

    Returns (net, records, ltf_records, ltf_keys, tasks):
      - net: the relay function (conv -> conv -> conv with a skip add)
      - records: one fake AutoTVM measurement per conv2d task
      - ltf_records: a single measured layout-transform record
      - ltf_keys: expected layout-transform workload keys
      - tasks: the conv2d tasks extracted from the network
    """
    data = relay.var("data", shape=dshape, dtype=dtype)
    w0 = relay.var("w0_weight")
    conv0 = relay.nn.conv2d(data, w0, channels=16, kernel_size=(3, 3), padding=(1, 1))
    w1 = relay.var("w1_weight")
    conv1 = relay.nn.conv2d(conv0, w1, channels=32, kernel_size=(1, 1))
    w2 = relay.var("w2_weight")
    conv2 = relay.nn.conv2d(conv1, w2, channels=32, kernel_size=(3, 3), padding=(1, 1))
    out = relay.add(conv1, conv2)
    net = relay.Function(relay.analysis.free_vars(out), out)
    net, params = relay.testing.create_workload(net)
    tasks = autotvm.task.extract_from_program(
        net, target=target, params=params, ops=(relay.op.nn.conv2d,))
    wkl_list = [
        create_workload((1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
        create_workload((1, 16, 8, 8), (32, 16, 1, 1), (1, 1), (0, 0), (1, 1), layout, layout, dtype, dtype),
        create_workload((1, 32, 8, 8), (32, 32, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
    ]
    costs = [0.04, 0.012, 0.03]
    # One fake schedule config per conv workload, built from its entity list.
    entity_lists = [
        [["tile_ic", "sp", [3, 1]], ["tile_oc", "sp", [4, 4]],
         ["tile_ow", "sp", [4, 2]], ["unroll_kw", "ot", True]],
        [["tile_ic", "sp", [2, 8]], ["tile_oc", "sp", [1, 32]],
         ["tile_oh", "ot", 1], ["tile_ow", "sp", [4, 2]]],
        [["tile_ic", "sp", [8, 4]], ["tile_oc", "sp", [4, 8]],
         ["tile_ow", "sp", [2, 4]], ["unroll_kw", "ot", False]],
    ]
    config_list = [
        ConfigEntity.from_json_dict({"i": -1, "c": None, "e": entities, "t": ""})
        for entities in entity_lists
    ]
    records = []
    for wkl, cost, config, task in zip(wkl_list, costs, config_list, tasks):
        task.workload = wkl
        ms_input = MeasureInput(target=target, task=task, config=config)
        ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
        records.append((ms_input, ms_output))
    # One measured layout-transform record, from which the graph tuner
    # infers the cost of every other transform.
    ltf_records = []
    ltf_arg = [tvm.placeholder((1, 64, 16, 16, 8), dtype=dtype), "NCHW8c", "NCHW512c"]
    ltf_arg = autotvm.task.topi_integration.serialize_args(ltf_arg)
    ltf_wkl = ('layout_transform',) + autotvm.task.args_to_workload(ltf_arg)
    ltf_task = copy.deepcopy(tasks[0])
    ltf_task.workload = ltf_wkl
    ms_input = MeasureInput(target=target, task=ltf_task, config=None)
    ms_output = MeasureResult(costs=(1.91224744e-05,), error_no=0, all_cost=-1, timestamp=-1)
    ltf_records.append((ms_input, ms_output))
    # Expected layout-transform workload keys for the tuned graph.
    ltf_keys = []
    for placeholder_args in [
            [tvm.placeholder((1, 4, 8, 8, 4), dtype=dtype), "NCHW4c", "NCHW8c"],
            [tvm.placeholder((1, 1, 8, 8, 32), dtype=dtype), "NCHW32c", "NCHW4c"],
            [tvm.placeholder((1, 4, 8, 8, 8), dtype=dtype), "NCHW8c", "NCHW32c"]]:
        serialized = autotvm.task.topi_integration.serialize_args(placeholder_args)
        ltf_keys.append(('layout_transform',) + autotvm.task.args_to_workload(serialized))
    return net, records, ltf_records, ltf_keys, tasks
def test_graph_tuner_layout_transform():
    """Check that inferred layout-transform times scale with tensor size.

    Only one layout-transform record is actually benchmarked; the tuner
    must infer every other transform's cost as
    (element count) * (average measured time per element).
    """
    log_file = "%s/test_tuner.log" % (os.getcwd())
    target = "llvm"
    dshape = (1, 3, 8, 8)
    dtype = "float32"
    layout = "NCHW"
    target_ops = [relay.nn.conv2d]
    g, records, ltf_records, ltf_keys, _ = _create_data(target, dshape, dtype, layout)
    executor = DPTuner(g, {"data": dshape}, records, target_ops, target=target, log_file=log_file)
    executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
    out = executor._layout_transform_perf_records
    # Average measured time per element across the benchmarked records.
    num_flops = 0
    total_time = 0
    for record in ltf_records:
        ltf_wkl = record[0].task.workload
        input_shape = ltf_wkl[1][1]
        flops = np.prod(input_shape)
        num_flops += flops
        total_time += record[1].costs[0]
    avg_time = total_time / num_flops
    for ltf_workload in out:
        input_shape = ltf_workload[1][1]
        # Consistency fix: use np.prod like the loop above instead of a
        # hand-rolled running product.
        flops = np.prod(input_shape)
        expected_time = flops * avg_time
        out_time = out[ltf_workload][1].costs[0]
        assert expected_time == out_time, "Inferred layout transformation time mismatch for %s: " \
            "expecting %f but got %f" % (str(ltf_workload), expected_time, out_time)
def test_DPTuner_run():
    """End-to-end run of the dynamic-programming graph tuner.

    Adds a second (faster or slower) config for each conv task so the
    tuner has a real choice to make, then checks it picks the configs
    that minimise total time including layout-transform costs.
    """
    log_file = "%s/test_tuner.log" % (os.getcwd())
    target = "llvm"
    dtype = "float32"
    layout = "NCHW"
    dshape = (1, 3, 8, 8)
    target_ops = [relay.nn.conv2d]
    g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout)
    mod = relay.module.Module()
    mod[mod.entry_func] = g
    # Alternative configs for the three conv tasks (appended to the
    # records built by _create_data).
    costs = [0.02, 0.02, 0.045]
    config_list = []
    cfg_dict = {"i": -1,
                "c": None,
                "e": [["tile_ic", "sp", [1, 3]],
                      ["tile_oc", "sp", [2, 8]],
                      ["tile_ow", "sp", [4, 2]],
                      ["unroll_kw", "ot", True]],
                "t": ""}
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {"i": -1,
                "c": None,
                "e": [["tile_ic", "sp", [4, 4]],
                      ["tile_oc", "sp", [2, 16]],
                      ["tile_oh", "ot", 1],
                      ["tile_ow", "sp", [4, 2]]],
                "t": ""}
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {"i": -1,
                "c": None,
                "e": [["tile_ic", "sp", [16, 2]],
                      ["tile_oc", "sp", [8, 4]],
                      ["tile_ow", "sp", [2, 4]],
                      ["unroll_kw", "ot", False]],
                "t": ""}
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    for cost, config, task in zip(costs, config_list, tasks):
        ms_input = MeasureInput(target=target, task=task, config=config)
        ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
        records.append((ms_input, ms_output))
    executor = DPTuner(mod, {"data": dshape}, records, target_ops, target, log_file=log_file)
    executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
    executor.run()
    # Optimal picks: the new config for conv0, original configs elsewhere.
    out = [record[0].config for record in executor.get_optimal_records()]
    expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
    assert expected_out == out, "Output mismatch: expecting %s but got %s" \
                               % (str(expected_out), str(out))
    assert os.path.isfile(log_file), "No log file with name %s exists." % log_file
def test_PBQPTuner_run():
    """End-to-end run of the PBQP-based graph tuner.

    Mirrors test_DPTuner_run but exercises the PBQP solver; the optimal
    record selection must be identical.
    """
    target = "llvm"
    dtype = "float32"
    layout = "NCHW"
    dshape = (1, 3, 8, 8)
    target_ops = [relay.nn.conv2d]
    g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout)
    # Alternative configs for the three conv tasks, appended to the
    # records built by _create_data.
    costs = [0.02, 0.02, 0.045]
    entity_lists = [
        [["tile_ic", "sp", [1, 3]], ["tile_oc", "sp", [2, 8]],
         ["tile_ow", "sp", [4, 2]], ["unroll_kw", "ot", True]],
        [["tile_ic", "sp", [4, 4]], ["tile_oc", "sp", [2, 16]],
         ["tile_oh", "ot", 1], ["tile_ow", "sp", [4, 2]]],
        [["tile_ic", "sp", [16, 2]], ["tile_oc", "sp", [8, 4]],
         ["tile_ow", "sp", [2, 4]], ["unroll_kw", "ot", False]],
    ]
    config_list = [
        ConfigEntity.from_json_dict({"i": -1, "c": None, "e": entities, "t": ""})
        for entities in entity_lists
    ]
    for cost, config, task in zip(costs, config_list, tasks):
        ms_input = MeasureInput(target=target, task=task, config=config)
        ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
        records.append((ms_input, ms_output))
    executor = PBQPTuner(g, {"data": dshape}, records, target_ops, target)
    executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
    executor.run()
    out = [record[0].config for record in executor.get_optimal_records()]
    expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
    assert expected_out == out, "Output mismatch: expecting %s but got %s" \
                               % (str(expected_out), str(out))
if __name__=="__main__":
    # Smoke-run the whole graph tuner suite when executed as a script
    # (outside a test runner).
    test_graph_tuner_layout_transform()
    test_DPTuner_run()
    test_PBQPTuner_run()
| 43.365759 | 109 | 0.575505 |
import os
import copy
import numpy as np
import tvm
import tvm.relay.testing
from tvm import autotvm
from tvm import relay
from tvm.autotvm.task import ConfigEntity
from tvm.autotvm.measure import MeasureResult, MeasureInput
from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner
from test_graph_tuner_utils import create_workload
def _create_data(target, dshape, dtype, layout):
data = relay.var("data", shape=dshape, dtype=dtype)
w0 = relay.var("w0_weight")
conv0 = relay.nn.conv2d(data, w0, channels=16, kernel_size=(3, 3), padding=(1, 1))
w1 = relay.var("w1_weight")
conv1 = relay.nn.conv2d(conv0, w1, channels=32, kernel_size=(1, 1))
w2 = relay.var("w2_weight")
conv2 = relay.nn.conv2d(conv1, w2, channels=32, kernel_size=(3, 3), padding=(1, 1))
out = relay.add(conv1, conv2)
net = relay.Function(relay.analysis.free_vars(out), out)
net, params = relay.testing.create_workload(net)
tasks = autotvm.task.extract_from_program(net,
target=target,
params=params,
ops=(relay.op.nn.conv2d,))
wkl_list = [
create_workload((1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
create_workload((1, 16, 8, 8), (32, 16, 1, 1), (1, 1), (0, 0), (1, 1), layout, layout, dtype, dtype),
create_workload((1, 32, 8, 8), (32, 32, 3, 3), (1, 1), (1, 1), (1, 1), layout, layout, dtype, dtype),
]
costs = [0.04, 0.012, 0.03]
config_list = []
cfg_dict = {"i": -1,
"c": None,
"e": [["tile_ic", "sp", [3, 1]],
["tile_oc", "sp", [4, 4]],
["tile_ow", "sp", [4, 2]],
["unroll_kw", "ot", True]],
"t": ""}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {"i": -1,
"c": None,
"e": [["tile_ic", "sp", [2, 8]],
["tile_oc", "sp", [1, 32]],
["tile_oh", "ot", 1],
["tile_ow", "sp", [4, 2]]],
"t": ""}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {"i": -1,
"c": None,
"e": [["tile_ic", "sp", [8, 4]],
["tile_oc", "sp", [4, 8]],
["tile_ow", "sp", [2, 4]],
["unroll_kw", "ot", False]],
"t": ""}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
records = []
for wkl, cost, config, task in zip(wkl_list, costs, config_list, tasks):
task.workload = wkl
ms_input = MeasureInput(target=target, task=task, config=config)
ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
records.append((ms_input, ms_output))
ltf_records = []
ltf_arg = [tvm.placeholder((1, 64, 16, 16, 8), dtype=dtype), "NCHW8c", "NCHW512c"]
ltf_arg = autotvm.task.topi_integration.serialize_args(ltf_arg)
ltf_wkl = ('layout_transform',) + autotvm.task.args_to_workload(ltf_arg)
ltf_task = copy.deepcopy(tasks[0])
ltf_task.workload = ltf_wkl
ms_input = MeasureInput(target=target, task=ltf_task, config=None)
ms_output = MeasureResult(costs=(1.91224744e-05,), error_no=0, all_cost=-1, timestamp=-1)
ltf_records.append((ms_input, ms_output))
ltf_keys = []
ltf_arg = [tvm.placeholder((1, 4, 8, 8, 4), dtype=dtype), "NCHW4c", "NCHW8c"]
ltf_arg = autotvm.task.topi_integration.serialize_args(ltf_arg)
ltf_wkl = ('layout_transform',) + autotvm.task.args_to_workload(ltf_arg)
ltf_keys.append(ltf_wkl)
ltf_arg = [tvm.placeholder((1, 1, 8, 8, 32), dtype=dtype), "NCHW32c", "NCHW4c"]
ltf_arg = autotvm.task.topi_integration.serialize_args(ltf_arg)
ltf_wkl = ('layout_transform',) + autotvm.task.args_to_workload(ltf_arg)
ltf_keys.append(ltf_wkl)
ltf_arg = [tvm.placeholder((1, 4, 8, 8, 8), dtype=dtype), "NCHW8c", "NCHW32c"]
ltf_arg = autotvm.task.topi_integration.serialize_args(ltf_arg)
ltf_wkl = ('layout_transform',) + autotvm.task.args_to_workload(ltf_arg)
ltf_keys.append(ltf_wkl)
return net, records, ltf_records, ltf_keys, tasks
def test_graph_tuner_layout_transform():
log_file = "%s/test_tuner.log" % (os.getcwd())
target = "llvm"
dshape = (1, 3, 8, 8)
dtype = "float32"
layout = "NCHW"
target_ops = [relay.nn.conv2d]
g, records, ltf_records, ltf_keys, _ = _create_data(target, dshape, dtype, layout)
executor = DPTuner(g, {"data": dshape}, records, target_ops, target=target, log_file=log_file)
executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
out = executor._layout_transform_perf_records
num_flops = 0
total_time = 0
for record in ltf_records:
ltf_wkl = record[0].task.workload
input_shape = ltf_wkl[1][1]
flops = np.prod(input_shape)
num_flops += flops
total_time += record[1].costs[0]
avg_time = total_time / num_flops
for ltf_workload in out:
input_shape = ltf_workload[1][1]
flops = 1
for i in input_shape:
flops *= i
expected_time = flops * avg_time
out_time = out[ltf_workload][1].costs[0]
assert expected_time == out_time, "Inferred layout transformation time mismatch for %s: " \
"expecting %f but got %f" % (str(ltf_workload), expected_time,
out_time)
def test_DPTuner_run():
log_file = "%s/test_tuner.log" % (os.getcwd())
target = "llvm"
dtype = "float32"
layout = "NCHW"
dshape = (1, 3, 8, 8)
target_ops = [relay.nn.conv2d]
g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout)
mod = relay.module.Module()
mod[mod.entry_func] = g
costs = [0.02, 0.02, 0.045]
config_list = []
cfg_dict = {"i": -1,
"c": None,
"e": [["tile_ic", "sp", [1, 3]],
["tile_oc", "sp", [2, 8]],
["tile_ow", "sp", [4, 2]],
["unroll_kw", "ot", True]],
"t": ""}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {"i": -1,
"c": None,
"e": [["tile_ic", "sp", [4, 4]],
["tile_oc", "sp", [2, 16]],
["tile_oh", "ot", 1],
["tile_ow", "sp", [4, 2]]],
"t": ""}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {"i": -1,
"c": None,
"e": [["tile_ic", "sp", [16, 2]],
["tile_oc", "sp", [8, 4]],
["tile_ow", "sp", [2, 4]],
["unroll_kw", "ot", False]],
"t": ""}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
for cost, config, task in zip(costs, config_list, tasks):
ms_input = MeasureInput(target=target, task=task, config=config)
ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
records.append((ms_input, ms_output))
executor = DPTuner(mod, {"data": dshape}, records, target_ops, target, log_file=log_file)
executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
executor.run()
out = [record[0].config for record in executor.get_optimal_records()]
expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
assert expected_out == out, "Output mismatch: expecting %s but got %s" \
% (str(expected_out), str(out))
assert os.path.isfile(log_file), "No log file with name %s exists." % log_file
def test_PBQPTuner_run():
    """End-to-end check of the PBQP graph tuner: seed it with three known
    configs/costs, run it, and verify the optimal records."""
    target = "llvm"
    dtype = "float32"
    layout = "NCHW"
    dshape = (1, 3, 8, 8)
    target_ops = [relay.nn.conv2d]
    g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout)
    # Three hand-picked knob settings, paired positionally with `costs`.
    entity_specs = [
        [["tile_ic", "sp", [1, 3]],
         ["tile_oc", "sp", [2, 8]],
         ["tile_ow", "sp", [4, 2]],
         ["unroll_kw", "ot", True]],
        [["tile_ic", "sp", [4, 4]],
         ["tile_oc", "sp", [2, 16]],
         ["tile_oh", "ot", 1],
         ["tile_ow", "sp", [4, 2]]],
        [["tile_ic", "sp", [16, 2]],
         ["tile_oc", "sp", [8, 4]],
         ["tile_ow", "sp", [2, 4]],
         ["unroll_kw", "ot", False]],
    ]
    costs = [0.02, 0.02, 0.045]
    config_list = [ConfigEntity.from_json_dict({"i": -1, "c": None, "e": spec, "t": ""})
                   for spec in entity_specs]
    # Append one measured record per (cost, config, task) triple.
    for cost, config, task in zip(costs, config_list, tasks):
        ms_input = MeasureInput(target=target, task=task, config=config)
        ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
        records.append((ms_input, ms_output))
    executor = PBQPTuner(g, {"data": dshape}, records, target_ops, target)
    executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
    executor.run()
    out = [record[0].config for record in executor.get_optimal_records()]
    expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
    assert expected_out == out, "Output mismatch: expecting %s but got %s" \
                                % (str(expected_out), str(out))
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    test_graph_tuner_layout_transform()
    test_DPTuner_run()
    test_PBQPTuner_run()
| true | true |
1c31721daec581d55dffa180c0285ef31b3daf63 | 29,053 | py | Python | cuegui/cuegui/DependWizard.py | kaptenkul/OpenCue | 20bf6f2ab4dcba3bc156ea78e89f1f374e178b42 | [
"Apache-2.0"
] | null | null | null | cuegui/cuegui/DependWizard.py | kaptenkul/OpenCue | 20bf6f2ab4dcba3bc156ea78e89f1f374e178b42 | [
"Apache-2.0"
] | 1 | 2020-09-09T20:39:24.000Z | 2020-09-09T20:39:24.000Z | cuegui/cuegui/DependWizard.py | kaptenkul/OpenCue | 20bf6f2ab4dcba3bc156ea78e89f1f374e178b42 | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wizard interface to setting up dependencies.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from builtins import map
from builtins import str
from builtins import range
import re
from PySide2 import QtCore
from PySide2 import QtWidgets
import FileSequence
import opencue
import cuegui.Cuedepend
import cuegui.Logger
import cuegui.Utils
import cuegui.ProgressDialog
logger = cuegui.Logger.getLogger(__file__)
__all__ = ["DependWizard"]
# These are the available types of dependencies, aliased from the
# opencue depend protobuf enum (opencue.api.depend_pb2).
JOJ = opencue.api.depend_pb2.JOB_ON_JOB
JOL = opencue.api.depend_pb2.JOB_ON_LAYER
JOF = opencue.api.depend_pb2.JOB_ON_FRAME
LOJ = opencue.api.depend_pb2.LAYER_ON_JOB
LOL = opencue.api.depend_pb2.LAYER_ON_LAYER
LOF = opencue.api.depend_pb2.LAYER_ON_FRAME
FOJ = opencue.api.depend_pb2.FRAME_ON_JOB
FOL = opencue.api.depend_pb2.FRAME_ON_LAYER
FOF = opencue.api.depend_pb2.FRAME_ON_FRAME
FBF = opencue.api.depend_pb2.FRAME_BY_FRAME
LOS = opencue.api.depend_pb2.LAYER_ON_SIM_FRAME
# Wizard-only pseudo type (a plain string, not a protobuf value): a
# frame-by-frame depend applied across all matching layers of the jobs.
JFBF = "JobFrameByFrame"
# This determines what order each page is displayed in (QWizard page ids;
# gaps left so pages can be inserted later without renumbering).
PAGE_SELECT_DEPEND_TYPE = 10
PAGE_SELECT_JOB_LAYER = 20
PAGE_SELECT_JOB_FRAME = 30
PAGE_SELECT_ONJOB = 40
PAGE_SELECT_ONLAYER = 50
PAGE_SELECT_ONFRAME = 60
PAGE_CONFIRMATION = 100
# This defines the displayed name for each dependency type
DEPEND_NAME = {JOJ: "Job On Job (soft depend)",
               JOL: "Job On Layer",
               JOF: "Job On Frame",
               JFBF: "Frame By Frame for all layers (Hard Depend)",
               LOJ: "Layer On Job",
               LOL: "Layer On Layer",
               LOF: "Layer On Frame",
               FOJ: "Frame On Job",
               FOL: "Frame On Layer",
               FOF: "Frame On Frame",
               FBF: "Frame By Frame",
               LOS: "Layer on Simulation Frame"}
# Title and body text for the progress dialog's cancel confirmation.
PROGRESS_TITLE = "Cancel setting up dependencies?"
PROGRESS_TEXT = "Are you sure you want to cancel setting up these dependencies?\n\n" + \
                "The dependencies that are already partially setup will still remain."
class DependWizard(QtWidgets.QWizard):
    """Wizard that walks the user through creating job/layer/frame
    dependencies for one or more jobs."""

    def __init__(self, parent, jobs, layers=None, frames=None):
        """
        @type  parent: QWidget
        @param parent: The parent widget for this wizard
        @type  jobs: list<opencue.wrappers.job.Job>
        @param jobs: Jobs that will receive the dependency; only jobs from
                     the same show as the first job are kept
        @type  layers: list or None
        @param layers: Optional layer wrappers that will receive the dependency
        @type  frames: list or None
        @param frames: Optional frame wrappers that will receive the dependency
        """
        QtWidgets.QWizard.__init__(self, parent)
        # Normalize here instead of using mutable default arguments
        # (shared-list pitfall).
        layers = layers or []
        frames = frames or []
        # Only allow jobs from one show
        jobs = [job for job in jobs if job.data.show == jobs[0].data.show]
        self.jobs = jobs
        self.layers = [layer.data.name for layer in layers]
        self.layerOptions = layers
        self.frames = [frame.data.name for frame in frames]
        self.dependType = None
        self.onJobOptions = []
        self.onJob = []
        self.onLayerOptions = []
        self.onLayer = []
        self.onFrame = []
        # Create the pages
        self.__pages = {}
        self.__pages[PAGE_SELECT_DEPEND_TYPE] = PageDependType(self, jobs, layers, frames)
        self.__pages[PAGE_SELECT_JOB_LAYER] = PageSelectLayer(self)
        self.__pages[PAGE_SELECT_JOB_FRAME] = PageSelectFrame(self)
        self.__pages[PAGE_SELECT_ONJOB] = PageSelectOnJob(self)
        self.__pages[PAGE_SELECT_ONLAYER] = PageSelectOnLayer(self)
        self.__pages[PAGE_SELECT_ONFRAME] = PageSelectOnFrame(self)
        self.__pages[PAGE_CONFIRMATION] = PageConfirmation(self, jobs, layers, frames)
        # Add the pages to the wizard
        for key in self.__pages:
            self.setPage(key, self.__pages[key])
        # Define the start id
        self.setStartId(PAGE_SELECT_DEPEND_TYPE)
        self.setWindowTitle("Dependency Wizard")
        self.setOption(QtWidgets.QWizard.IndependentPages, False)
        self._onJobOptionsPopulate()
        self.show()

    def _onJobOptionsPopulate(self):
        """Populates self.onJobOptions with the names of all jobs whose name
        starts with the first job's show prefix (the part before the first
        '-' in the job name)."""
        self.onJobOptions = []
        try:
            show = self.jobs[0].data.name.split('-')[0]
            self.onJobOptions = [name for name in sorted(opencue.api.getJobNames())
                                 if name.startswith(show)]
        except Exception as e:
            # Best effort: the wizard still opens with an empty job list.
            logger.critical("Failed getting list of jobs")
            list(map(logger.critical, cuegui.Utils.exceptionOutput(e)))
################################################################################
class AbstractWizardPage(QtWidgets.QWizardPage):
    """Base wizard page providing helpers for building a QGridLayout of
    labels, edits, combos and lists, plus utilities for turning wrapper
    objects into name lists and displaying them."""

    def __init__(self, parent):
        QtWidgets.QWizardPage.__init__(self, parent)
        self.setLayout(QtWidgets.QGridLayout(self))
        # Every widget created through an _add* helper is tracked here so
        # _removeAllWidgets can clear the page.
        self._widgets = []

    def _addLabel(self, text, row, col, rowSpan=1, columnSpan=1, align=QtCore.Qt.AlignLeft):
        """Adds a QLabel to the current WizardPage
        @type  text: str
        @param text: The text to display in the label
        @type  row: int
        @param row: The row to place the widget
        @type  col: int
        @param col: The column to place the widget
        @type  rowSpan: int
        @param rowSpan: The number of rows to span with the widget
        @type  columnSpan: int
        @param columnSpan: The number of columns to span with the widget
        @type  align: Qt.Alignment
        @param align: The text alignment
        @rtype:  QLabel
        @return: A reference to the new widget"""
        label = QtWidgets.QLabel(text, self)
        label.setAlignment(align)
        self.layout().addWidget(label, row, col, rowSpan, columnSpan)
        self._widgets.append(label)
        return label

    def _addTextEdit(self, row, col, text, height=None):
        """Adds a read-only QTextEdit to the current WizardPage
        @type  row: int
        @param row: The row to place the widget
        @type  col: int
        @param col: The column to place the widget
        @type  text: str
        @param text: The initial text to display
        @type  height: int
        @param height: The fixed height of the widget (default = None)
        @rtype:  QTextEdit
        @return: A reference to the new widget"""
        text_edit = QtWidgets.QTextEdit(text, self)
        text_edit.setReadOnly(True)
        text_edit.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                                      QtWidgets.QSizePolicy.Maximum))
        if height:
            text_edit.setFixedHeight(height)
        self.layout().addWidget(text_edit, row, col)
        self._widgets.append(text_edit)
        return text_edit

    def _addLineEdit(self, row, col, text):
        """Adds a line edit box to the current WizardPage
        @type  row: int
        @param row: The row to place the widget
        @type  col: int
        @param col: The column to place the widget
        @type  text: str
        @param text: The text to display in the edit box
        @rtype:  QLineEdit
        @return: A reference to the new widget"""
        edit = QtWidgets.QLineEdit(text, self)
        self.layout().addWidget(edit, row, col)
        self._widgets.append(edit)
        return edit

    def _addSpinBox(self, row, col, min, max, value):
        """Adds a spin box to the current WizardPage
        NOTE: the "min"/"max" parameter names shadow the builtins; kept
        unchanged for backward compatibility with keyword callers.
        @type  row: int
        @param row: The row to place the widget
        @type  col: int
        @param col: The column to place the widget
        @type  min: int
        @param min: The minimum number to allow
        @type  max: int
        @param max: The maximum number to allow
        @type  value: int
        @param value: The value to display initially
        @rtype:  QSpinBox
        @return: A reference to the new widget"""
        spin = QtWidgets.QSpinBox(self)
        spin.setRange(min, max)
        spin.setValue(value)
        self.layout().addWidget(spin, row, col)
        self._widgets.append(spin)
        return spin

    def _addCombo(self, row, col):
        """Adds a combo box to the current WizardPage.
        @type  row: int
        @param row: The row to place the widget
        @type  col: int
        @param col: The column to place the widget
        @rtype:  QComboBox
        @return: A reference to the new widget"""
        combo = QtWidgets.QComboBox()
        self.layout().addWidget(combo, row, col)
        self._widgets.append(combo)
        return combo

    def _addListWidget(self, row, col, selection=None):
        """Adds a QListWidget to the current WizardPage.
        @type  row: int
        @param row: The row to place the widget
        @type  col: int
        @param col: The column to place the widget
        @type  selection: QAbstractItemView.SelectionMode
        @param selection: Allowed selection type
        @rtype:  QListWidget
        @return: A reference to the new widget"""
        # Renamed from "list" to avoid shadowing the builtin.
        list_widget = QtWidgets.QListWidget(self)
        if selection:
            list_widget.setSelectionMode(selection)
        self.layout().addWidget(list_widget, row, col)
        self._widgets.append(list_widget)
        return list_widget

    def _getNames(self, items):
        """Returns a list of names for all items provided.
        @type  items: str, list<job>, list<layer>, list<frame> or list<str>
        @param items: Any items to return the names of
        @rtype:  list<str>
        @return: A list of names for the given items"""
        if not items:
            return []
        if isinstance(items, str):
            return [items]
        # Wrapper objects contribute item.data.name; anything without a
        # .data attribute is stringified.  Items that have .data but no
        # .data.name are dropped, matching the historical behavior.
        named = [item.data.name for item in items
                 if hasattr(item, "data") and hasattr(item.data, "name")]
        plain = [str(item) for item in items if not hasattr(item, "data")]
        return named + plain

    def _displayItems(self, name, items, row):
        """Displays a label description and a list of items.
        If only one item is given:
            The label will be "This %{name}:"
        If more than one item is given:
            The label will be "These %{name}s:"
        More than five items are shown in a scrollable QTextEdit; five or
        fewer are shown in a plain QLabel.
        @type  name: str
        @param name: The description of the item kind (e.g. "Job")
        @type  items: list<str> or list of wrapper objects
        @param items: The items to display
        @type  row: int
        @param row: The row to place the widgets on
        @rtype:  QTextEdit or QLabel or None
        @return: A reference to the widget displaying the list, or None if
                 there was nothing to display"""
        if items:
            if len(items) > 1:
                self._addLabel("These %ss:" % name, row, 0)
            else:
                self._addLabel("This %s:" % name, row, 0)
            if len(items) > 5:
                display = self._addTextEdit(row, 1, "")
            else:
                display = self._addLabel("", row, 1)
            if isinstance(items[0], str):
                display.setText("\n".join(items))
            else:
                display.setText("\n".join(self._getNames(items)))
            return display

    def _removeAllWidgets(self):
        """Removes and hides all widgets tracked in self._widgets."""
        # Iterate over a snapshot; the loop empties the live list, and
        # mutating a list while iterating it is unsafe.
        for widget in reversed(list(self._widgets)):
            self.layout().removeWidget(widget)
            self._widgets.remove(widget)
            widget.hide()
################################################################################
class PageDependType(AbstractWizardPage):
    """Asks the user what type of dependency to set up
    (PAGE_SELECT_DEPEND_TYPE)."""

    def __init__(self, parent, jobs, layers=None, frames=None):
        """
        @type  parent: QWizard
        @param parent: The wizard that owns this page
        @type  jobs: list<job>
        @param jobs: Jobs that will receive the dependency
        @type  layers: list or None
        @param layers: Optional layers that will receive the dependency
        @type  frames: list or None
        @param frames: Optional frames that will receive the dependency
        """
        AbstractWizardPage.__init__(self, parent)
        self.setTitle("Select Dependency Type")
        # Normalize instead of using mutable default arguments.
        layers = layers or []
        frames = frames or []
        # this should come from a field
        self.jobs = jobs
        self.layers = layers
        self.frames = frames
        self._displayItems("Job", jobs, 0)
        self._displayItems("Layer", layers, 1)
        self._displayItems("Frame", frames, 2)
        # The allowed depend types depend on how specific the selection is;
        # single-item selections unlock the more specific types.
        if frames:
            allowed_options = [FOJ, FOL, FOF]
        elif layers:
            allowed_options = [LOJ, LOL, LOF, FBF]
            if len(layers) == 1:
                allowed_options.extend([FOJ, FOL, FOF])
            allowed_options.extend([LOS])
        elif jobs:
            allowed_options = [JOJ, JOL, JOF, JFBF]
            if len(jobs) == 1:
                allowed_options.extend([LOJ, LOL, LOF, FBF, FOJ, FOL, FOF, LOS])
        else:
            # Nothing was selected; previously this fell through with
            # allowed_options unbound and raised UnboundLocalError.
            allowed_options = []
        # Add the group box for the dependency type options
        self.__groupBox = QtWidgets.QGroupBox()
        self.__groupLayout = QtWidgets.QGridLayout(self.__groupBox)
        # Add the options to the group box
        self.__options = {}
        for option in allowed_options:
            self.__options[option] = QtWidgets.QRadioButton(DEPEND_NAME[option])
            self.__groupLayout.addWidget(self.__options[option])
        if allowed_options:
            self.__options[allowed_options[0]].setChecked(True)
        self.layout().addWidget(self.__groupBox, 3, 0, 1, -1)

    def __msg(self):
        """Returns a phrase like "this job" / "these layers" describing the
        most specific selection, for use in the page subtitle."""
        for item in [("frame", self.wizard().frames),
                     ("layer", self.wizard().layers),
                     ("job", self.wizard().jobs)]:
            if len(item[1]) > 1:
                return "these %ss" % item[0]
            elif item[1]:
                return "this %s" % item[0]

    def initializePage(self):
        self.setSubTitle("What type of dependency would you like %s to have?" % self.__msg())
        # it is not respecting or providing my size hints otherwise
        self.wizard().setMinimumSize(500, 500)

    def validatePage(self):
        """Store the checked depend type on the wizard; reject if nothing
        is checked."""
        for option in self.__options:
            if self.__options[option].isChecked():
                self.wizard().dependType = option
                return True
        return False

    def nextId(self):
        """Returns the next page id
        @return: next page id
        @rtype: int"""
        if not self.wizard().dependType:
            return PAGE_SELECT_DEPEND_TYPE
        elif self.frames:
            return PAGE_SELECT_ONJOB
        elif len(self.layers) == 1 and \
             self.wizard().dependType in (FOJ, FOL, FOF):
            return PAGE_SELECT_JOB_FRAME
        elif self.layers:
            return PAGE_SELECT_ONJOB
        elif len(self.jobs) == 1 and \
             self.wizard().dependType in (LOJ, LOL, LOF, FOJ, FOL, FOF, FBF, LOS):
            return PAGE_SELECT_JOB_LAYER
        elif self.jobs:
            return PAGE_SELECT_ONJOB
        else:
            # Raise with a message instead of a bare RuntimeError().
            msg = ("error, no place to go: jobs:%s layers:%s frames:%s type:%s"
                   % (len(self.jobs), len(self.layers), len(self.frames),
                      self.wizard().dependType))
            logger.critical(msg)
            raise RuntimeError(msg)
################################################################################
class PageSelectLayer(AbstractWizardPage):
    """Asks the user which layer(s) of the job should receive the dependency
    (PAGE_SELECT_JOB_LAYER)."""

    def __init__(self, parent):
        AbstractWizardPage.__init__(self, parent)
        self.setTitle("Select Layer")
        self.setSubTitle("What layer needs the dependency?")
        self._addLabel("Layer:", 0, 0)
        self.__layerList = self._addListWidget(2, 0, QtWidgets.QAbstractItemView.MultiSelection)

    def initializePage(self):
        """Fill the list with the job's layers, re-selecting any layers
        already chosen on a previous pass through the wizard."""
        self.wizard().layerOptions = self.wizard().jobs[0].getLayers()
        QtWidgets.QWizardPage.initializePage(self)
        self.__layerList.clear()
        # _getNames already yields the plain name list; the previous
        # identity comprehension around it was redundant.
        self.__layerList.addItems(self._getNames(self.wizard().layerOptions))
        selected = set(self._getNames(self.wizard().onLayer))
        for num in range(self.__layerList.count()):
            item = self.__layerList.item(num)
            item.setSelected(str(item.text()) in selected)

    def validatePage(self):
        """Store the selected layer names on the wizard; require at least
        one selection."""
        self.wizard().layers = [str(self.__layerList.item(num).text())
                                for num in range(self.__layerList.count())
                                if self.__layerList.item(num).isSelected()]
        if self.wizard().layers:
            return True
        QtWidgets.QMessageBox.warning(self,
                                      "Warning",
                                      "Please select one or more layers or go back "
                                      "and change the dependency type",
                                      QtWidgets.QMessageBox.Ok)
        return False

    def nextId(self):
        """Returns the next page id
        @return: next page id
        @rtype: int"""
        if self.wizard().dependType in (FOJ, FOL, FOF):
            return PAGE_SELECT_JOB_FRAME
        else:
            return PAGE_SELECT_ONJOB
################################################################################
class PageSelectFrame(AbstractWizardPage):
    """Asks the user which frames of the chosen layer should receive the
    dependency (PAGE_SELECT_JOB_FRAME)."""

    def __init__(self, parent):
        AbstractWizardPage.__init__(self, parent)
        self.setTitle("Select Frame")
        self.setSubTitle("What frames need the dependency?")
        self._addLabel("Frame:", 0, 0)
        # NOTE(review): the edit is placed in grid column 9 while other
        # pages use column 0 -- possibly a typo; verify the layout.
        self.__frameEdit = self._addLineEdit(1, 9, "1")
        self.registerField("frame", self.__frameEdit)

    def initializePage(self):
        QtWidgets.QWizardPage.initializePage(self)

    def validatePage(self):
        """Parse the frame spec into self.wizard().frames as a list of
        ints; reject the page when the spec is empty or unparsable."""
        frame_spec = str(self.field("frame"))
        if not frame_spec:
            return False
        try:
            frame_set = FileSequence.FrameSet(frame_spec)
            frame_set.normalize()
            self.wizard().frames = [int(frame) for frame in frame_set.getAll()]
            return True
        except Exception as exc:
            list(map(logger.warning, cuegui.Utils.exceptionOutput(exc)))
        return False

    def nextId(self):
        """Returns the next page id
        @return: next page id
        @rtype: int"""
        return PAGE_SELECT_ONJOB
################################################################################
class PageSelectOnJob(AbstractWizardPage):
    """Asks the user which job(s) should be depended on
    (PAGE_SELECT_ONJOB)."""

    def __init__(self, parent):
        AbstractWizardPage.__init__(self, parent)
        self.setTitle("Select Job(s) to Depend On")
        self.setSubTitle("What job(s) should it depend on?")
        self._addLabel("Depend on Job:", 0, 0)
        self.__jobFilterLineEdit = self._addLineEdit(2, 0, "")
        self.__jobFilterLineEdit.textChanged.connect(self.filterJobs)
        self.__jobList = self._addListWidget(3, 0)

    def filterJobs(self, text):
        """Repopulate the job list with jobs whose names match the filter.
        The filter is treated as a case-insensitive regular expression;
        while it is invalid (common mid-typing, e.g. a lone "("), a plain
        case-insensitive substring match is used instead of raising.
        @type  text: str
        @param text: The current contents of the filter line edit"""
        # Exclude job names that would cause a job to depend on itself
        exclude = set()
        if self.wizard().dependType in (JOJ, LOJ, FOJ, JFBF):
            for job in self.wizard().jobs:
                exclude.add(job.data.name)
        pattern = str(text)
        try:
            matcher = re.compile(pattern, re.IGNORECASE)

            def matches(name):
                return matcher.search(name) is not None
        except re.error:
            # Invalid regex: fall back to substring matching so typing
            # "(" does not raise inside the Qt slot.
            lowered = pattern.lower()

            def matches(name):
                return lowered in name.lower()
        self.__jobList.clear()
        self.__jobList.addItems([job for job in self.wizard().onJobOptions
                                 if matches(job) and job not in exclude])

    def initializePage(self):
        # If the filter edit box is empty, populate it with SHOW-SHOT-USER_
        # based on the first job selected to receive the dependency
        if not self.__jobFilterLineEdit.text():
            self.__jobFilterLineEdit.setText(self.wizard().jobs[0].data.name.split("_")[0] + "_")
        # Job-level depend types may target several jobs at once.
        if self.wizard().dependType in (JOJ, LOJ, FOJ, JFBF):
            self.__jobList.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
        else:
            self.__jobList.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        for num in range(self.__jobList.count()):
            item = self.__jobList.item(num)
            item.setSelected(str(item.text()) in self.wizard().onJob)
        QtWidgets.QWizardPage.initializePage(self)

    def validatePage(self):
        """Store the selected job names on the wizard; require at least
        one selection."""
        self.wizard().onJob = [str(self.__jobList.item(num).text())
                               for num in range(self.__jobList.count())
                               if self.__jobList.item(num).isSelected()]
        if self.wizard().onJob:
            return True
        QtWidgets.QMessageBox.warning(self,
                                      "Warning",
                                      "Please select one or more jobs or go back "
                                      "and change the dependency type",
                                      QtWidgets.QMessageBox.Ok)
        return False

    def nextId(self):
        """Returns the next page id
        @return: next page id
        @rtype: int"""
        if self.wizard().dependType in (JOL, JOF, LOL, LOF, FOL, FOF, FBF, LOS):
            return PAGE_SELECT_ONLAYER
        else:
            return PAGE_CONFIRMATION
################################################################################
class PageSelectOnLayer(AbstractWizardPage):
    """Asks the user which layer(s) should be depended on
    (PAGE_SELECT_ONLAYER)."""

    # Service names that mark a layer as simulation-capable, for filtering
    # the targets of a layer-on-sim-frame depend.
    SIM_SERVICES = frozenset(("simulation", "simulationhi", "houdini"))

    def __init__(self, parent):
        AbstractWizardPage.__init__(self, parent)
        self.setTitle("Select Layer to Depend On")
        self.setSubTitle("What Layer should it depend on?")
        self._addLabel("Depend on Layer:", 0, 0)
        self.__onLayerList = self._addListWidget(1, 0)

    def initializePage(self):
        """Fill the list with the target job's layers, filtered to
        simulation layers for LOS depends, re-selecting prior choices."""
        QtWidgets.QWizardPage.initializePage(self)
        self.wizard().onLayerOptions = opencue.api.findJob(self.wizard().onJob[0]).getLayers()
        if self.wizard().dependType in (LOS,):
            # Layer-on-sim-frame depends may only target simulation layers.
            self.wizard().onLayerOptions = [
                layer for layer in self.wizard().onLayerOptions
                if not self.SIM_SERVICES.isdisjoint(layer.data.services)]
        if self.wizard().dependType in (JOL, LOL, FOL, FBF, JOF, LOF, FOF):
            self.__onLayerList.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
        else:
            self.__onLayerList.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.__onLayerList.clear()
        # _getNames already yields the plain name list; the previous
        # identity comprehension around it was redundant.
        self.__onLayerList.addItems(self._getNames(self.wizard().onLayerOptions))
        selected = set(self._getNames(self.wizard().onLayer))
        for num in range(self.__onLayerList.count()):
            item = self.__onLayerList.item(num)
            item.setSelected(str(item.text()) in selected)

    def validatePage(self):
        """Store the selected layer names on the wizard; require at least
        one selection."""
        self.wizard().onLayer = [str(self.__onLayerList.item(num).text())
                                 for num in range(self.__onLayerList.count())
                                 if self.__onLayerList.item(num).isSelected()]
        if self.wizard().onLayer:
            return True
        QtWidgets.QMessageBox.warning(self,
                                      "Warning",
                                      "Please select one or more layers or go back "
                                      "and change the dependency type",
                                      QtWidgets.QMessageBox.Ok)
        return False

    def nextId(self):
        """Returns the next page id
        @return: next page id
        @rtype: int"""
        if self.wizard().dependType in (JOF, LOF, FOF, LOS):
            return PAGE_SELECT_ONFRAME
        else:
            return PAGE_CONFIRMATION
################################################################################
class PageSelectOnFrame(AbstractWizardPage):
    """Asks the user which frames should be depended on
    (PAGE_SELECT_ONFRAME)."""

    def __init__(self, parent):
        AbstractWizardPage.__init__(self, parent)
        self.setTitle("Select Frame to Depend On")
        self.setSubTitle("What Frames should it depend on?")
        self._addLabel("Depend on Frame:", 0, 0)
        self.__frameEdit = self._addLineEdit(1, 0, "1")
        self.registerField("onFrame", self.__frameEdit)
        self.setField("onFrame", "")

    def initializePage(self):
        QtWidgets.QWizardPage.initializePage(self)

    def validatePage(self):
        """Parse the frame spec into self.wizard().onFrame as a list of
        ints; reject the page when the spec is empty or unparsable."""
        frame_spec = str(self.field("onFrame"))
        if not frame_spec:
            return False
        try:
            frame_set = FileSequence.FrameSet(frame_spec)
            frame_set.normalize()
            self.wizard().onFrame = [int(frame) for frame in frame_set.getAll()]
            return True
        except Exception as exc:
            list(map(logger.warning, cuegui.Utils.exceptionOutput(exc)))
        return False

    def nextId(self):
        """Returns the next page id
        @return: next page id
        @rtype: int"""
        return PAGE_CONFIRMATION
################################################################################
class PageConfirmation(AbstractWizardPage):
    """Final summary page; on acceptance it queues the dependency work and
    launches a progress dialog to create the depends (PAGE_CONFIRMATION)."""
    def __init__(self, parent, jobs, layers, frames):
        # jobs/layers/frames are currently unused; the page reads the final
        # selections from the wizard in initializePage instead.
        AbstractWizardPage.__init__(self, parent)
        self.setTitle("Confirmation")
        self.setSubTitle("Are you sure?")
    def initializePage(self):
        """Rebuild the summary of what will depend on what, based on the
        wizard's current selections. The layer/frame rows are only shown
        for depend types that involve them."""
        self._removeAllWidgets()
        self._displayItems("Dependency type", [DEPEND_NAME[self.wizard().dependType]], 0)
        self._addLabel("", 1, 0)
        self._displayItems("Job", self.wizard().jobs, 2)
        if self.wizard().dependType in (LOJ, LOL, LOF, FOJ, FOL, FOF, FBF, LOS):
            self._displayItems("Layer", self.wizard().layers, 3)
        if self.wizard().dependType in (FOJ, FOL, FOF, LOS):
            self._displayItems("Frame", self.wizard().frames, 4)
        self._addLabel("", 5, 0)
        self._addLabel("Depends on:", 6, 0, 1, -1, QtCore.Qt.AlignCenter)
        self._displayItems("Job", self.wizard().onJob, 7)
        if self.wizard().dependType in (JOL, JOF, LOL, LOF, FOL, FOF, FBF, LOS):
            self._displayItems("Layer", self.wizard().onLayer, 8)
        if self.wizard().dependType in (JOF, LOF, FOF, LOS):
            self._displayItems("Frame", self.wizard().onFrame, 9)
    def validatePage(self):
        """Queue one work item per dependency combination and launch a
        progress dialog to run them. Always accepts the page; the actual
        depend creation happens asynchronously in the dialog."""
        # Just names:
        jobs = self._getNames(self.wizard().jobs)
        layers = self._getNames(self.wizard().layers)
        frames = self._getNames(self.wizard().frames)
        onJobs = self.wizard().onJob
        # [None] placeholders keep the nested loops below running exactly
        # once when no target layer/frame was selected.
        onLayers = self.wizard().onLayer or [None]
        onFrames = self.wizard().onFrame or [None]
        self.work = []
        if self.wizard().dependType == JFBF:
            # Hard depend: pair up layers of the same type across each
            # target job (onLayers is replaced per target job here).
            for onJob in onJobs:
                onLayers = opencue.api.findJob(onJob).getLayers()
                for job in jobs:
                    for layer in opencue.api.findJob(job).getLayers():
                        for onLayer in onLayers:
                            if layer.data.type == onLayer.data.type:
                                self.__addDependWork(layer, onLayer)
            cuegui.ProgressDialog.ProgressDialog(
                "Setting up Hard Depend",
                self.__createFrameByFrameDepend,
                self.work,
                2,
                PROGRESS_TITLE,
                PROGRESS_TEXT,
                self.parent())
            return True
        elif frames:
            for onJob in onJobs:
                for onLayer in onLayers:
                    for framelayer in frames:
                        # Frame entries may be "<frame>-<layer>"; otherwise
                        # the single selected layer applies.
                        if framelayer.find("-") != -1:
                            frame, layer = framelayer.split("-")
                        else:
                            frame = framelayer
                            layer = layers[0]
                        for onFrame in onFrames:
                            # NOTE(review): jobs[0] assumes a single receiving
                            # job on the frame/layer paths, matching the
                            # wizard's page flow -- verify if that changes.
                            self.__addDependWork(self.wizard().dependType, jobs[0], layer, int(frame), onJob, onLayer, onFrame)
        elif layers:
            for onJob in onJobs:
                for onLayer in onLayers:
                    for layer in layers:
                        for onFrame in onFrames:
                            self.__addDependWork(self.wizard().dependType, jobs[0], layer, None, onJob, onLayer, onFrame)
        elif jobs:
            for onJob in onJobs:
                for onLayer in onLayers:
                    for job in jobs:
                        for onFrame in onFrames:
                            self.__addDependWork(self.wizard().dependType, job, None, None, onJob, onLayer, onFrame)
        cuegui.ProgressDialog.ProgressDialog(
            "Setting up dependencies",
            cuegui.Cuedepend.createDepend,
            self.work,
            2,
            PROGRESS_TITLE,
            PROGRESS_TEXT,
            self.parent())
        return True
    def __addDependWork(self, *args):
        """Adds arguments for a call to Cuedepend.createDepend to the work
        list (or a (layer, onLayer) pair for the JFBF path).
        @type  args: tuple
        @param args: The arguments required by the worker callback"""
        self.work.append(args)
    def __createFrameByFrameDepend(self, layer, onLayer):
        """A function callback provided to the ProgressDialog that sets up a
        frame by frame dependency
        @type  layer: opencue.wrappers.layer.Layer
        @param layer: The layer that contains the frames that will have the dependency
        @type  onLayer: opencue.wrappers.layer.Layer
        @param onLayer: The layer that contains that frames that will be depended on"""
        layer.createFrameByFrameDependency(onLayer)
| 38.027487 | 208 | 0.592985 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from builtins import map
from builtins import str
from builtins import range
import re
from PySide2 import QtCore
from PySide2 import QtWidgets
import FileSequence
import opencue
import cuegui.Cuedepend
import cuegui.Logger
import cuegui.Utils
import cuegui.ProgressDialog
logger = cuegui.Logger.getLogger(__file__)
__all__ = ["DependWizard"]
JOJ = opencue.api.depend_pb2.JOB_ON_JOB
JOL = opencue.api.depend_pb2.JOB_ON_LAYER
JOF = opencue.api.depend_pb2.JOB_ON_FRAME
LOJ = opencue.api.depend_pb2.LAYER_ON_JOB
LOL = opencue.api.depend_pb2.LAYER_ON_LAYER
LOF = opencue.api.depend_pb2.LAYER_ON_FRAME
FOJ = opencue.api.depend_pb2.FRAME_ON_JOB
FOL = opencue.api.depend_pb2.FRAME_ON_LAYER
FOF = opencue.api.depend_pb2.FRAME_ON_FRAME
FBF = opencue.api.depend_pb2.FRAME_BY_FRAME
LOS = opencue.api.depend_pb2.LAYER_ON_SIM_FRAME
JFBF = "JobFrameByFrame"
PAGE_SELECT_DEPEND_TYPE = 10
PAGE_SELECT_JOB_LAYER = 20
PAGE_SELECT_JOB_FRAME = 30
PAGE_SELECT_ONJOB = 40
PAGE_SELECT_ONLAYER = 50
PAGE_SELECT_ONFRAME = 60
PAGE_CONFIRMATION = 100
DEPEND_NAME = {JOJ: "Job On Job (soft depend)",
JOL: "Job On Layer",
JOF: "Job On Frame",
JFBF: "Frame By Frame for all layers (Hard Depend)",
LOJ: "Layer On Job",
LOL: "Layer On Layer",
LOF: "Layer On Frame",
FOJ: "Frame On Job",
FOL: "Frame On Layer",
FOF: "Frame On Frame",
FBF: "Frame By Frame",
LOS: "Layer on Simulation Frame"}
PROGRESS_TITLE = "Cancel setting up dependencies?"
PROGRESS_TEXT = "Are you sure you want to cancel setting up these dependencies?\n\n" + \
"The dependencies that are already partially setup will still remain."
class DependWizard(QtWidgets.QWizard):
    """Wizard that walks the user through creating job/layer/frame
    dependencies for one or more jobs.

    (This is a comment-stripped duplicate copy of the class defined
    earlier in this dump.)"""
    # NOTE(review): mutable default arguments below are the classic Python
    # pitfall; safe here only because they are never mutated.
    def __init__(self, parent, jobs, layers = [], frames = []):
        QtWidgets.QWizard.__init__(self, parent)
        # Only keep jobs from the same show as the first job.
        jobs = [job for job in jobs if job.data.show == jobs[0].data.show]
        self.jobs = jobs
        self.layers = [layer.data.name for layer in layers]
        self.layerOptions = layers
        self.frames = [frame.data.name for frame in frames]
        self.dependType = None
        self.onJobOptions = []
        self.onJob = []
        self.onLayerOptions = []
        self.onLayer = []
        self.onFrame = []
        # Create and register all wizard pages.
        self.__pages = {}
        self.__pages [PAGE_SELECT_DEPEND_TYPE] = PageDependType(self, jobs, layers, frames)
        self.__pages [PAGE_SELECT_JOB_LAYER] = PageSelectLayer(self)
        self.__pages [PAGE_SELECT_JOB_FRAME] = PageSelectFrame(self)
        self.__pages [PAGE_SELECT_ONJOB] = PageSelectOnJob(self)
        self.__pages [PAGE_SELECT_ONLAYER] = PageSelectOnLayer(self)
        self.__pages [PAGE_SELECT_ONFRAME] = PageSelectOnFrame(self)
        self.__pages [PAGE_CONFIRMATION] = PageConfirmation(self, jobs, layers, frames)
        for key in self.__pages :
            self.setPage(key, self.__pages[key])
        self.setStartId(PAGE_SELECT_DEPEND_TYPE)
        self.setWindowTitle("Dependency Wizard")
        self.setOption(QtWidgets.QWizard.IndependentPages, False)
        self._onJobOptionsPopulate()
        self.show()
    def _onJobOptionsPopulate(self):
        """Populates self.onJobOptions with the names of all jobs whose name
        starts with the first job's show prefix (text before the first '-')."""
        self.onJobOptions = []
        try:
            show = self.jobs[0].data.name.split('-')[0]
            self.onJobOptions = [name for name in sorted(opencue.api.getJobNames())
                                 if name.startswith(show)]
        except Exception as e:
            # Best effort: the wizard still opens with an empty job list.
            logger.critical("Failed getting list of jobs")
            list(map(logger.critical, cuegui.Utils.exceptionOutput(e)))
| true | true |
1c31733816b161a9299c660c1cca950063337088 | 315 | py | Python | tests/tests/base.py | aryabartar/event-action | a3f0cec7813b77455e900965c088668653cd3562 | [
"MIT"
] | 1 | 2021-09-26T10:52:36.000Z | 2021-09-26T10:52:36.000Z | tests/tests/base.py | aryabartar/event-action | a3f0cec7813b77455e900965c088668653cd3562 | [
"MIT"
] | null | null | null | tests/tests/base.py | aryabartar/event-action | a3f0cec7813b77455e900965c088668653cd3562 | [
"MIT"
] | null | null | null | from django.test import TestCase
class TestBase(TestCase):
    """Base test case providing helpers for asserting on mock call activity."""
    def assert_calls(self, mocked_function, call_value):
        """Assert that *mocked_function* was called with *call_value* at
        least once (delegates to Mock.assert_any_call)."""
        mocked_function.assert_any_call(call_value)
    def assert_not_calls(self, *args, **kwargs):
        """Assert the inverse: assert_calls must fail for the same arguments."""
        with self.assertRaises(AssertionError):
            self.assert_calls(*args, **kwargs)
| 28.636364 | 56 | 0.71746 | from django.test import TestCase
class TestBase(TestCase):
    """Base test case providing helpers for asserting on mock call activity.

    (Comment-stripped duplicate copy of the class defined just above.)"""
    def assert_calls(self, mocked_function, call_value):
        """Assert that *mocked_function* was called with *call_value* at
        least once (delegates to Mock.assert_any_call)."""
        mocked_function.assert_any_call(call_value)
    def assert_not_calls(self, *args, **kwargs):
        """Assert the inverse: assert_calls must fail for the same arguments."""
        with self.assertRaises(AssertionError):
            self.assert_calls(*args, **kwargs)
| true | true |
1c317361d7bb04b24a48579f21c113b2fc37df34 | 4,399 | py | Python | docs/source/conf.py | EmreOzkose/k2 | 818b138b33eabe440601df8910a2b97ac088594b | [
"Apache-2.0"
] | 1 | 2021-09-27T06:25:10.000Z | 2021-09-27T06:25:10.000Z | docs/source/conf.py | jimbozhang/k2 | eeeabf187aae5fb4bb91dc66dada32a0e555db6c | [
"Apache-2.0"
] | 1 | 2021-09-14T08:01:50.000Z | 2021-09-14T08:01:50.000Z | docs/source/conf.py | jimbozhang/k2 | eeeabf187aae5fb4bb91dc66dada32a0e555db6c | [
"Apache-2.0"
] | 1 | 2021-09-03T07:26:43.000Z | 2021-09-03T07:26:43.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
import sys
sys.path.insert(0, os.path.abspath('../../k2/python'))
sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'k2'
copyright = '2020, k2 development team'
author = 'k2 development team'
def get_version():
    """Extract the k2 version string from the top-level CMakeLists.txt.

    Looks for a line of the form ``set(K2_VERSION "x.y.z")`` and returns
    the value with surrounding quotes stripped.

    Raises:
        RuntimeError: if no K2_VERSION line is found (previously this
            surfaced as an opaque AttributeError on None).
    """
    cmake_file = '../../CMakeLists.txt'
    with open(cmake_file) as f:
        content = f.read()
    match = re.search(r'set\(K2_VERSION (.*)\)', content)
    if match is None:
        raise RuntimeError('Could not find K2_VERSION in %s' % cmake_file)
    return match.group(1).strip('"')
# The full version, including alpha/beta/rc tags
version = get_version()
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'recommonmark',
    'sphinx.ext.autodoc',
    'sphinx.ext.githubpages',
    'sphinx.ext.linkcode',
    'sphinx.ext.napoleon',
    'sphinx_autodoc_typehints',
    'sphinx_rtd_theme',
    'sphinxcontrib.bibtex',
]
# BibTeX database(s) used by sphinxcontrib.bibtex citations.
bibtex_bibfiles = ['refs.bib']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Both reStructuredText and (via recommonmark) Markdown sources are accepted.
source_suffix = {
    '.rst': 'restructuredtext',
    '.md': 'markdown',
}
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['installation/images/*.md']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_show_sourcelink = True
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
pygments_style = 'sphinx'
numfig = True
# Enables the "Edit on GitHub" links rendered by the RTD theme.
html_context = {
    'display_github': True,
    'github_user': 'k2-fsa',
    'github_repo': 'k2',
    'github_version': 'master',
    'conf_py_path': '/k2/docs/source/',
}
# refer to
# https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html
html_theme_options = {
    'logo_only': False,
    'display_version': True,
    'prev_next_buttons_location': 'bottom',
    'style_external_links': True,
}
# Defaults applied to every autodoc directive.
autodoc_default_options = {
    'content': 'both',
    'members': None,
    'member-order': 'bysource',
    'special-members': '__init__',
    'undoc-members': True,
    'exclude-members': '__weakref__'
}
# Resolve function for the linkcode extension.
# Modified from https://github.com/rwth-i6/returnn/blob/master/docs/conf.py
def linkcode_resolve(domain, info):
    """Map a documented Python object to its source URL on GitHub.

    Called by ``sphinx.ext.linkcode`` for every documented object.

    Args:
      domain: The Sphinx domain being documented ('py', 'c', ...).
        Only 'py' is handled here.
      info: Dict with keys ``'module'`` (dotted module name) and
        ``'fullname'`` (dotted object path within that module).

    Returns:
      A ``https://github.com/k2-fsa/k2/...`` URL including a
      ``#Lstart-Lend`` line fragment, or ``None`` when no source link
      can be produced (non-Python domain, empty module, lookup failure,
      or an object from the compiled ``_k2`` extension).
    """
    def find_source():
        # try to find the file and line number, based on code from numpy:
        # https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L286
        obj = sys.modules[info['module']]
        for part in info['fullname'].split('.'):
            obj = getattr(obj, part)
        import inspect
        import os
        fn = inspect.getsourcefile(obj)
        fn = os.path.relpath(fn, start='k2')
        source, lineno = inspect.getsourcelines(obj)
        return fn, lineno, lineno + len(source) - 1
    if domain != 'py' or not info['module']:
        return None
    try:
        filename = '{}#L{}-L{}'.format(*find_source())
    except Exception:
        return None
    # Objects implemented in the compiled _k2 extension have no Python
    # source file to link to.
    if '_k2' in filename:
        return None
    # Strip everything before the 'k2' package root so the path is
    # relative to k2/python/ in the repository.
    idx = filename.rfind('k2')
    filename = filename[idx:]
    # BUG FIX: the previous version returned a literal placeholder and
    # ignored the computed ``filename``; interpolate it so the generated
    # links actually point at the source file and line range.
    return f'https://github.com/k2-fsa/k2/blob/master/k2/python/{filename}'
| 29.92517 | 79 | 0.654012 |
import os
import re
import sys
sys.path.insert(0, os.path.abspath('../../k2/python'))
sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# Basic project metadata shown throughout the generated documentation.
project = 'k2'
copyright = '2020, k2 development team'
author = 'k2 development team'
def get_version(cmake_file='../../CMakeLists.txt'):
    """Extract the project version from the top-level CMakeLists.txt.

    The k2 build declares its version with ``set(K2_VERSION "x.y")``;
    this parses that line so the docs always match the build.

    Args:
      cmake_file: Path of the CMakeLists.txt to parse.  Defaults to the
        repository root, relative to this docs/source directory.

    Returns:
      The version string with surrounding double quotes stripped,
      e.g. ``'1.2'`` from ``set(K2_VERSION "1.2")``.

    Raises:
      ValueError: If no ``set(K2_VERSION ...)`` line is found (clearer
        than the AttributeError the previous version raised on a failed
        regex match).
    """
    with open(cmake_file) as f:
        content = f.read()
    match = re.search(r'set\(K2_VERSION (.*)\)', content)
    if match is None:
        raise ValueError(
            'set(K2_VERSION ...) not found in {}'.format(cmake_file))
    return match.group(1).strip('"')
# The full version, including alpha/beta/rc tags
version = get_version()
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'recommonmark',
    'sphinx.ext.autodoc',
    'sphinx.ext.githubpages',
    'sphinx.ext.linkcode',
    'sphinx.ext.napoleon',
    'sphinx_autodoc_typehints',
    'sphinx_rtd_theme',
    'sphinxcontrib.bibtex',
]
# Bibliography file(s) used by sphinxcontrib.bibtex citations.
bibtex_bibfiles = ['refs.bib']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Both reStructuredText and (via recommonmark) Markdown sources are accepted.
source_suffix = {
    '.rst': 'restructuredtext',
    '.md': 'markdown',
}
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['installation/images/*.md']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_show_sourcelink = True
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
pygments_style = 'sphinx'
# Number figures/tables so they can be cross-referenced with :numref:.
numfig = True
# Values consumed by the RTD theme to build "Edit on GitHub" links.
html_context = {
    'display_github': True,
    'github_user': 'k2-fsa',
    'github_repo': 'k2',
    'github_version': 'master',
    'conf_py_path': '/k2/docs/source/',
}
# refer to
# https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html
html_theme_options = {
    'logo_only': False,
    'display_version': True,
    'prev_next_buttons_location': 'bottom',
    'style_external_links': True,
}
# Defaults applied to every autodoc directive (members, ordering, etc.).
autodoc_default_options = {
    'content': 'both',
    'members': None,
    'member-order': 'bysource',
    'special-members': '__init__',
    'undoc-members': True,
    'exclude-members': '__weakref__'
}
# Resolve function for the linkcode extension.
# Modified from https://github.com/rwth-i6/returnn/blob/master/docs/conf.py
def linkcode_resolve(domain, info):
    """Map a documented Python object to its source URL on GitHub.

    Called by ``sphinx.ext.linkcode`` for every documented object.

    Args:
      domain: The Sphinx domain being documented ('py', 'c', ...).
        Only 'py' is handled here.
      info: Dict with keys ``'module'`` (dotted module name) and
        ``'fullname'`` (dotted object path within that module).

    Returns:
      A ``https://github.com/k2-fsa/k2/...`` URL including a
      ``#Lstart-Lend`` line fragment, or ``None`` when no source link
      can be produced (non-Python domain, empty module, lookup failure,
      or an object from the compiled ``_k2`` extension).
    """
    def find_source():
        # try to find the file and line number, based on code from numpy:
        # https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L286
        obj = sys.modules[info['module']]
        for part in info['fullname'].split('.'):
            obj = getattr(obj, part)
        import inspect
        import os
        fn = inspect.getsourcefile(obj)
        fn = os.path.relpath(fn, start='k2')
        source, lineno = inspect.getsourcelines(obj)
        return fn, lineno, lineno + len(source) - 1
    if domain != 'py' or not info['module']:
        return None
    # Objects implemented in the compiled _k2 extension have no Python
    # source file to link to.
    try:
        filename = '{}#L{}-L{}'.format(*find_source())
    except Exception:
        return None
    if '_k2' in filename:
        return None
    # Strip everything before the 'k2' package root so the path is
    # relative to k2/python/ in the repository.
    idx = filename.rfind('k2')
    filename = filename[idx:]
    # BUG FIX: the previous version returned a literal placeholder and
    # ignored the computed ``filename``; interpolate it so the generated
    # links actually point at the source file and line range.
    return f'https://github.com/k2-fsa/k2/blob/master/k2/python/{filename}'
| true | true |
1c31739de64ccba83ba9ae22b845a3d72a1136cb | 2,367 | py | Python | manga_py/base_classes/archive.py | terepanda/manga-dl | 2ac3a314c2f67ad22f6da7e293ec6b3a132d7834 | [
"MIT"
] | null | null | null | manga_py/base_classes/archive.py | terepanda/manga-dl | 2ac3a314c2f67ad22f6da7e293ec6b3a132d7834 | [
"MIT"
] | null | null | null | manga_py/base_classes/archive.py | terepanda/manga-dl | 2ac3a314c2f67ad22f6da7e293ec6b3a132d7834 | [
"MIT"
] | null | null | null | from zipfile import ZipFile, ZIP_DEFLATED
# from PIL import Image as PilImage
from manga_py.image import Image
from os import path
from time import sleep
from manga_py.fs import is_file, make_dirs, basename, dirname, unlink, get_temp_path
class Archive:
    """Builds a deflate-compressed ZIP archive from downloaded manga images.

    Entries can be queued either from disk (``add_file``) or from
    in-memory data (``write_file``).  ``make`` writes the archive and
    then removes the on-disk source files.
    """

    # Open ZipFile while make() is running.
    _archive = None
    # Maps in-archive name -> raw data for in-memory entries.
    _writes = None
    # List of (path_on_disk, name_in_archive) tuples.
    files = None
    # When True, keep the files' original extensions untouched.
    not_change_files_extension = False
    # When True, convert .webp images to .jpeg before archiving.
    no_webp = False

    def __init__(self):
        self.files = []
        self._writes = {}

    def write_file(self, data, in_arc_name):
        """Queue raw ``data`` to be stored in the archive as ``in_arc_name``."""
        self._writes[in_arc_name] = data

    def add_file(self, file, in_arc_name=None):
        """Queue an on-disk file; the archive name defaults to its basename."""
        if in_arc_name is None:
            in_arc_name = basename(file)
        self.files.append((file, in_arc_name))

    def set_files_list(self, files):
        """Replace the queued file list with ``files`` ((path, name) pairs)."""
        self.files = files

    def add_book_info(self, data):
        """Queue ComicRack-style metadata as ``comicbook.xml``.

        BUG FIX: the arguments were previously swapped
        (``write_file('comicbook.xml', data)``), which stored the literal
        string 'comicbook.xml' as the data keyed by the XML content,
        instead of the XML content under the name 'comicbook.xml'
        (compare ``add_info``, which passes them correctly).
        """
        self.write_file(data, 'comicbook.xml')

    def __add_files(self):
        # Write queued on-disk files, optionally converting webp -> jpeg.
        for file in self.files:
            if is_file(file[0]):
                ext = self.__update_image_extension(file[0])
                if self.no_webp and ext[ext.rfind('.'):] == '.webp':
                    jpeg = ext[:ext.rfind('.')] + '.jpeg'
                    jpeg_path = path.join(dirname(file[0]), jpeg)
                    Image(file[0]).convert(jpeg_path)
                    file = jpeg_path, jpeg
                self._archive.write(*file)

    def __add_writes(self):
        # Write queued in-memory entries.
        for file in self._writes:
            self._archive.writestr(file, self._writes[file])

    def add_info(self, data):
        """Queue free-form text as ``info.txt`` in the archive."""
        self.write_file(data, 'info.txt')

    def make(self, dst):
        """Create the archive at ``dst`` and delete the source files.

        Does nothing when no entries are queued.
        """
        if not len(self.files) and not len(self._writes):
            return
        make_dirs(dirname(dst))
        self._archive = ZipFile(dst, 'w', ZIP_DEFLATED)
        # try/finally replaces the original's duplicated close() calls and
        # guarantees the archive is closed on both success and failure
        # (the exception, if any, still propagates).
        try:
            self.__add_files()
            self.__add_writes()
        finally:
            self._archive.close()
        self._maked()

    def _maked(self):
        # Post-build hook: remove the now-archived source files from disk.
        for file in self.files:
            unlink(file[0])

    def __update_image_extension(self, filename) -> str:
        """Return the basename of ``filename`` with its extension corrected
        to the image's real type (detected from file content), unless
        ``not_change_files_extension`` is set."""
        fn, extension = path.splitext(filename)
        if not self.not_change_files_extension:
            ext = Image.real_extension(get_temp_path(filename))
            if ext:
                extension = ext
        return basename(fn + extension)
| 28.865854 | 84 | 0.589354 | from zipfile import ZipFile, ZIP_DEFLATED
from manga_py.image import Image
from os import path
from time import sleep
from manga_py.fs import is_file, make_dirs, basename, dirname, unlink, get_temp_path
class Archive:
    """Builds a deflate-compressed ZIP archive from downloaded manga images.

    Entries can be queued either from disk (``add_file``) or from
    in-memory data (``write_file``).  ``make`` writes the archive and
    then removes the on-disk source files.
    """

    # Open ZipFile while make() is running.
    _archive = None
    # Maps in-archive name -> raw data for in-memory entries.
    _writes = None
    # List of (path_on_disk, name_in_archive) tuples.
    files = None
    # When True, keep the files' original extensions untouched.
    not_change_files_extension = False
    # When True, convert .webp images to .jpeg before archiving.
    no_webp = False

    def __init__(self):
        self.files = []
        self._writes = {}

    def write_file(self, data, in_arc_name):
        """Queue raw ``data`` to be stored in the archive as ``in_arc_name``."""
        self._writes[in_arc_name] = data

    def add_file(self, file, in_arc_name=None):
        """Queue an on-disk file; the archive name defaults to its basename."""
        if in_arc_name is None:
            in_arc_name = basename(file)
        self.files.append((file, in_arc_name))

    def set_files_list(self, files):
        """Replace the queued file list with ``files`` ((path, name) pairs)."""
        self.files = files

    def add_book_info(self, data):
        """Queue ComicRack-style metadata as ``comicbook.xml``.

        BUG FIX: the arguments were previously swapped
        (``write_file('comicbook.xml', data)``), which stored the literal
        string 'comicbook.xml' as the data keyed by the XML content,
        instead of the XML content under the name 'comicbook.xml'
        (compare ``add_info``, which passes them correctly).
        """
        self.write_file(data, 'comicbook.xml')

    def __add_files(self):
        # Write queued on-disk files, optionally converting webp -> jpeg.
        for file in self.files:
            if is_file(file[0]):
                ext = self.__update_image_extension(file[0])
                if self.no_webp and ext[ext.rfind('.'):] == '.webp':
                    jpeg = ext[:ext.rfind('.')] + '.jpeg'
                    jpeg_path = path.join(dirname(file[0]), jpeg)
                    Image(file[0]).convert(jpeg_path)
                    file = jpeg_path, jpeg
                self._archive.write(*file)

    def __add_writes(self):
        # Write queued in-memory entries.
        for file in self._writes:
            self._archive.writestr(file, self._writes[file])

    def add_info(self, data):
        """Queue free-form text as ``info.txt`` in the archive."""
        self.write_file(data, 'info.txt')

    def make(self, dst):
        """Create the archive at ``dst`` and delete the source files.

        Does nothing when no entries are queued.
        """
        if not len(self.files) and not len(self._writes):
            return
        make_dirs(dirname(dst))
        self._archive = ZipFile(dst, 'w', ZIP_DEFLATED)
        # try/finally replaces the original's duplicated close() calls and
        # guarantees the archive is closed on both success and failure
        # (the exception, if any, still propagates).
        try:
            self.__add_files()
            self.__add_writes()
        finally:
            self._archive.close()
        self._maked()

    def _maked(self):
        # Post-build hook: remove the now-archived source files from disk.
        for file in self.files:
            unlink(file[0])

    def __update_image_extension(self, filename) -> str:
        """Return the basename of ``filename`` with its extension corrected
        to the image's real type (detected from file content), unless
        ``not_change_files_extension`` is set."""
        fn, extension = path.splitext(filename)
        if not self.not_change_files_extension:
            ext = Image.real_extension(get_temp_path(filename))
            if ext:
                extension = ext
        return basename(fn + extension)
| true | true |
1c317445c97d0447c52e84d7cc347aa8c99fad33 | 54,128 | py | Python | ovpr_atp/awards/migrations/0026_auto__chg_field_proposal_principal_investigator__del_unique_proposal_p.py | pawanacharya1979/Awdportal | f0ed6ad723d70fae4737e517d4dca07b2aef176a | [
"MIT"
] | null | null | null | ovpr_atp/awards/migrations/0026_auto__chg_field_proposal_principal_investigator__del_unique_proposal_p.py | pawanacharya1979/Awdportal | f0ed6ad723d70fae4737e517d4dca07b2aef176a | [
"MIT"
] | null | null | null | ovpr_atp/awards/migrations/0026_auto__chg_field_proposal_principal_investigator__del_unique_proposal_p.py | pawanacharya1979/Awdportal | f0ed6ad723d70fae4737e517d4dca07b2aef176a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the schema change: relax Proposal.principal_investigator.

        Drops the unique (one-to-one) constraint first, then converts the
        column to a plain nullable ForeignKey to AwardManager, so several
        proposals may share one principal investigator.  The constraint
        must be dropped before the column is altered.
        """
        # Removing unique constraint on 'Proposal', fields ['principal_investigator']
        db.delete_unique(u'awards_proposal', ['principal_investigator_id'])
        # Changing field 'Proposal.principal_investigator'
        db.alter_column(u'awards_proposal', 'principal_investigator_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['awards.AwardManager'], null=True))
    def backwards(self, orm):
        """Revert the schema change from :meth:`forwards`.

        Restores the nullable OneToOneField (with SET_NULL on delete) and
        re-creates the unique constraint on principal_investigator_id.
        Reverting fails if multiple proposals now share an investigator.
        """
        # Changing field 'Proposal.principal_investigator'
        db.alter_column(u'awards_proposal', 'principal_investigator_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['awards.AwardManager'], unique=True, null=True, on_delete=models.SET_NULL))
        # Adding unique constraint on 'Proposal', fields ['principal_investigator']
        db.create_unique(u'awards_proposal', ['principal_investigator_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'awards.allowedcostschedule': {
'Meta': {'ordering': "['name']", 'object_name': 'AllowedCostSchedule'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'awards.award': {
'Meta': {'object_name': 'Award'},
'award_acceptance_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'award_closeout_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'award_management_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'award_management_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'award_negotiation_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['auth.User']"}),
'award_setup_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'first_proposal': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'award_first_proposal'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['awards.Proposal']", 'blank': 'True', 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported_from_lotus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subaward_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subaward_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'awards.awardacceptance': {
'Meta': {'object_name': 'AwardAcceptance'},
'admin_establishment': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'agency_award_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'award_acceptance_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'award_direct_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_issue_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'award_total_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contracting_official': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'current_modification': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'eas_status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'expedited_review': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'explanation': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'fcoi_cleared_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'foreign_travel': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'full_f_a_recovery': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gmo_co_email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'gmo_co_phone_number': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'major_project': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'mfa_investigators': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'phs_funded': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'reduction_in_budget': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sponsor_award_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'awards.awardcloseout': {
'Meta': {'object_name': 'AwardCloseout'},
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'awards.awardmanagement': {
'Meta': {'object_name': 'AwardManagement'},
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'awards.awardmanager': {
'Meta': {'object_name': 'AwardManager'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'gwid': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'system_user': ('django.db.models.fields.BooleanField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'awards.awardnegotiation': {
'Meta': {'object_name': 'AwardNegotiation'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'award_type': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'certificates_insurance': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'current_modification': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'data_security_restrictions': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'everify': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'foreign_restrictions': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'government_property': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gw_background_ip': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gw_doesnt_own_ip': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'insurance_renewal': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'negotiation_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'negotiation_status': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'negotiator': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'other_award_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'publication_restriction': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'related_other_agreements': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'related_other_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'retention_period': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subcontracting_plan': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'under_master_agreement': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'awards.awardorganization': {
'Meta': {'ordering': "['name']", 'object_name': 'AwardOrganization'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'org_info1_meaning': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'org_info2_meaning': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'organization_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
u'awards.awardsetup': {
'Meta': {'object_name': 'AwardSetup'},
'allowed_cost_schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AllowedCostSchedule']", 'null': 'True', 'blank': 'True'}),
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True'}),
'award_setup_complete': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'award_template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardTemplate']", 'null': 'True', 'blank': 'True'}),
'bill_to_address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_events': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'budget_restrictions': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'cfda_number': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.CFDANumber']", 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact_name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'cost_sharing_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'document_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'eas_award_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'equipment_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'expanded_authority': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'federal_negotiated_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.FedNegRate']", 'null': 'True', 'blank': 'True'}),
'final_reports_due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'financial_reporting_oth': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'financial_reporting_req': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '14', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indirect_cost_schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.IndirectCost']", 'null': 'True', 'blank': 'True'}),
'invention_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'nine_ninety_form_needed': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'onr_administered_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'patent_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'performance_site': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'pre_award_spending_auth': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'property_equip_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'property_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'qa_screening_complete': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'record_destroy_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'sp_type': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'task_location': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'technical_reporting_oth': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'technical_reporting_req': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '14', 'blank': 'True'}),
'wait_for': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'awards.awardtemplate': {
'Meta': {'ordering': "['number']", 'object_name': 'AwardTemplate'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'awards.cfdanumber': {
'Meta': {'ordering': "['flex_value']", 'object_name': 'CFDANumber'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'flex_value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150', 'primary_key': 'True'})
},
u'awards.easmapping': {
'Meta': {'unique_together': "(('interface', 'field', 'incoming_value', 'atp_model', 'atp_pk'),)", 'object_name': 'EASMapping'},
'atp_model': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'atp_pk': ('django.db.models.fields.IntegerField', [], {}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'incoming_value': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'interface': ('django.db.models.fields.CharField', [], {'default': "'C'", 'max_length': '1'})
},
u'awards.fednegrate': {
'Meta': {'ordering': "['description']", 'object_name': 'FedNegRate'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'flex_value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150', 'primary_key': 'True'})
},
u'awards.finalreport': {
'Meta': {'object_name': 'FinalReport'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'submitted_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'awards.fundingsource': {
'Meta': {'ordering': "['number']", 'object_name': 'FundingSource'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'awards.indirectcost': {
'Meta': {'ordering': "['rate_schedule']", 'object_name': 'IndirectCost'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'rate_schedule': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'awards.keypersonnel': {
'Meta': {'object_name': 'KeyPersonnel'},
'academic_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'calendar_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'effort': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'project_role': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'proposal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Proposal']"}),
'summer_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'})
},
u'awards.performancesite': {
'Meta': {'object_name': 'PerformanceSite'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Proposal']"}),
'ps_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_country': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'ps_duns': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ps_organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'ps_street1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_street2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_zipcode': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
u'awards.primesponsor': {
'Meta': {'ordering': "['name']", 'object_name': 'PrimeSponsor'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.IntegerField', [], {})
},
u'awards.priorapproval': {
'Meta': {'object_name': 'PriorApproval'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_submitted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'request': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'})
},
u'awards.proposal': {
'Meta': {'object_name': 'Proposal'},
'academic_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'agency_name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.FundingSource']", 'null': 'True', 'blank': 'True'}),
'agency_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'application_type_code': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'are_human_subjects_used': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'are_vertebrate_animals_used': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']", 'null': 'True', 'blank': 'True'}),
'budget_first_per_end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'budget_first_per_start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'calendar_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cost_shr_mand_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'cost_shr_mand_is_committed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cost_shr_mand_source': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'cost_shr_vol_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'cost_shr_vol_is_committed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cost_shr_vol_source': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'department_name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardOrganization']", 'null': 'True', 'blank': 'True'}),
'departmental_id_primary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'departmental_id_secondary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'departmental_name_primary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'departmental_name_secondary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'division_name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'dummy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'duns_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'federal_identifier': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'iacuc_approval_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'iacuc_protocol_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irb_protocol_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'irb_review_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'is_agency_cert_doc_attached': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_change_in_grantee_inst': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_cost_shr_auth_attached': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_first_proposal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_haz_mat': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_iacuc_review_pending': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_irb_review_pending': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_signed_coi_disc_attached': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_signed_ip_waiver_attached': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_status_waiver_required': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_subcontract': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'lotus_agency_name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'lotus_department_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'lotus_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'naics_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'opportunity_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'opportunity_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'principal_investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardManager']", 'null': 'True', 'blank': 'True'}),
'project_end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'project_start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'project_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'project_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'proposal_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'proposal_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'responsible_entity': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'sponsor_deadline': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'summer_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'tracking_number': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'who_is_prime': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.PrimeSponsor']", 'null': 'True', 'blank': 'True'}),
'will_involve_foreign_contract': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'will_involve_foreign_nationals': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'will_involve_shipment': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'awards.proposalintake': {
'Meta': {'object_name': 'ProposalIntake'},
'agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'announcement_link': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fcoi_submitted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jit_request': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'jit_response_submitted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'phs_funded': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'prime_sponsor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'principal_investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardManager']", 'null': 'True', 'blank': 'True'}),
'program_announcement': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'proposal_due_to_aor': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'proposal_due_to_ovpr': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'proposal_due_to_sponsor': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'proposal_outcome': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'school': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'spa1': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'})
},
u'awards.ptanumber': {
'Meta': {'object_name': 'PTANumber'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'award_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'banner_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'cs_banner_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'principal_investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardManager']", 'null': 'True', 'blank': 'True'}),
'project_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'task_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'total_pta_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
u'awards.reportsubmission': {
'Meta': {'object_name': 'ReportSubmission'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'submitted_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'awards.subaward': {
'Meta': {'object_name': 'Subaward'},
'agreement_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'approval_expiration': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'assist': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'cfda_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'citi_cleared': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact_information': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_fully_executed': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'debarment_check': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'duns_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ein': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fain': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fcoi_cleared': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'ffata_reportable': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'ffata_submitted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'funding_mechanism': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'gw_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'international': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'modification_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'other_mechanism': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'reminder': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'risk': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'sent': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'subaward_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'subaward_ready': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'subaward_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'subrecipient_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'tech_report_due': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'tech_report_received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
    # Tells South which app(s) this frozen `models` dict fully describes.
    complete_apps = ['awards']
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.delete_unique(u'awards_proposal', ['principal_investigator_id'])
db.alter_column(u'awards_proposal', 'principal_investigator_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['awards.AwardManager'], null=True))
    def backwards(self, orm):
        """Revert forwards(): restore the one-to-one PI link on Proposal.

        Re-declares ``awards_proposal.principal_investigator_id`` as a nullable
        ``OneToOneField`` (with ``on_delete=SET_NULL``) to ``AwardManager`` and
        re-adds the unique constraint enforcing at most one proposal per PI.

        NOTE(review): this will fail at the database level if, since migrating
        forwards, multiple proposal rows came to share the same
        principal_investigator_id — duplicates must be resolved first.
        """
        # Changing field 'Proposal.principal_investigator' back to a unique,
        # nullable OneToOneField with SET_NULL delete behavior.
        db.alter_column(u'awards_proposal', 'principal_investigator_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['awards.AwardManager'], unique=True, null=True, on_delete=models.SET_NULL))
        # Adding unique constraint on 'Proposal', fields ['principal_investigator'].
        # Must run after alter_column so the column definition and index agree.
        db.create_unique(u'awards_proposal', ['principal_investigator_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'awards.allowedcostschedule': {
'Meta': {'ordering': "['name']", 'object_name': 'AllowedCostSchedule'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'awards.award': {
'Meta': {'object_name': 'Award'},
'award_acceptance_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'award_closeout_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'award_management_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'award_management_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'award_negotiation_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['auth.User']"}),
'award_setup_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'first_proposal': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'award_first_proposal'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['awards.Proposal']", 'blank': 'True', 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported_from_lotus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subaward_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subaward_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'awards.awardacceptance': {
'Meta': {'object_name': 'AwardAcceptance'},
'admin_establishment': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'agency_award_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'award_acceptance_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'award_direct_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_direct_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_indirect_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_issue_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'award_total_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'award_total_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contracting_official': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'current_modification': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'eas_status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'expedited_review': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'explanation': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'fcoi_cleared_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'foreign_travel': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'full_f_a_recovery': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gmo_co_email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'gmo_co_phone_number': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'major_project': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'mfa_investigators': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'phs_funded': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'reduction_in_budget': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sponsor_award_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'awards.awardcloseout': {
'Meta': {'object_name': 'AwardCloseout'},
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'awards.awardmanagement': {
'Meta': {'object_name': 'AwardManagement'},
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'awards.awardmanager': {
'Meta': {'object_name': 'AwardManager'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'gwid': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'system_user': ('django.db.models.fields.BooleanField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'awards.awardnegotiation': {
'Meta': {'object_name': 'AwardNegotiation'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'award_type': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'certificates_insurance': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'current_modification': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'data_security_restrictions': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'everify': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'foreign_restrictions': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'government_property': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gw_background_ip': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gw_doesnt_own_ip': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'insurance_renewal': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'negotiation_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'negotiation_status': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'negotiator': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'other_award_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'publication_restriction': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'related_other_agreements': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'related_other_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'retention_period': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subcontracting_plan': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'under_master_agreement': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'awards.awardorganization': {
'Meta': {'ordering': "['name']", 'object_name': 'AwardOrganization'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'org_info1_meaning': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'org_info2_meaning': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'organization_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
u'awards.awardsetup': {
'Meta': {'object_name': 'AwardSetup'},
'allowed_cost_schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AllowedCostSchedule']", 'null': 'True', 'blank': 'True'}),
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True'}),
'award_setup_complete': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'award_template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardTemplate']", 'null': 'True', 'blank': 'True'}),
'bill_to_address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_events': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'budget_restrictions': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'cfda_number': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.CFDANumber']", 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact_name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'cost_sharing_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'document_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'eas_award_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'equipment_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'expanded_authority': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'federal_negotiated_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.FedNegRate']", 'null': 'True', 'blank': 'True'}),
'final_reports_due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'financial_reporting_oth': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'financial_reporting_req': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '14', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indirect_cost_schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.IndirectCost']", 'null': 'True', 'blank': 'True'}),
'invention_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'nine_ninety_form_needed': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'onr_administered_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'patent_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'performance_site': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'pre_award_spending_auth': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'property_equip_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'property_reporting_req': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'qa_screening_complete': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'record_destroy_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'sp_type': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'task_location': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'technical_reporting_oth': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'technical_reporting_req': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '14', 'blank': 'True'}),
'wait_for': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'awards.awardtemplate': {
'Meta': {'ordering': "['number']", 'object_name': 'AwardTemplate'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'awards.cfdanumber': {
'Meta': {'ordering': "['flex_value']", 'object_name': 'CFDANumber'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'flex_value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150', 'primary_key': 'True'})
},
u'awards.easmapping': {
'Meta': {'unique_together': "(('interface', 'field', 'incoming_value', 'atp_model', 'atp_pk'),)", 'object_name': 'EASMapping'},
'atp_model': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'atp_pk': ('django.db.models.fields.IntegerField', [], {}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'incoming_value': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'interface': ('django.db.models.fields.CharField', [], {'default': "'C'", 'max_length': '1'})
},
u'awards.fednegrate': {
'Meta': {'ordering': "['description']", 'object_name': 'FedNegRate'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'flex_value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150', 'primary_key': 'True'})
},
u'awards.finalreport': {
'Meta': {'object_name': 'FinalReport'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'submitted_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'awards.fundingsource': {
'Meta': {'ordering': "['number']", 'object_name': 'FundingSource'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'awards.indirectcost': {
'Meta': {'ordering': "['rate_schedule']", 'object_name': 'IndirectCost'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'rate_schedule': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'awards.keypersonnel': {
'Meta': {'object_name': 'KeyPersonnel'},
'academic_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'calendar_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'effort': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'project_role': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'proposal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Proposal']"}),
'summer_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'})
},
u'awards.performancesite': {
'Meta': {'object_name': 'PerformanceSite'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Proposal']"}),
'ps_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_country': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'ps_duns': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ps_organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'ps_street1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_street2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ps_zipcode': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
u'awards.primesponsor': {
'Meta': {'ordering': "['name']", 'object_name': 'PrimeSponsor'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.IntegerField', [], {})
},
u'awards.priorapproval': {
'Meta': {'object_name': 'PriorApproval'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_submitted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'request': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'})
},
u'awards.proposal': {
'Meta': {'object_name': 'Proposal'},
'academic_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'agency_name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.FundingSource']", 'null': 'True', 'blank': 'True'}),
'agency_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'application_type_code': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'are_human_subjects_used': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'are_vertebrate_animals_used': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']", 'null': 'True', 'blank': 'True'}),
'budget_first_per_end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'budget_first_per_start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'calendar_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cost_shr_mand_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'cost_shr_mand_is_committed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cost_shr_mand_source': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'cost_shr_vol_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'cost_shr_vol_is_committed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cost_shr_vol_source': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'department_name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardOrganization']", 'null': 'True', 'blank': 'True'}),
'departmental_id_primary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'departmental_id_secondary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'departmental_name_primary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'departmental_name_secondary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'division_name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'dummy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'duns_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'federal_identifier': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'iacuc_approval_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'iacuc_protocol_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irb_protocol_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'irb_review_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'is_agency_cert_doc_attached': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_change_in_grantee_inst': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_cost_shr_auth_attached': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_first_proposal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_haz_mat': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_iacuc_review_pending': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_irb_review_pending': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_signed_coi_disc_attached': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_signed_ip_waiver_attached': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_status_waiver_required': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_subcontract': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'lotus_agency_name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'lotus_department_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'lotus_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'naics_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'opportunity_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'opportunity_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'principal_investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardManager']", 'null': 'True', 'blank': 'True'}),
'project_end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'project_start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'project_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'project_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'proposal_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'proposal_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'responsible_entity': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'sponsor_deadline': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'summer_months': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_direct_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y10': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'total_indirect_costs_y9': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'tracking_number': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'who_is_prime': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.PrimeSponsor']", 'null': 'True', 'blank': 'True'}),
'will_involve_foreign_contract': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'will_involve_foreign_nationals': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'will_involve_shipment': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'awards.proposalintake': {
'Meta': {'object_name': 'ProposalIntake'},
'agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'announcement_link': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'award': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['awards.Award']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fcoi_submitted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jit_request': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'jit_response_submitted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'phs_funded': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'prime_sponsor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'principal_investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardManager']", 'null': 'True', 'blank': 'True'}),
'program_announcement': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'proposal_due_to_aor': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'proposal_due_to_ovpr': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'proposal_due_to_sponsor': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'proposal_outcome': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'school': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'spa1': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'})
},
u'awards.ptanumber': {
'Meta': {'object_name': 'PTANumber'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'award_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'banner_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'cs_banner_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'principal_investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.AwardManager']", 'null': 'True', 'blank': 'True'}),
'project_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'task_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'total_pta_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
u'awards.reportsubmission': {
'Meta': {'object_name': 'ReportSubmission'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'submitted_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'awards.subaward': {
'Meta': {'object_name': 'Subaward'},
'agreement_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'approval_expiration': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'assist': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['awards.Award']"}),
'cfda_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'citi_cleared': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact_information': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_fully_executed': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'debarment_check': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'duns_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ein': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fain': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fcoi_cleared': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'ffata_reportable': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'ffata_submitted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'funding_mechanism': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'gw_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'international': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'modification_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'other_mechanism': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'reminder': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'risk': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'sent': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'subaward_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'subaward_ready': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'subaward_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'subrecipient_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'tech_report_due': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'tech_report_received': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['awards'] | true | true |
1c31764e1f79caf9d7c34c2dfa652c6dd53a466a | 943 | py | Python | salt/modules/saltcloudmod.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | null | null | null | salt/modules/saltcloudmod.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 1 | 2017-07-10T21:44:39.000Z | 2017-07-10T21:44:39.000Z | salt/modules/saltcloudmod.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | null | null | null | """
Control a salt cloud system
"""
import salt.utils.data
import salt.utils.json

# Track whether the optional ``saltcloud`` library is importable; the module
# only loads (see ``__virtual__`` below) when it is.
HAS_CLOUD = False
try:
    import saltcloud  # pylint: disable=W0611

    HAS_CLOUD = True
except ImportError:
    pass

# Define the module's virtual name
__virtualname__ = "saltcloud"
def __virtual__():
    """Expose this module under its virtual name only when saltcloud is installed."""
    if not HAS_CLOUD:
        # load failure: return the standard (False, reason) tuple
        return (
            False,
            "The saltcloudmod execution module failed to load: requires the saltcloud library.",
        )
    return __virtualname__
def create(name, profile):
    """
    Create the named vm

    CLI Example:

    .. code-block:: bash

        salt <minion-id> saltcloud.create webserver rackspace_centos_512
    """
    # Shell out to salt-cloud and capture its JSON output on stdout.
    command = "salt-cloud --out json -p {} {}".format(profile, name)
    stdout = __salt__["cmd.run_stdout"](command, python_shell=False)
    try:
        return salt.utils.json.loads(stdout)
    except ValueError:
        # salt-cloud produced no/invalid JSON; report an empty result
        return {}
| 19.244898 | 92 | 0.639449 |
import salt.utils.data
import salt.utils.json

# True when the optional ``saltcloud`` library can be imported; gates module
# loading in ``__virtual__``.
HAS_CLOUD = False
try:
    import saltcloud  # noqa: F401  (import only probes availability)
    HAS_CLOUD = True
except ImportError:
    pass

# Virtual name under which this execution module is exposed to Salt.
__virtualname__ = "saltcloud"
def __virtual__():
    """Only load this module if the saltcloud library is installed."""
    if HAS_CLOUD:
        return __virtualname__
    # Standard Salt loader convention: (False, reason) explains the failure.
    return (
        False,
        "The saltcloudmod execution module failed to load: requires the saltcloud library.",
    )
def create(name, profile):
    """
    Create the named vm using the given cloud profile.

    CLI Example:

    .. code-block:: bash

        salt <minion-id> saltcloud.create webserver rackspace_centos_512
    """
    cmd = "salt-cloud --out json -p {} {}".format(profile, name)
    # Run salt-cloud through the cmd module; python_shell=False avoids shell parsing.
    out = __salt__["cmd.run_stdout"](cmd, python_shell=False)
    try:
        ret = salt.utils.json.loads(out)
    except ValueError:
        # salt-cloud produced no/invalid JSON; report an empty result
        ret = {}
    return ret
| true | true |
1c3176f39a886ad6e009ed8dfcde68ef9740eead | 20,769 | py | Python | tests/callbacks/test_progress_bar.py | Tshimanga/pytorch-lightning | ff7305f74d4e097a45b6a4d8c0fff6d4f5aaa386 | [
"Apache-2.0"
] | 3 | 2021-05-06T11:31:20.000Z | 2021-05-21T10:37:03.000Z | tests/callbacks/test_progress_bar.py | Tshimanga/pytorch-lightning | ff7305f74d4e097a45b6a4d8c0fff6d4f5aaa386 | [
"Apache-2.0"
] | null | null | null | tests/callbacks/test_progress_bar.py | Tshimanga/pytorch-lightning | ff7305f74d4e097a45b6a4d8c0fff6d4f5aaa386 | [
"Apache-2.0"
] | 2 | 2021-06-10T21:46:37.000Z | 2021-08-24T18:49:17.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import sys
from typing import Optional, Union
from unittest import mock
from unittest.mock import ANY, call, Mock
import pytest
import torch
from torch.utils.data.dataloader import DataLoader
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, ProgressBar, ProgressBarBase
from pytorch_lightning.callbacks.progress import tqdm
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
@pytest.mark.parametrize(
    "callbacks,refresh_rate",
    [
        ([], None),
        ([], 1),
        ([], 2),
        ([ProgressBar(refresh_rate=1)], 0),
        ([ProgressBar(refresh_rate=2)], 0),
        ([ProgressBar(refresh_rate=2)], 1),
    ],
)
def test_progress_bar_on(tmpdir, callbacks: list, refresh_rate: Optional[int]):
    """Test different ways the progress bar can be turned on."""
    trainer = Trainer(
        max_epochs=1,
        overfit_batches=5,
        default_root_dir=tmpdir,
        callbacks=callbacks,
        progress_bar_refresh_rate=refresh_rate,
    )
    bars = [cb for cb in trainer.callbacks if isinstance(cb, ProgressBarBase)]
    # only a single progress bar callback is supported at the moment
    assert len(bars) == 1
    assert trainer.progress_bar_callback is bars[0]
@pytest.mark.parametrize("callbacks,refresh_rate", [([], 0), ([], False), ([ModelCheckpoint(dirpath="../trainer")], 0)])
def test_progress_bar_off(tmpdir, callbacks: list, refresh_rate: Union[bool, int]):
    """Test different ways the progress bar can be turned off."""
    trainer = Trainer(default_root_dir=tmpdir, callbacks=callbacks, progress_bar_refresh_rate=refresh_rate)
    # no ProgressBar callback should have been attached at all
    assert not any(isinstance(cb, ProgressBar) for cb in trainer.callbacks)
    assert not trainer.progress_bar_callback
def test_progress_bar_misconfiguration():
    """Trainer must reject a callback list containing more than one progress bar."""
    with pytest.raises(MisconfigurationException, match=r"^You added multiple progress bar callbacks"):
        Trainer(callbacks=[ProgressBar(), ProgressBar(), ModelCheckpoint(dirpath="../trainer")])
def test_progress_bar_totals(tmpdir):
    """Test that the progress finishes with the correct total steps processed."""
    model = BoringModel()
    trainer = Trainer(default_root_dir=tmpdir, progress_bar_refresh_rate=1, max_epochs=1)
    bar = trainer.progress_bar_callback
    # before any loop runs, the dataloaders are unknown and all totals are zero
    assert 0 == bar.total_train_batches
    assert 0 == bar.total_val_batches
    assert 0 == bar.total_test_batches
    trainer.fit(model)
    # check main progress bar total
    n = bar.total_train_batches
    m = bar.total_val_batches
    assert len(trainer.train_dataloader) == n
    # the main bar covers train AND val batches of the epoch
    assert bar.main_progress_bar.total == n + m
    # check val progress bar total
    assert sum(len(loader) for loader in trainer.val_dataloaders) == m
    assert bar.val_progress_bar.total == m
    # main progress bar should have reached the end (train batches + val batches)
    assert bar.main_progress_bar.n == n + m
    assert bar.train_batch_idx == n
    # val progress bar should have reached the end
    assert bar.val_progress_bar.n == m
    assert bar.val_batch_idx == m
    # check that the test progress bar is off
    assert 0 == bar.total_test_batches
    assert bar.test_progress_bar is None
    # a standalone validation run reuses the val bar with the same totals
    trainer.validate(model)
    assert bar.val_progress_bar.total == m
    assert bar.val_progress_bar.n == m
    assert bar.val_batch_idx == m
    trainer.test(model)
    # check test progress bar total
    k = bar.total_test_batches
    assert sum(len(loader) for loader in trainer.test_dataloaders) == k
    assert bar.test_progress_bar.total == k
    # test progress bar should have reached the end
    assert bar.test_progress_bar.n == k
    assert bar.test_batch_idx == k
def test_progress_bar_fast_dev_run(tmpdir):
    """With ``fast_dev_run`` every loop runs exactly one batch; all bars must reflect that."""
    model = BoringModel()
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.fit(model)
    progress_bar = trainer.progress_bar_callback
    assert 1 == progress_bar.total_train_batches
    # total val batches are known only after val dataloaders have reloaded
    assert 1 == progress_bar.total_val_batches
    assert 1 == progress_bar.train_batch_idx
    assert 1 == progress_bar.val_batch_idx
    assert 0 == progress_bar.test_batch_idx
    # the main progress bar should display 2 batches (1 train, 1 val)
    assert 2 == progress_bar.main_progress_bar.total
    assert 2 == progress_bar.main_progress_bar.n
    trainer.validate(model)
    # the validation progress bar should display 1 batch
    assert 1 == progress_bar.val_batch_idx
    assert 1 == progress_bar.val_progress_bar.total
    assert 1 == progress_bar.val_progress_bar.n
    trainer.test(model)
    # the test progress bar should display 1 batch
    assert 1 == progress_bar.test_batch_idx
    assert 1 == progress_bar.test_progress_bar.total
    assert 1 == progress_bar.test_progress_bar.n
@pytest.mark.parametrize("refresh_rate", [0, 1, 50])
def test_progress_bar_progress_refresh(tmpdir, refresh_rate: int):
    """Test that the three progress bars get correctly updated when using different refresh rates."""
    model = BoringModel()

    class CurrentProgressBar(ProgressBar):
        # counters incremented on every batch end, independent of the refresh rate
        train_batches_seen = 0
        val_batches_seen = 0
        test_batches_seen = 0

        def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
            super().on_train_batch_start(trainer, pl_module, batch, batch_idx, dataloader_idx)
            # at batch start the bar index matches the loop's batch index
            assert self.train_batch_idx == trainer.fit_loop.batch_idx

        def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
            super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
            assert self.train_batch_idx == trainer.fit_loop.batch_idx + 1
            # the tqdm bar only redraws on multiples of the refresh rate
            # (is_disabled short-circuits the modulo when refresh_rate == 0)
            if not self.is_disabled and self.train_batch_idx % self.refresh_rate == 0:
                assert self.main_progress_bar.n == self.train_batch_idx
            self.train_batches_seen += 1

        def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
            super().on_validation_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
            if not self.is_disabled and self.val_batch_idx % self.refresh_rate == 0:
                assert self.val_progress_bar.n == self.val_batch_idx
            self.val_batches_seen += 1

        def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
            super().on_test_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
            if not self.is_disabled and self.test_batch_idx % self.refresh_rate == 0:
                assert self.test_progress_bar.n == self.test_batch_idx
            self.test_batches_seen += 1

    progress_bar = CurrentProgressBar(refresh_rate=refresh_rate)
    trainer = Trainer(
        default_root_dir=tmpdir,
        callbacks=[progress_bar],
        progress_bar_refresh_rate=101,  # should not matter if custom callback provided
        limit_train_batches=1.0,
        num_sanity_val_steps=2,
        max_epochs=3,
    )
    assert trainer.progress_bar_callback.refresh_rate == refresh_rate

    trainer.fit(model)
    # all train/val batches of the 3 epochs were seen, plus the sanity-check val batches
    assert progress_bar.train_batches_seen == 3 * progress_bar.total_train_batches
    assert progress_bar.val_batches_seen == 3 * progress_bar.total_val_batches + trainer.num_sanity_val_steps
    assert progress_bar.test_batches_seen == 0

    trainer.validate(model)
    # the standalone validation run adds one more pass over the val batches
    assert progress_bar.train_batches_seen == 3 * progress_bar.total_train_batches
    assert progress_bar.val_batches_seen == 4 * progress_bar.total_val_batches + trainer.num_sanity_val_steps
    assert progress_bar.test_batches_seen == 0

    trainer.test(model)
    assert progress_bar.train_batches_seen == 3 * progress_bar.total_train_batches
    assert progress_bar.val_batches_seen == 4 * progress_bar.total_val_batches + trainer.num_sanity_val_steps
    assert progress_bar.test_batches_seen == progress_bar.total_test_batches
@pytest.mark.parametrize("limit_val_batches", (0, 5))
def test_num_sanity_val_steps_progress_bar(tmpdir, limit_val_batches: int):
    """
    Test val_progress_bar total with 'num_sanity_val_steps' Trainer argument.
    """

    class CurrentProgressBar(ProgressBar):
        # totals captured at the end of the sanity check / validation epoch
        val_pbar_total = 0
        sanity_pbar_total = 0

        def on_sanity_check_end(self, *args):
            self.sanity_pbar_total = self.val_progress_bar.total
            super().on_sanity_check_end(*args)

        def on_validation_epoch_end(self, *args):
            self.val_pbar_total = self.val_progress_bar.total
            super().on_validation_epoch_end(*args)

    model = BoringModel()
    progress_bar = CurrentProgressBar()
    num_sanity_val_steps = 2
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        num_sanity_val_steps=num_sanity_val_steps,
        limit_train_batches=1,
        limit_val_batches=limit_val_batches,
        callbacks=[progress_bar],
        logger=False,
        checkpoint_callback=False,
    )
    trainer.fit(model)
    # the sanity check cannot run more batches than the (limited) val loader provides
    assert progress_bar.sanity_pbar_total == min(num_sanity_val_steps, limit_val_batches)
    assert progress_bar.val_pbar_total == limit_val_batches
def test_progress_bar_default_value(tmpdir):
    """An unset or explicitly ``None`` refresh rate must fall back to 1."""
    for extra_kwargs in ({}, {"progress_bar_refresh_rate": None}):
        trainer = Trainer(default_root_dir=tmpdir, **extra_kwargs)
        assert trainer.progress_bar_callback.refresh_rate == 1
@mock.patch.dict(os.environ, {"COLAB_GPU": "1"})
def test_progress_bar_value_on_colab(tmpdir):
    """Test that Trainer will override the default in Google COLAB."""
    cases = (
        ({}, 20),  # implicit default gets bumped to 20 on COLAB
        ({"progress_bar_refresh_rate": None}, 20),  # explicit None likewise
        ({"progress_bar_refresh_rate": 19}, 19),  # user-provided value wins
    )
    for extra_kwargs, expected_rate in cases:
        trainer = Trainer(default_root_dir=tmpdir, **extra_kwargs)
        assert trainer.progress_bar_callback.refresh_rate == expected_rate
class MockedUpdateProgressBars(ProgressBar):
    """Progress bar whose tqdm ``update`` calls are wrapped in mocks once the bars are initialized."""

    @staticmethod
    def _spy_on_update(bar):
        # wrap (not replace) update so the bar still advances while call args are recorded
        bar.update = Mock(wraps=bar.update)
        return bar

    def init_train_tqdm(self):
        return self._spy_on_update(super().init_train_tqdm())

    def init_validation_tqdm(self):
        return self._spy_on_update(super().init_validation_tqdm())

    def init_test_tqdm(self):
        return self._spy_on_update(super().init_test_tqdm())
@pytest.mark.parametrize(
    "train_batches,val_batches,refresh_rate,train_deltas,val_deltas",
    [
        [2, 3, 1, [1, 1, 1, 1, 1], [1, 1, 1]],
        [0, 0, 3, [], []],
        [1, 0, 3, [1], []],
        [1, 1, 3, [2], [1]],
        [5, 0, 3, [3, 2], []],
        [5, 2, 3, [3, 3, 1], [2]],
        [5, 2, 6, [6, 1], [2]],
    ],
)
def test_main_progress_bar_update_amount(
    tmpdir, train_batches: int, val_batches: int, refresh_rate: int, train_deltas: list, val_deltas: list
):
    """
    Test that the main progress updates with the correct amount together with the val progress. At the end of
    the epoch, the progress must not overshoot if the number of steps is not divisible by the refresh rate.
    """
    model = BoringModel()
    progress_bar = MockedUpdateProgressBars(refresh_rate=refresh_rate)
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=train_batches,
        limit_val_batches=val_batches,
        callbacks=[progress_bar],
        logger=False,
        checkpoint_callback=False,
    )
    trainer.fit(model)
    if train_batches > 0:
        # the bar advances by `refresh_rate` at a time, with a smaller final delta
        progress_bar.main_progress_bar.update.assert_has_calls([call(delta) for delta in train_deltas])
    if val_batches > 0:
        progress_bar.val_progress_bar.update.assert_has_calls([call(delta) for delta in val_deltas])
@pytest.mark.parametrize("test_batches,refresh_rate,test_deltas", [[1, 3, [1]], [3, 1, [1, 1, 1]], [5, 3, [3, 2]]])
def test_test_progress_bar_update_amount(tmpdir, test_batches: int, refresh_rate: int, test_deltas: list):
    """The test progress bar must advance by the refresh rate, with a smaller final step."""
    bar = MockedUpdateProgressBars(refresh_rate=refresh_rate)
    trainer = Trainer(
        max_epochs=1,
        limit_test_batches=test_batches,
        default_root_dir=tmpdir,
        callbacks=[bar],
        logger=False,
        checkpoint_callback=False,
    )
    trainer.test(BoringModel())
    expected_calls = [call(delta) for delta in test_deltas]
    bar.test_progress_bar.update.assert_has_calls(expected_calls)
def test_tensor_to_float_conversion(tmpdir):
    """Check tensor gets converted to float"""

    class TestModel(BoringModel):
        def training_step(self, batch, batch_idx):
            # log a scalar tensor, a dict of tensors, and a dict of plain numbers
            self.log("a", torch.tensor(0.123), prog_bar=True, on_epoch=False)
            self.log("b", {"b1": torch.tensor([1])}, prog_bar=True, on_epoch=False)
            self.log("c", {"c1": 2}, prog_bar=True, on_epoch=False)
            return super().training_step(batch, batch_idx)

    trainer = Trainer(
        default_root_dir=tmpdir, max_epochs=1, limit_train_batches=2, logger=False, checkpoint_callback=False
    )
    trainer.fit(TestModel())
    # all logged values must end up as plain Python floats in the bar metrics
    torch.testing.assert_allclose(trainer.progress_bar_metrics["a"], 0.123)
    assert trainer.progress_bar_metrics["b"] == {"b1": 1.0}
    assert trainer.progress_bar_metrics["c"] == {"c1": 2.0}
    # the rendered tqdm postfix string reflects the converted values
    pbar = trainer.progress_bar_callback.main_progress_bar
    actual = str(pbar.postfix)
    assert actual.endswith("a=0.123, b={'b1': 1.0}, c={'c1': 2.0}"), actual
@pytest.mark.parametrize(
    "input_num, expected",
    [
        [1, "1"],
        [1.0, "1.000"],
        [0.1, "0.100"],
        [1e-3, "0.001"],
        [1e-5, "1e-5"],
        ["1.0", "1.000"],
        ["10000", "10000"],
        ["abc", "abc"],
    ],
)
def test_tqdm_format_num(input_num: Union[str, int, float], expected: str):
    """The specialized ``tqdm.format_num`` pads floats and numeric strings with trailing zeros."""
    formatted = tqdm.format_num(input_num)
    assert formatted == expected
class PrintModel(BoringModel):
    """BoringModel whose hooks call ``LightningModule.print`` so that the
    redirection of print arguments to the progress bar can be asserted."""

    def training_step(self, *args, **kwargs):
        self.print("training_step", end="")
        return super().training_step(*args, **kwargs)

    def validation_step(self, *args, **kwargs):
        self.print("validation_step", file=sys.stderr)
        return super().validation_step(*args, **kwargs)

    def test_step(self, *args, **kwargs):
        self.print("test_step")
        return super().test_step(*args, **kwargs)

    def predict_step(self, *args, **kwargs):
        self.print("predict_step")
        return super().predict_step(*args, **kwargs)
@mock.patch("pytorch_lightning.callbacks.progress.tqdm.write")
def test_progress_bar_print(tqdm_write, tmpdir):
    """Test that printing in the LightningModule redirects arguments to the progress bar."""
    model = PrintModel()
    bar = ProgressBar()
    trainer = Trainer(
        default_root_dir=tmpdir,
        num_sanity_val_steps=0,
        limit_train_batches=1,
        limit_val_batches=1,
        limit_test_batches=1,
        limit_predict_batches=1,
        max_steps=1,
        callbacks=[bar],
    )
    trainer.fit(model)
    trainer.test(model)
    trainer.predict(model)
    # one tqdm.write per hook; keyword args (end/file) are forwarded unchanged
    assert tqdm_write.call_count == 4
    assert tqdm_write.call_args_list == [
        call("training_step", end="", file=None, nolock=False),
        call("validation_step", end=os.linesep, file=sys.stderr, nolock=False),
        call("test_step", end=os.linesep, file=None, nolock=False),
        call("predict_step", end=os.linesep, file=None, nolock=False),
    ]
@mock.patch("pytorch_lightning.callbacks.progress.tqdm.write")
def test_progress_bar_print_no_train(tqdm_write, tmpdir):
    """Test that printing in the LightningModule redirects arguments to the progress bar without training."""
    model = PrintModel()
    bar = ProgressBar()
    trainer = Trainer(
        default_root_dir=tmpdir,
        num_sanity_val_steps=0,
        limit_val_batches=1,
        limit_test_batches=1,
        limit_predict_batches=1,
        max_steps=1,
        callbacks=[bar],
    )
    # Only the non-fit trainer entry points are exercised here.
    trainer.validate(model)
    trainer.test(model)
    trainer.predict(model)
    # One tqdm.write call per executed hook; keyword arguments (end/file)
    # must be forwarded unchanged, with nolock always False.
    assert tqdm_write.call_count == 3
    assert tqdm_write.call_args_list == [
        call("validation_step", end=os.linesep, file=sys.stderr, nolock=False),
        call("test_step", end=os.linesep, file=None, nolock=False),
        call("predict_step", end=os.linesep, file=None, nolock=False),
    ]
@mock.patch("builtins.print")
@mock.patch("pytorch_lightning.callbacks.progress.tqdm.write")
def test_progress_bar_print_disabled(tqdm_write, mock_print, tmpdir):
    """Test that printing in LightningModule goes through built-in print function when progress bar is disabled."""
    # NOTE: patch decorators apply bottom-up, so tqdm_write is the first
    # mock argument and mock_print the second.
    model = PrintModel()
    bar = ProgressBar()
    trainer = Trainer(
        default_root_dir=tmpdir,
        num_sanity_val_steps=0,
        limit_train_batches=1,
        limit_val_batches=1,
        limit_test_batches=1,
        limit_predict_batches=1,
        max_steps=1,
        callbacks=[bar],
    )
    # With the bar disabled, LightningModule.print must fall back to builtins.print.
    bar.disable()
    trainer.fit(model)
    trainer.test(model, verbose=False)
    trainer.predict(model)
    mock_print.assert_has_calls(
        [call("training_step", end=""), call("validation_step", file=ANY), call("test_step"), call("predict_step")]
    )
    # tqdm.write must not be used at all while the bar is disabled.
    tqdm_write.assert_not_called()
def test_progress_bar_can_be_pickled():
    """The progress bar must stay picklable before and after every trainer entry point."""
    bar = ProgressBar()
    trainer = Trainer(fast_dev_run=True, callbacks=[bar], max_steps=1)
    model = BoringModel()
    # pickle.dumps raises if the bar holds an unpicklable reference
    # (e.g. a live tqdm instance) at any stage of the lifecycle.
    pickle.dumps(bar)
    trainer.fit(model)
    pickle.dumps(bar)
    trainer.test(model)
    pickle.dumps(bar)
    trainer.predict(model)
    pickle.dumps(bar)
@RunIf(min_gpus=2, special=True)
def test_progress_bar_max_val_check_interval_0(tmpdir):
    """DDP progress bar totals with validation every 20% of the training epoch."""
    _test_progress_bar_max_val_check_interval(
        tmpdir, total_train_samples=8, train_batch_size=4, total_val_samples=2, val_batch_size=1, val_check_interval=0.2
    )
@RunIf(min_gpus=2, special=True)
def test_progress_bar_max_val_check_interval_1(tmpdir):
    """DDP progress bar totals with validation every 50% of the training epoch."""
    _test_progress_bar_max_val_check_interval(
        tmpdir, total_train_samples=8, train_batch_size=4, total_val_samples=2, val_batch_size=1, val_check_interval=0.5
    )
def _test_progress_bar_max_val_check_interval(
    tmpdir, total_train_samples, train_batch_size, total_val_samples, val_batch_size, val_check_interval
):
    """Shared body for the DDP ``val_check_interval`` progress bar tests (2 GPUs)."""
    world_size = 2
    train_data = DataLoader(RandomDataset(32, total_train_samples), batch_size=train_batch_size)
    val_data = DataLoader(RandomDataset(32, total_val_samples), batch_size=val_batch_size)
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        num_sanity_val_steps=0,
        max_epochs=1,
        weights_summary=None,
        val_check_interval=val_check_interval,
        gpus=world_size,
        accelerator="ddp",
    )
    trainer.fit(model, train_dataloader=train_data, val_dataloaders=val_data)
    # Per-process batch counts: DDP shards the samples across the two processes.
    total_train_batches = total_train_samples // (train_batch_size * world_size)
    val_check_batch = max(1, int(total_train_batches * val_check_interval))
    assert trainer.val_check_batch == val_check_batch
    val_checks_per_epoch = total_train_batches / val_check_batch
    total_val_batches = total_val_samples // (val_batch_size * world_size)
    assert trainer.progress_bar_callback.total_train_batches == total_train_batches
    assert trainer.progress_bar_callback.total_val_batches == total_val_batches
    # The main bar counts every validation pass of the epoch on top of training.
    total_val_batches = total_val_batches * val_checks_per_epoch
    if trainer.is_global_zero:
        assert trainer.progress_bar_callback.main_progress_bar.total == total_train_batches + total_val_batches
| 37.02139 | 120 | 0.708363 |
import os
import pickle
import sys
from typing import Optional, Union
from unittest import mock
from unittest.mock import ANY, call, Mock
import pytest
import torch
from torch.utils.data.dataloader import DataLoader
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, ProgressBar, ProgressBarBase
from pytorch_lightning.callbacks.progress import tqdm
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
@pytest.mark.parametrize(
"callbacks,refresh_rate",
[
([], None),
([], 1),
([], 2),
([ProgressBar(refresh_rate=1)], 0),
([ProgressBar(refresh_rate=2)], 0),
([ProgressBar(refresh_rate=2)], 1),
],
)
def test_progress_bar_on(tmpdir, callbacks: list, refresh_rate: Optional[int]):
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=callbacks,
progress_bar_refresh_rate=refresh_rate,
max_epochs=1,
overfit_batches=5,
)
progress_bars = [c for c in trainer.callbacks if isinstance(c, ProgressBarBase)]
assert len(progress_bars) == 1
assert progress_bars[0] is trainer.progress_bar_callback
@pytest.mark.parametrize("callbacks,refresh_rate", [([], 0), ([], False), ([ModelCheckpoint(dirpath="../trainer")], 0)])
def test_progress_bar_off(tmpdir, callbacks: list, refresh_rate: Union[bool, int]):
trainer = Trainer(default_root_dir=tmpdir, callbacks=callbacks, progress_bar_refresh_rate=refresh_rate)
progress_bars = [c for c in trainer.callbacks if isinstance(c, ProgressBar)]
assert 0 == len(progress_bars)
assert not trainer.progress_bar_callback
def test_progress_bar_misconfiguration():
callbacks = [ProgressBar(), ProgressBar(), ModelCheckpoint(dirpath="../trainer")]
with pytest.raises(MisconfigurationException, match=r"^You added multiple progress bar callbacks"):
Trainer(callbacks=callbacks)
def test_progress_bar_totals(tmpdir):
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, progress_bar_refresh_rate=1, max_epochs=1)
bar = trainer.progress_bar_callback
assert 0 == bar.total_train_batches
assert 0 == bar.total_val_batches
assert 0 == bar.total_test_batches
trainer.fit(model)
n = bar.total_train_batches
m = bar.total_val_batches
assert len(trainer.train_dataloader) == n
assert bar.main_progress_bar.total == n + m
assert sum(len(loader) for loader in trainer.val_dataloaders) == m
assert bar.val_progress_bar.total == m
assert bar.main_progress_bar.n == n + m
assert bar.train_batch_idx == n
assert bar.val_progress_bar.n == m
assert bar.val_batch_idx == m
assert 0 == bar.total_test_batches
assert bar.test_progress_bar is None
trainer.validate(model)
assert bar.val_progress_bar.total == m
assert bar.val_progress_bar.n == m
assert bar.val_batch_idx == m
trainer.test(model)
k = bar.total_test_batches
assert sum(len(loader) for loader in trainer.test_dataloaders) == k
assert bar.test_progress_bar.total == k
assert bar.test_progress_bar.n == k
assert bar.test_batch_idx == k
def test_progress_bar_fast_dev_run(tmpdir):
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model)
progress_bar = trainer.progress_bar_callback
assert 1 == progress_bar.total_train_batches
assert 1 == progress_bar.total_val_batches
assert 1 == progress_bar.train_batch_idx
assert 1 == progress_bar.val_batch_idx
assert 0 == progress_bar.test_batch_idx
assert 2 == progress_bar.main_progress_bar.total
assert 2 == progress_bar.main_progress_bar.n
trainer.validate(model)
assert 1 == progress_bar.val_batch_idx
assert 1 == progress_bar.val_progress_bar.total
assert 1 == progress_bar.val_progress_bar.n
trainer.test(model)
assert 1 == progress_bar.test_batch_idx
assert 1 == progress_bar.test_progress_bar.total
assert 1 == progress_bar.test_progress_bar.n
@pytest.mark.parametrize("refresh_rate", [0, 1, 50])
def test_progress_bar_progress_refresh(tmpdir, refresh_rate: int):
model = BoringModel()
class CurrentProgressBar(ProgressBar):
train_batches_seen = 0
val_batches_seen = 0
test_batches_seen = 0
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
super().on_train_batch_start(trainer, pl_module, batch, batch_idx, dataloader_idx)
assert self.train_batch_idx == trainer.fit_loop.batch_idx
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
assert self.train_batch_idx == trainer.fit_loop.batch_idx + 1
if not self.is_disabled and self.train_batch_idx % self.refresh_rate == 0:
assert self.main_progress_bar.n == self.train_batch_idx
self.train_batches_seen += 1
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
super().on_validation_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
if not self.is_disabled and self.val_batch_idx % self.refresh_rate == 0:
assert self.val_progress_bar.n == self.val_batch_idx
self.val_batches_seen += 1
def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
super().on_test_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
if not self.is_disabled and self.test_batch_idx % self.refresh_rate == 0:
assert self.test_progress_bar.n == self.test_batch_idx
self.test_batches_seen += 1
progress_bar = CurrentProgressBar(refresh_rate=refresh_rate)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[progress_bar],
progress_bar_refresh_rate=101,
limit_train_batches=1.0,
num_sanity_val_steps=2,
max_epochs=3,
)
assert trainer.progress_bar_callback.refresh_rate == refresh_rate
trainer.fit(model)
assert progress_bar.train_batches_seen == 3 * progress_bar.total_train_batches
assert progress_bar.val_batches_seen == 3 * progress_bar.total_val_batches + trainer.num_sanity_val_steps
assert progress_bar.test_batches_seen == 0
trainer.validate(model)
assert progress_bar.train_batches_seen == 3 * progress_bar.total_train_batches
assert progress_bar.val_batches_seen == 4 * progress_bar.total_val_batches + trainer.num_sanity_val_steps
assert progress_bar.test_batches_seen == 0
trainer.test(model)
assert progress_bar.train_batches_seen == 3 * progress_bar.total_train_batches
assert progress_bar.val_batches_seen == 4 * progress_bar.total_val_batches + trainer.num_sanity_val_steps
assert progress_bar.test_batches_seen == progress_bar.total_test_batches
@pytest.mark.parametrize("limit_val_batches", (0, 5))
def test_num_sanity_val_steps_progress_bar(tmpdir, limit_val_batches: int):
class CurrentProgressBar(ProgressBar):
val_pbar_total = 0
sanity_pbar_total = 0
def on_sanity_check_end(self, *args):
self.sanity_pbar_total = self.val_progress_bar.total
super().on_sanity_check_end(*args)
def on_validation_epoch_end(self, *args):
self.val_pbar_total = self.val_progress_bar.total
super().on_validation_epoch_end(*args)
model = BoringModel()
progress_bar = CurrentProgressBar()
num_sanity_val_steps = 2
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
num_sanity_val_steps=num_sanity_val_steps,
limit_train_batches=1,
limit_val_batches=limit_val_batches,
callbacks=[progress_bar],
logger=False,
checkpoint_callback=False,
)
trainer.fit(model)
assert progress_bar.sanity_pbar_total == min(num_sanity_val_steps, limit_val_batches)
assert progress_bar.val_pbar_total == limit_val_batches
def test_progress_bar_default_value(tmpdir):
trainer = Trainer(default_root_dir=tmpdir)
assert trainer.progress_bar_callback.refresh_rate == 1
trainer = Trainer(default_root_dir=tmpdir, progress_bar_refresh_rate=None)
assert trainer.progress_bar_callback.refresh_rate == 1
@mock.patch.dict(os.environ, {"COLAB_GPU": "1"})
def test_progress_bar_value_on_colab(tmpdir):
trainer = Trainer(default_root_dir=tmpdir)
assert trainer.progress_bar_callback.refresh_rate == 20
trainer = Trainer(default_root_dir=tmpdir, progress_bar_refresh_rate=None)
assert trainer.progress_bar_callback.refresh_rate == 20
trainer = Trainer(default_root_dir=tmpdir, progress_bar_refresh_rate=19)
assert trainer.progress_bar_callback.refresh_rate == 19
class MockedUpdateProgressBars(ProgressBar):
def _mock_bar_update(self, bar):
bar.update = Mock(wraps=bar.update)
return bar
def init_train_tqdm(self):
bar = super().init_train_tqdm()
return self._mock_bar_update(bar)
def init_validation_tqdm(self):
bar = super().init_validation_tqdm()
return self._mock_bar_update(bar)
def init_test_tqdm(self):
bar = super().init_test_tqdm()
return self._mock_bar_update(bar)
@pytest.mark.parametrize(
"train_batches,val_batches,refresh_rate,train_deltas,val_deltas",
[
[2, 3, 1, [1, 1, 1, 1, 1], [1, 1, 1]],
[0, 0, 3, [], []],
[1, 0, 3, [1], []],
[1, 1, 3, [2], [1]],
[5, 0, 3, [3, 2], []],
[5, 2, 3, [3, 3, 1], [2]],
[5, 2, 6, [6, 1], [2]],
],
)
def test_main_progress_bar_update_amount(
tmpdir, train_batches: int, val_batches: int, refresh_rate: int, train_deltas: list, val_deltas: list
):
model = BoringModel()
progress_bar = MockedUpdateProgressBars(refresh_rate=refresh_rate)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=train_batches,
limit_val_batches=val_batches,
callbacks=[progress_bar],
logger=False,
checkpoint_callback=False,
)
trainer.fit(model)
if train_batches > 0:
progress_bar.main_progress_bar.update.assert_has_calls([call(delta) for delta in train_deltas])
if val_batches > 0:
progress_bar.val_progress_bar.update.assert_has_calls([call(delta) for delta in val_deltas])
@pytest.mark.parametrize("test_batches,refresh_rate,test_deltas", [[1, 3, [1]], [3, 1, [1, 1, 1]], [5, 3, [3, 2]]])
def test_test_progress_bar_update_amount(tmpdir, test_batches: int, refresh_rate: int, test_deltas: list):
model = BoringModel()
progress_bar = MockedUpdateProgressBars(refresh_rate=refresh_rate)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_test_batches=test_batches,
callbacks=[progress_bar],
logger=False,
checkpoint_callback=False,
)
trainer.test(model)
progress_bar.test_progress_bar.update.assert_has_calls([call(delta) for delta in test_deltas])
def test_tensor_to_float_conversion(tmpdir):
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
self.log("a", torch.tensor(0.123), prog_bar=True, on_epoch=False)
self.log("b", {"b1": torch.tensor([1])}, prog_bar=True, on_epoch=False)
self.log("c", {"c1": 2}, prog_bar=True, on_epoch=False)
return super().training_step(batch, batch_idx)
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=1, limit_train_batches=2, logger=False, checkpoint_callback=False
)
trainer.fit(TestModel())
torch.testing.assert_allclose(trainer.progress_bar_metrics["a"], 0.123)
assert trainer.progress_bar_metrics["b"] == {"b1": 1.0}
assert trainer.progress_bar_metrics["c"] == {"c1": 2.0}
pbar = trainer.progress_bar_callback.main_progress_bar
actual = str(pbar.postfix)
assert actual.endswith("a=0.123, b={'b1': 1.0}, c={'c1': 2.0}"), actual
@pytest.mark.parametrize(
"input_num, expected",
[
[1, "1"],
[1.0, "1.000"],
[0.1, "0.100"],
[1e-3, "0.001"],
[1e-5, "1e-5"],
["1.0", "1.000"],
["10000", "10000"],
["abc", "abc"],
],
)
def test_tqdm_format_num(input_num: Union[str, int, float], expected: str):
assert tqdm.format_num(input_num) == expected
class PrintModel(BoringModel):
def training_step(self, *args, **kwargs):
self.print("training_step", end="")
return super().training_step(*args, **kwargs)
def validation_step(self, *args, **kwargs):
self.print("validation_step", file=sys.stderr)
return super().validation_step(*args, **kwargs)
def test_step(self, *args, **kwargs):
self.print("test_step")
return super().test_step(*args, **kwargs)
def predict_step(self, *args, **kwargs):
self.print("predict_step")
return super().predict_step(*args, **kwargs)
@mock.patch("pytorch_lightning.callbacks.progress.tqdm.write")
def test_progress_bar_print(tqdm_write, tmpdir):
model = PrintModel()
bar = ProgressBar()
trainer = Trainer(
default_root_dir=tmpdir,
num_sanity_val_steps=0,
limit_train_batches=1,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
max_steps=1,
callbacks=[bar],
)
trainer.fit(model)
trainer.test(model)
trainer.predict(model)
assert tqdm_write.call_count == 4
assert tqdm_write.call_args_list == [
call("training_step", end="", file=None, nolock=False),
call("validation_step", end=os.linesep, file=sys.stderr, nolock=False),
call("test_step", end=os.linesep, file=None, nolock=False),
call("predict_step", end=os.linesep, file=None, nolock=False),
]
@mock.patch("pytorch_lightning.callbacks.progress.tqdm.write")
def test_progress_bar_print_no_train(tqdm_write, tmpdir):
model = PrintModel()
bar = ProgressBar()
trainer = Trainer(
default_root_dir=tmpdir,
num_sanity_val_steps=0,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
max_steps=1,
callbacks=[bar],
)
trainer.validate(model)
trainer.test(model)
trainer.predict(model)
assert tqdm_write.call_count == 3
assert tqdm_write.call_args_list == [
call("validation_step", end=os.linesep, file=sys.stderr, nolock=False),
call("test_step", end=os.linesep, file=None, nolock=False),
call("predict_step", end=os.linesep, file=None, nolock=False),
]
@mock.patch("builtins.print")
@mock.patch("pytorch_lightning.callbacks.progress.tqdm.write")
def test_progress_bar_print_disabled(tqdm_write, mock_print, tmpdir):
model = PrintModel()
bar = ProgressBar()
trainer = Trainer(
default_root_dir=tmpdir,
num_sanity_val_steps=0,
limit_train_batches=1,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
max_steps=1,
callbacks=[bar],
)
bar.disable()
trainer.fit(model)
trainer.test(model, verbose=False)
trainer.predict(model)
mock_print.assert_has_calls(
[call("training_step", end=""), call("validation_step", file=ANY), call("test_step"), call("predict_step")]
)
tqdm_write.assert_not_called()
def test_progress_bar_can_be_pickled():
bar = ProgressBar()
trainer = Trainer(fast_dev_run=True, callbacks=[bar], max_steps=1)
model = BoringModel()
pickle.dumps(bar)
trainer.fit(model)
pickle.dumps(bar)
trainer.test(model)
pickle.dumps(bar)
trainer.predict(model)
pickle.dumps(bar)
@RunIf(min_gpus=2, special=True)
def test_progress_bar_max_val_check_interval_0(tmpdir):
_test_progress_bar_max_val_check_interval(
tmpdir, total_train_samples=8, train_batch_size=4, total_val_samples=2, val_batch_size=1, val_check_interval=0.2
)
@RunIf(min_gpus=2, special=True)
def test_progress_bar_max_val_check_interval_1(tmpdir):
_test_progress_bar_max_val_check_interval(
tmpdir, total_train_samples=8, train_batch_size=4, total_val_samples=2, val_batch_size=1, val_check_interval=0.5
)
def _test_progress_bar_max_val_check_interval(
tmpdir, total_train_samples, train_batch_size, total_val_samples, val_batch_size, val_check_interval
):
world_size = 2
train_data = DataLoader(RandomDataset(32, total_train_samples), batch_size=train_batch_size)
val_data = DataLoader(RandomDataset(32, total_val_samples), batch_size=val_batch_size)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
num_sanity_val_steps=0,
max_epochs=1,
weights_summary=None,
val_check_interval=val_check_interval,
gpus=world_size,
accelerator="ddp",
)
trainer.fit(model, train_dataloader=train_data, val_dataloaders=val_data)
total_train_batches = total_train_samples // (train_batch_size * world_size)
val_check_batch = max(1, int(total_train_batches * val_check_interval))
assert trainer.val_check_batch == val_check_batch
val_checks_per_epoch = total_train_batches / val_check_batch
total_val_batches = total_val_samples // (val_batch_size * world_size)
assert trainer.progress_bar_callback.total_train_batches == total_train_batches
assert trainer.progress_bar_callback.total_val_batches == total_val_batches
total_val_batches = total_val_batches * val_checks_per_epoch
if trainer.is_global_zero:
assert trainer.progress_bar_callback.main_progress_bar.total == total_train_batches + total_val_batches
| true | true |
1c31771095f6882e62e5f10b15f6894542a873e5 | 1,395 | py | Python | src/relstorage/adapters/sqlite/txncontrol.py | enfold/relstorage | 9fcd526b537cb6537cc2ae33154b63096550f210 | [
"ZPL-2.1"
] | 40 | 2015-10-08T05:35:13.000Z | 2022-03-28T23:50:06.000Z | src/relstorage/adapters/sqlite/txncontrol.py | enfold/relstorage | 9fcd526b537cb6537cc2ae33154b63096550f210 | [
"ZPL-2.1"
] | 364 | 2015-03-23T15:25:42.000Z | 2022-03-17T08:41:34.000Z | src/relstorage/adapters/sqlite/txncontrol.py | enfold/relstorage | 9fcd526b537cb6537cc2ae33154b63096550f210 | [
"ZPL-2.1"
] | 33 | 2015-06-08T23:03:22.000Z | 2022-03-21T08:25:53.000Z | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2019 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..txncontrol import GenericTransactionControl
class Sqlite3TransactionControl(GenericTransactionControl):
    """SQLite-specific transaction control.

    Behaves like the generic implementation, except that the load
    connection's transaction is ended before the commit proceeds.
    """

    def commit_phase2(self, store_connection, txn, load_connection):
        # Terminate the load connection's transaction before committing so
        # that commit-time actions (such as SQLite's auto-checkpoint) observe
        # a state where this reader no longer holds old MVCC resources open.
        # See https://github.com/zodb/relstorage/issues/401
        load_connection.rollback_quietly()
        super(Sqlite3TransactionControl, self).commit_phase2(
            store_connection, txn, load_connection)
| 43.59375 | 93 | 0.67957 | true | true | |
1c3177dddf1dcf0545d75f68f1856da7a39f15fa | 1,570 | py | Python | app/feed/celery_worker.py | AndreMPCosta/padel-checker-pub | ae33727b5662754c9746a8b5adcdcf60b93bcb32 | [
"MIT"
] | null | null | null | app/feed/celery_worker.py | AndreMPCosta/padel-checker-pub | ae33727b5662754c9746a8b5adcdcf60b93bcb32 | [
"MIT"
] | null | null | null | app/feed/celery_worker.py | AndreMPCosta/padel-checker-pub | ae33727b5662754c9746a8b5adcdcf60b93bcb32 | [
"MIT"
] | null | null | null | from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime, timedelta
from os import environ, cpu_count
from celery import Celery
from celery.utils.log import get_task_logger
# Initialize celery
from app.feed.config import cities
from app.feed.fetcher import Zone
celery = Celery('tasks', broker=f'amqp://{environ.get("RABBITMQ_USER")}:{environ.get("RABBITMQ_PASSWORD")}'
f'@localhost:5672', backend='rpc://')
# Create logger - enable to display messages on task logger
celery_log = get_task_logger(__name__)
celery.conf.timezone = 'Europe/Lisbon'
@celery.task
def cycle_days():
processes = []
zones = []
base = datetime.today()
date_list = [(base + timedelta(days=x)).strftime('%Y-%m-%d') for x in range(7)]
all_ids = [x for x in cities.values()]
for _id in all_ids:
zones += [Zone(internal_date=date, id=_id) for date in date_list]
# zones = [Zone(internal_date=date) for date in date_list]
with ThreadPoolExecutor(max_workers=min(32, (cpu_count() or 1) + 4)) as executor:
for zone in zones:
processes.append(executor.submit(refresh_data, zone))
return {"message": "Refresh successful"}
@celery.task
def internal_refresh(z):
    """Celery task wrapper that refreshes a single zone via :func:`refresh_data`."""
    refresh_data(z)
# Pull data from API - Run Asynchronously with celery
def refresh_data(z):
    """Fetch all courts for zone ``z``, rebuild its club list, and log completion."""
    z.get_all_courts()
    z.court_list.build_clubs()
    # Reverse lookup: the city name whose configured id matches this zone's id.
    city_name = next(key for key, value in cities.items() if value == z.id)
    celery_log.info(f"Refresh Complete for zone {city_name} - {z.internal_date}")
| 32.040816 | 107 | 0.698089 | from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime, timedelta
from os import environ, cpu_count
from celery import Celery
from celery.utils.log import get_task_logger
from app.feed.config import cities
from app.feed.fetcher import Zone
celery = Celery('tasks', broker=f'amqp://{environ.get("RABBITMQ_USER")}:{environ.get("RABBITMQ_PASSWORD")}'
f'@localhost:5672', backend='rpc://')
celery_log = get_task_logger(__name__)
celery.conf.timezone = 'Europe/Lisbon'
@celery.task
def cycle_days():
processes = []
zones = []
base = datetime.today()
date_list = [(base + timedelta(days=x)).strftime('%Y-%m-%d') for x in range(7)]
all_ids = [x for x in cities.values()]
for _id in all_ids:
zones += [Zone(internal_date=date, id=_id) for date in date_list]
with ThreadPoolExecutor(max_workers=min(32, (cpu_count() or 1) + 4)) as executor:
for zone in zones:
processes.append(executor.submit(refresh_data, zone))
return {"message": "Refresh successful"}
@celery.task
def internal_refresh(z):
refresh_data(z)
def refresh_data(z):
z.get_all_courts()
z.court_list.build_clubs()
city_name = next(key for key, value in cities.items() if value == z.id)
celery_log.info(f"Refresh Complete for zone {city_name} - {z.internal_date}")
| true | true |
1c31798a26d718e325bf89bcf236a1a9a546715a | 2,056 | py | Python | sklvq/activations/_sigmoid.py | rickvanveen/LVQLib | 4fba52a14ed37b0444becb96ef09c40d38d263ff | [
"BSD-3-Clause"
] | 44 | 2020-10-21T19:54:29.000Z | 2022-03-23T15:43:52.000Z | sklvq/activations/_sigmoid.py | rickvanveen/LVQLib | 4fba52a14ed37b0444becb96ef09c40d38d263ff | [
"BSD-3-Clause"
] | 40 | 2020-10-30T13:34:23.000Z | 2021-06-30T09:32:59.000Z | sklvq/activations/_sigmoid.py | rickvanveen/LVQLib | 4fba52a14ed37b0444becb96ef09c40d38d263ff | [
"BSD-3-Clause"
] | 5 | 2021-03-15T13:10:03.000Z | 2021-06-22T16:32:38.000Z | import numpy as np
from typing import Union
from . import ActivationBaseClass
class Sigmoid(ActivationBaseClass):
    """Sigmoid function

    Class that holds the sigmoid function and gradient as discussed in `[1]`_

    Parameters
    ----------
    beta : int or float, optional, default=1
        Positive non-zero value that controls the steepness of the Sigmoid function.

    See also
    --------
    Identity, SoftPlus, Swish

    References
    ----------
    _`[1]` Villmann, T., Ravichandran, J., Villmann, A., Nebel, D., & Kaden, M. (2019). "Activation
    Functions for Generalized Learning Vector Quantization - A Performance Comparison", 2019.
    """

    __slots__ = "beta"

    def __init__(self, beta: Union[int, float] = 1):
        if beta <= 0:
            raise ValueError(
                "{}: Expected beta > 0, but got beta = {}".format(
                    type(self).__name__, beta
                )
            )

        self.beta = beta

    def __call__(self, x: np.ndarray) -> np.ndarray:
        r"""Computes the sigmoid function:
            .. math::

                f(\mathbf{x}) = \frac{1}{e^{-\beta \cdot \mathbf{x}} + 1}

        Parameters
        ----------
        x : ndarray of any shape.

        Returns
        -------
        ndarray of shape (x.shape)
            Elementwise evaluation of the sigmoid function.
        """
        return np.asarray(1.0 / (np.exp(-self.beta * x) + 1.0))

    def gradient(self, x: np.ndarray) -> np.ndarray:
        r"""Computes the sigmoid function's gradient with respect to x:
            .. math::

                \frac{\partial f}{\partial \mathbf{x}} = \beta \cdot f(\mathbf{x}) \cdot (1 - f(\mathbf{x}))
                = \frac{\beta \cdot e^{\beta \cdot \mathbf{x}}}{(e^{\beta \cdot \mathbf{x}} + 1)^2}

        Parameters
        ----------
        x : ndarray of any shape

        Returns
        -------
        ndarray of shape (x.shape)
            Elementwise evaluation of the sigmoid function's gradient.
        """
        # The direct quotient form computed np.exp(self.beta * x), which
        # overflows to inf once beta * x exceeds ~709 and then yields
        # inf / inf = nan instead of the correct gradient of ~0. Expressing
        # the gradient through the (saturating) sigmoid itself is
        # algebraically identical but numerically stable.
        s = self(x)
        return np.asarray(self.beta * s * (1.0 - s))
| 28.164384 | 139 | 0.540856 | import numpy as np
from typing import Union
from . import ActivationBaseClass
class Sigmoid(ActivationBaseClass):
__slots__ = "beta"
def __init__(self, beta: Union[int, float] = 1):
if beta <= 0:
raise ValueError(
"{}: Expected beta > 0, but got beta = {}".format(
type(self).__name__, beta
)
)
self.beta = beta
def __call__(self, x: np.ndarray) -> np.ndarray:
return np.asarray(1.0 / (np.exp(-self.beta * x) + 1.0))
def gradient(self, x: np.ndarray) -> np.ndarray:
exp = np.exp(self.beta * x)
return np.asarray((self.beta * exp) / (exp + 1.0) ** 2)
| true | true |
1c317a120d5c70dac6a81e0866721442d85a0a3c | 26,751 | py | Python | src/two_way_evaluation.py | furgerf/GAN-for-dermatologic-imaging | e90b06c46c7693e984a4c5b067e18460113cd23b | [
"Apache-2.0"
] | null | null | null | src/two_way_evaluation.py | furgerf/GAN-for-dermatologic-imaging | e90b06c46c7693e984a4c5b067e18460113cd23b | [
"Apache-2.0"
] | 9 | 2020-09-26T01:22:00.000Z | 2022-01-22T18:00:52.000Z | src/two_way_evaluation.py | furgerf/GAN-for-dermatologic-imaging | e90b06c46c7693e984a4c5b067e18460113cd23b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import pickle
import time
from abc import abstractmethod
from datetime import datetime, timedelta
import numpy as np
import tensorflow as tf
from evaluation import Evaluation
from generator_loss_args import GeneratorLossArgs
from perceptual_scores import PerceptualScores
from two_way_metrics import TwoWayMetrics
from utils import get_memory_usage_string, logistic
class TwoWayEvaluation(Evaluation):
  def __init__(self, model, config):
    """
    Initializes a two-way (cycle) evaluation that trains one
    generator/discriminator pair per translation direction.

    :param model: Model factory providing generators, discriminators, and
      learning rates (consumed in set_up_model).
    :param config: Evaluation configuration. Must not request noise input,
      since the backward direction would have to translate back to noise.
    """
    assert not config.has_noise_input, "we don't want to translate back to noise"
    # Networks, optimizers, and checkpoint managers are created later in
    # set_up_model(); only the perceptual-score helpers are built here.
    self._first_generator = None
    self._first_generator_optimizer = None
    self._first_discriminator = None
    self._first_discriminator_optimizer = None
    self._second_generator = None
    self._second_generator_optimizer = None
    self._second_discriminator = None
    self._second_discriminator_optimizer = None
    self._checkpoint = None
    self._final_checkpoint = None
    # Perceptual scores only apply to image domains: one helper per direction
    # whose target (forward) or input (reverse) is an image.
    self._perceptual_scores = PerceptualScores(config) if config.target_type == "image" else None
    self._reverse_perceptual_scores = PerceptualScores(config) if config.input_type == "image" else None
    super(TwoWayEvaluation, self).__init__(model, config)
  def set_up_model(self):
    """
    Builds both generator/discriminator pairs, wraps their call methods with
    defun, creates the Adam optimizers and checkpoint managers, prints the
    model summary, and initializes the perceptual-score helpers.
    """
    tf.logging.info("Setting up models with learing rate {} for G, {} for D".format(
        self._model.gen_learning, self._model.disc_learning))
    self._first_generator = self._model.get_generator()
    self._first_discriminator = self._model.get_discriminator()
    # The second generator maps back into the input domain, so while building
    # it the "colored target" flag must reflect the *input* colorization;
    # the original flag is restored afterwards.
    has_colored_target = self._config.has_colored_target
    self._config.has_colored_target = self._config.has_colored_input
    self._second_generator = self._model.get_generator()
    self._config.has_colored_target = has_colored_target
    self._second_discriminator = self._model.get_discriminator()
    # defun gives 10 secs/epoch performance boost
    self._first_generator.call = tf.contrib.eager.defun(self._first_generator.call)
    self._first_discriminator.call = tf.contrib.eager.defun(self._first_discriminator.call)
    self._second_generator.call = tf.contrib.eager.defun(self._second_generator.call)
    self._second_discriminator.call = tf.contrib.eager.defun(self._second_discriminator.call)
    self._first_generator_optimizer = tf.train.AdamOptimizer(self._model.gen_learning)
    self._first_discriminator_optimizer = tf.train.AdamOptimizer(self._model.disc_learning)
    self._second_generator_optimizer = tf.train.AdamOptimizer(self._model.gen_learning)
    self._second_discriminator_optimizer = tf.train.AdamOptimizer(self._model.disc_learning)
    checkpoint = tf.train.Checkpoint(
        first_generator_optimizer=self._first_generator_optimizer,
        first_discriminator_optimizer=self._first_discriminator_optimizer,
        first_generator=self._first_generator,
        first_discriminator=self._first_discriminator,
        second_generator_optimizer=self._second_generator_optimizer,
        second_discriminator_optimizer=self._second_discriminator_optimizer,
        second_generator=self._second_generator,
        second_discriminator=self._second_discriminator)
    # Keep every checkpoint when requested; otherwise rotate the 5 newest.
    self._checkpoint = tf.contrib.checkpoint.CheckpointManager(checkpoint, self._config.checkpoint_dir,
        max_to_keep=None if self._config.keep_all_checkpoints else 5)
    if self._config.keep_final_checkpoints:
      # A second, independent manager storing "final" checkpoints in a
      # separate directory so they survive rotation of the regular ones.
      final_checkpoint = tf.train.Checkpoint(
          first_generator_optimizer=self._first_generator_optimizer,
          first_discriminator_optimizer=self._first_discriminator_optimizer,
          first_generator=self._first_generator,
          first_discriminator=self._first_discriminator,
          second_generator_optimizer=self._second_generator_optimizer,
          second_discriminator_optimizer=self._second_discriminator_optimizer,
          second_generator=self._second_generator,
          second_discriminator=self._second_discriminator)
      self._final_checkpoint = tf.contrib.checkpoint.CheckpointManager(final_checkpoint, self._config.final_checkpoint_dir,
          max_to_keep=None if self._config.keep_all_checkpoints else 5)
    try:
      self._model.print_model_summary(self._first_generator, self._second_discriminator, self.epoch_sample_input)
    except Exception as ex:
      tf.logging.warning("Unable to print model summary ({}: {})".format(ex.__class__.__name__, ex))
    if self._perceptual_scores:
      self._perceptual_scores.initialize()
    if self._reverse_perceptual_scores:
      # The reverse direction scores generated *inputs*, so it is initialized
      # against the raw data directory — assumed from the data_dir argument;
      # confirm against PerceptualScores.initialize.
      self._reverse_perceptual_scores.initialize(self._config.data_dir)
@property
@abstractmethod
def data_set(self):
    """
    The data set to train on. Each batch should consist of a tuple of generator inputs in the first
    and in the second domain.
    """
    pass
@property
@abstractmethod
def extra_discriminator_data_set(self):
    """
    The data set of additional real samples for the SECOND discriminator to train on.
    Only makes sense for non-conditioned discriminators.
    """
    pass
@property
@abstractmethod
def test_data_set(self):
    """
    The data set to evaluate on. Each batch should consist of a tuple of generator inputs in the first
    and in the second domain - same format as the main data set.
    """
    pass
@property
@abstractmethod
def epoch_sample_input(self):
    """
    Generator input for the generation of epoch samples.
    """
    pass
class TrainingResult:
    """Value object bundling the losses, discriminator outputs and gradients of one training step."""

    def __init__(self, gen_loss, gen_losses, disc_loss, disc_on_real, disc_on_generated,
                 gen_gradients, disc_gradients):
        # pylint: disable=too-many-arguments
        self.gen_loss = gen_loss                    # total generator loss (None for disc-only steps)
        self.gen_losses = gen_losses                # dict of individual generator loss terms (None for disc-only steps)
        self.disc_loss = disc_loss                  # discriminator loss
        self.disc_on_real = disc_on_real            # discriminator logits on real samples
        self.disc_on_generated = disc_on_generated  # discriminator logits on generated samples (None if no generator)
        self.gen_gradients = gen_gradients          # generator gradients (None for disc-only steps)
        self.disc_gradients = disc_gradients        # discriminator gradients
def train_generator_discriminator_pair(self, generator, discriminator, reverse_generator, batch_input, batch_target):
    """Run one joint G/D step in the forward direction plus a (non-trained)
    cycle reconstruction through ``reverse_generator``.

    Returns a TrainingResult with losses, discriminator outputs and the
    gradients for ``generator`` and ``discriminator``.
    """
    conditioned = self._config.conditioned_discriminator
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # FORWARD DIRECTION
        generated_images = generator(batch_input, training=True)
        if batch_input.shape[-1] == 4:
            # 4-channel input (presumably RGB + segmentation — confirm): discriminate only the RGB part of the target
            assert batch_target.shape[-1] == 4
            real_target = batch_target[:, :, :, :3]
        else:
            assert batch_target.shape[-1] in [1, 3] and batch_input.shape[-1] in [1, 3]
            real_target = batch_target
        # a conditioned discriminator additionally sees the generator input
        disc_input_real = tf.concat([batch_input, real_target], axis=-1) if conditioned else real_target
        disc_input_generated = tf.concat([batch_input, generated_images], axis=-1) if conditioned else generated_images
        disc_on_real = discriminator(disc_input_real, training=True)
        disc_on_generated = discriminator(disc_input_generated, training=True)
        # BACKWARD DIRECTION - the reconstruction is neither trained nor discriminated;
        # re-attach the extra channel(s) when the input carries more than RGB
        if batch_input.shape[-1] > 3:
            reconstruction_input = tf.concat([generated_images, batch_input[:, :, :, 3:]], axis=-1)
        else:
            reconstruction_input = generated_images
        reconstructed_images = reverse_generator(reconstruction_input, training=False)
        if self._config.loss_identity:
            targets = batch_target
            identity_images = generator(batch_target, training=True)
        else:
            targets = None
            identity_images = None
        gen_losses = self._model.gen_loss(disc_on_generated, GeneratorLossArgs(
            generated_images, batch_input, targets=targets,
            reconstructed_images=reconstructed_images, identity_images=identity_images))
        gen_loss = sum(gen_losses.values())
        disc_loss = self._model.disc_loss(disc_on_real, disc_on_generated)
    # compute gradients outside the tape contexts
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.variables)
    return TwoWayEvaluation.TrainingResult(gen_loss, gen_losses, disc_loss, disc_on_real, disc_on_generated,
                                           gradients_of_generator, gradients_of_discriminator)
def train_discriminator(self, generator, discriminator, batch_input, batch_target):
    """Extra discriminator-only training step.

    When ``generator`` is None the discriminator is evaluated on real samples
    only; otherwise generated samples are discriminated as well. Only the
    discriminator's gradients are computed.
    """
    conditioned = self._config.conditioned_discriminator
    with tf.GradientTape() as disc_tape:
        generated_images = generator(batch_input, training=True) if generator else None
        if batch_input.shape[-1] == 4:
            # 4-channel input: discriminate only the first three target channels
            assert batch_target.shape[-1] == 4
            real_target = batch_target[:, :, :, :3]
        else:
            assert batch_target.shape[-1] == 3 and batch_input.shape[-1] == 3
            real_target = batch_target
        disc_input_real = tf.concat([batch_input, real_target], axis=-1) if conditioned else real_target
        disc_on_real = discriminator(disc_input_real, training=True)
        if generator:
            disc_input_generated = tf.concat([batch_input, generated_images], axis=-1) \
                if conditioned else generated_images
            disc_on_generated = discriminator(disc_input_generated, training=True)
        else:
            disc_on_generated = None
        disc_loss = self._model.disc_loss(disc_on_real, disc_on_generated)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.variables)
    return TwoWayEvaluation.TrainingResult(None, None, disc_loss, disc_on_real, disc_on_generated,
                                           None, gradients_of_discriminator)
def train(self, epochs, metrics_writer):
    """Run the full two-way adversarial training loop.

    Per epoch: optional extra discriminator-only steps, the joint G/D steps for
    both directions, optional extra real-only steps for the second
    discriminator, epoch samples, TensorBoard summaries, checkpoints,
    periodic perceptual scores / discriminator-overfitting checks, and one
    metrics row written to ``metrics_writer``.

    NOTE(review): relies on TF 1.x eager mode with tf.contrib summaries and
    checkpoints — confirm against the project's TF version.
    """
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    try:
        tf.logging.info("Memory usage before training: {}".format(get_memory_usage_string()))
    except: # pylint: disable=bare-except
        tf.logging.warning("Unable to get memory usage, no GPU available?")
    assert not self._config.train_disc_on_previous_images \
        and not self._config.real_image_noise_stdev, "not implemented"
    checkpoint_interval = 25 # always have same interval for easier epoch number -> checkpoint-number conversion
    gradients_interval = epochs // 5 // 25 * 25 # aim for at least 5 gradients in total but have it a multiple of 25
    gradients_interval = 25 if gradients_interval == 0 else min(gradients_interval, 150)
    if self._config.scores_every_epoch:
        scores_interval = 1
    else:
        scores_interval = epochs // 10 // 10 * 10 # aim for at least 10 perceptual scores in total but have it a multiple of 10
        scores_interval = 10 if scores_interval == 0 else min(scores_interval, 25)
    tf.logging.info("Intervals: checkpoint {}, scores {}, gradients {}".format(checkpoint_interval, scores_interval, gradients_interval))
    for epoch in range(epochs):
        start = time.time()
        metrics = TwoWayMetrics(epoch+1, 4)
        # NOTE, KEEP IN MIND: "first"/"second" refers to the input domain
        # ie "first generator" is the generator that receives input from the first domain (and generates for the second)
        if self._config.extra_disc_step_real or self._config.extra_disc_step_both:
            # extra discriminator-only pass over the data before the joint G/D steps
            for batch_number, batch in enumerate(self.data_set):
                batch_first_domain, batch_second_domain = batch
                forward_result = self.train_discriminator(self._first_generator if self._config.extra_disc_step_both else None,
                    self._second_discriminator, batch_first_domain, batch_second_domain)
                backward_result = self.train_discriminator(self._second_generator if self._config.extra_disc_step_both else None,
                    self._first_discriminator, batch_second_domain, batch_first_domain)
                self._second_discriminator_optimizer.apply_gradients(zip(forward_result.disc_gradients, self._second_discriminator.variables))
                self._first_discriminator_optimizer.apply_gradients(zip(backward_result.disc_gradients, self._first_discriminator.variables))
        for batch_number, batch in enumerate(self.data_set):
            batch_first_domain, batch_second_domain = batch
            # evaluate models
            forward_result = self.train_generator_discriminator_pair(self._first_generator, self._second_discriminator,
                self._second_generator, batch_first_domain, batch_second_domain)
            backward_result = self.train_generator_discriminator_pair(self._second_generator, self._first_discriminator,
                self._first_generator, batch_second_domain, batch_first_domain)
            # store results
            metrics.add_losses((forward_result.gen_losses, backward_result.gen_losses), (backward_result.disc_loss, forward_result.disc_loss))
            metrics.add_discriminations((logistic(backward_result.disc_on_real), logistic(forward_result.disc_on_real)),
                (logistic(backward_result.disc_on_generated), logistic(forward_result.disc_on_generated)))
            # train
            self._first_generator_optimizer.apply_gradients(zip(forward_result.gen_gradients, self._first_generator.variables))
            self._second_discriminator_optimizer.apply_gradients(zip(forward_result.disc_gradients, self._second_discriminator.variables))
            self._second_generator_optimizer.apply_gradients(zip(backward_result.gen_gradients, self._second_generator.variables))
            self._first_discriminator_optimizer.apply_gradients(zip(backward_result.disc_gradients, self._first_discriminator.variables))
            if batch_number == 0:
                # work with the gradients of the first (rather than last) batch since here, the batch is full for sure
                for i, variable in enumerate(self._first_generator.variables):
                    if "batch_normalization" in variable.name or forward_result.gen_gradients[i] is None:
                        continue
                    tf.contrib.summary.histogram(variable.name.replace(":", "_"), forward_result.gen_gradients[i], "gradients/first_gen", epoch)
                for i, variable in enumerate(self._first_discriminator.variables):
                    if "batch_normalization" in variable.name or backward_result.disc_gradients[i] is None:
                        continue
                    tf.contrib.summary.histogram(variable.name.replace(":", "_"), backward_result.disc_gradients[i], "gradients/first_disc", epoch)
                for i, variable in enumerate(self._second_generator.variables):
                    if "batch_normalization" in variable.name or backward_result.gen_gradients[i] is None:
                        continue
                    tf.contrib.summary.histogram(variable.name.replace(":", "_"), backward_result.gen_gradients[i], "gradients/second_gen", epoch)
                for i, variable in enumerate(self._second_discriminator.variables):
                    if "batch_normalization" in variable.name or forward_result.disc_gradients[i] is None:
                        continue
                    tf.contrib.summary.histogram(variable.name.replace(":", "_"), forward_result.disc_gradients[i], "gradients/second_disc", epoch)
                if (epoch+1) % gradients_interval == 0 or epoch == epochs - 1:
                    # periodically pickle raw gradients (batch-norm variables excluded) for offline analysis
                    first_generator_gradients = [(variable.name, forward_result.gen_gradients[i].numpy()) \
                        for i, variable in enumerate(self._first_generator.variables) if "batch_normalization" not in variable.name]
                    first_discriminator_gradients = [(variable.name, backward_result.disc_gradients[i].numpy()) \
                        for i, variable in enumerate(self._first_discriminator.variables) if "batch_normalization" not in variable.name]
                    second_generator_gradients = [(variable.name, backward_result.gen_gradients[i].numpy()) \
                        for i, variable in enumerate(self._second_generator.variables) if "batch_normalization" not in variable.name]
                    second_discriminator_gradients = [(variable.name, forward_result.disc_gradients[i].numpy()) \
                        for i, variable in enumerate(self._second_discriminator.variables) if "batch_normalization" not in variable.name]
                    with open(os.path.join(self._config.gradients_dir, "gradients_at_epoch_{:04d}.pkl".format(epoch+1)), "wb") as fh:
                        pickle.dump((first_generator_gradients, forward_result.gen_loss, second_generator_gradients, backward_result.gen_loss,
                            first_discriminator_gradients, backward_result.disc_loss, second_discriminator_gradients, forward_result.disc_loss), fh)
        if self.extra_discriminator_data_set:
            # train the second discriminator on additional real-only samples
            assert not self._config.conditioned_discriminator and \
                not self._config.train_disc_on_previous_images and \
                not self._config.real_image_noise_stdev, "not implemented"
            for disc_input in self.extra_discriminator_data_set:
                with tf.GradientTape() as disc_tape:
                    disc_on_real = self._second_discriminator(disc_input, training=True)
                    disc_loss = self._model.disc_loss(disc_on_real, []) # no generated samples
                gradients_of_discriminator = disc_tape.gradient(disc_loss, self._second_discriminator.variables)
                self._second_discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, self._second_discriminator.variables))
        _ = self.save_epoch_samples((self._first_generator, self._second_generator),
            (self._first_discriminator, self._second_discriminator), epoch+1, (epoch+1) % 5 == 0)
        tf.contrib.summary.histogram("first_gen", metrics.first_gen_loss, "loss", epoch)
        tf.contrib.summary.histogram("first_disc", metrics.first_disc_loss, "loss", epoch)
        tf.contrib.summary.histogram("second_gen", metrics.second_gen_loss, "loss", epoch)
        tf.contrib.summary.histogram("second_disc", metrics.second_disc_loss, "loss", epoch)
        tf.contrib.summary.histogram("first_on_real", metrics.first_disc_on_real, "predictions", epoch)
        tf.contrib.summary.histogram("first_on_gen", metrics.first_disc_on_generated, "predictions", epoch)
        tf.contrib.summary.histogram("second_on_real", metrics.second_disc_on_real, "predictions", epoch)
        tf.contrib.summary.histogram("second_on_gen", metrics.second_disc_on_generated, "predictions", epoch)
        if (epoch+1) % checkpoint_interval == 0 or epoch == epochs - 1:
            self._checkpoint.save()
        elif epoch > epochs - 6 and self._final_checkpoint:
            # keep the last few epochs in a separate checkpoint dir when configured
            self._final_checkpoint.save()
        if epoch == 0 or epoch == 4 or (epoch+1) % 10 == 0:
            memory_usage = ""
            if epoch == 0 or epoch == 4 or (epoch+1) % 50 == 0:
                try:
                    memory_usage = " - memory: " + get_memory_usage_string()
                except: # pylint: disable=bare-except
                    memory_usage = " - Unable to get memory usage, no GPU available?"
            # linear extrapolation of remaining time (minutes) from the average epoch duration
            time_remaining = (time.time()-self._config.start_time)/(epoch+1)*(epochs-epoch-1)/60
            tf.logging.info("{}/{}: Round time: {:.1f}m - ETA {:%H:%M} ({:.1f}h){}".format(epoch + 1, epochs,
                (time.time()-start)/60, datetime.now() + timedelta(minutes=time_remaining), time_remaining/60,
                memory_usage))
            tf.logging.info("{}/{} FWD: Loss G {:.2f}+-{:.3f}, D {:.2f}+-{:.3f}; D on real {:.3f}+-{:.3f}, on fake {:.3f}+-{:.3f}".format(
                epoch + 1, epochs,
                np.mean(metrics.first_gen_loss), np.std(metrics.first_gen_loss),
                np.mean(metrics.second_disc_loss), np.std(metrics.second_disc_loss),
                np.mean(metrics.second_disc_on_real), np.std(metrics.second_disc_on_real),
                np.mean(metrics.second_disc_on_generated), np.std(metrics.second_disc_on_generated)))
            tf.logging.info("{}/{} BWD: Loss G {:.2f}+-{:.3f}, D {:.2f}+-{:.3f}; D on real {:.3f}+-{:.3f}, on fake {:.3f}+-{:.3f}".format(
                epoch + 1, epochs,
                np.mean(metrics.second_gen_loss), np.std(metrics.second_gen_loss),
                np.mean(metrics.first_disc_loss), np.std(metrics.first_disc_loss),
                np.mean(metrics.first_disc_on_real), np.std(metrics.first_disc_on_real),
                np.mean(metrics.first_disc_on_generated), np.std(metrics.first_disc_on_generated)))
        is_near_interval = \
            (epoch+0) % scores_interval == 0 or \
            (epoch+1) % scores_interval == 0 or \
            (epoch+2) % scores_interval == 0
        if (epoch > 1 or self._config.scores_every_epoch) and (epoch > epochs - 6 or is_near_interval):
            # default all scores to NaN so summaries/metrics can always be written even if a scorer is disabled
            first_fid = first_mmd = first_clustering_high = first_clustering_low = first_combined_fid = tf.convert_to_tensor(np.nan)
            first_low_level_fids = [tf.convert_to_tensor(np.nan)] * metrics.n_low_level_fids
            second_fid = second_mmd = second_clustering_high = second_clustering_low = second_combined_fid = tf.convert_to_tensor(np.nan)
            second_low_level_fids = [tf.convert_to_tensor(np.nan)] * metrics.n_low_level_fids
            if self._perceptual_scores:
                first_fid, first_mmd, first_clustering_high, first_clustering_low, first_low_level_fids, first_combined_fid = \
                    self._perceptual_scores.compute_scores_from_generator(self._first_generator, self.data_set.map(lambda x, y: x))
                tf.logging.warning("{}/{}: FWD: Computed perceptual scores: FID={:.1f}, MMD={:.3f}, clustering-high={:.3f}, clustering-low={:.3f}".format(
                    epoch + 1, epochs, first_fid, first_mmd, first_clustering_high, first_clustering_low))
            if self._reverse_perceptual_scores:
                second_fid, second_mmd, second_clustering_high, second_clustering_low, second_low_level_fids, second_combined_fid = \
                    self._reverse_perceptual_scores.compute_scores_from_generator(self._second_generator, self.data_set.map(lambda x, y: y))
                tf.logging.warning("{}/{}: BWD: Computed perceptual scores: FID={:.1f}, MMD={:.3f}, clustering-high={:.3f}, clustering-low={:.3f}".format(
                    epoch + 1, epochs, second_fid, second_mmd, second_clustering_high, second_clustering_low))
            metrics.add_perceptual_scores((first_fid, second_fid), (first_mmd, second_mmd), (first_clustering_high, second_clustering_high),
                (first_clustering_low, second_clustering_low), (first_low_level_fids, second_low_level_fids), (first_combined_fid, second_combined_fid))
            tf.contrib.summary.scalar("first_fid", first_fid, "perceptual", epoch)
            tf.contrib.summary.scalar("first_mmd", first_mmd, "perceptual", epoch)
            tf.contrib.summary.scalar("first_clustering_high", first_clustering_high, "perceptual", epoch)
            tf.contrib.summary.scalar("first_clustering_low", first_clustering_low, "perceptual", epoch)
            # not adding low-level FIDs to TB since I'm not using it anyway
            tf.contrib.summary.scalar("first_combined_fid", first_combined_fid, "perceptual", epoch)
            tf.contrib.summary.scalar("second_fid", second_fid, "perceptual", epoch)
            tf.contrib.summary.scalar("second_mmd", second_mmd, "perceptual", epoch)
            tf.contrib.summary.scalar("second_clustering_high", second_clustering_high, "perceptual", epoch)
            tf.contrib.summary.scalar("second_clustering_low", second_clustering_low, "perceptual", epoch)
            # not adding low-level FIDs to TB since I'm not using it anyway
            tf.contrib.summary.scalar("second_combined_fid", second_combined_fid, "perceptual", epoch)
        if self.test_data_set and (epoch > 1 or self._config.scores_every_epoch) and (epoch > epochs - 6 or is_near_interval):
            # discriminator-overfitting check: compare predictions on training vs. held-out test data
            first_disc_on_training = self._discriminate_data_set(self._first_discriminator, self.data_set.map(lambda x, y: (y, x[:, :, :, :3])))
            first_disc_on_training_mean = np.mean(first_disc_on_training)
            first_disc_on_training_std = np.std(first_disc_on_training)
            first_disc_on_test = self._discriminate_data_set(self._first_discriminator, self.test_data_set.map(lambda x, y: (y, x[:, :, :, :3])))
            first_disc_on_test_mean = np.mean(first_disc_on_test)
            first_disc_on_test_std = np.std(first_disc_on_test)
            second_disc_on_training = self._discriminate_data_set(self._second_discriminator, self.data_set.map(lambda x, y: (x, y[:, :, :, :3])))
            second_disc_on_training_mean = np.mean(second_disc_on_training)
            second_disc_on_training_std = np.std(second_disc_on_training)
            second_disc_on_test = self._discriminate_data_set(self._second_discriminator, self.test_data_set.map(lambda x, y: (x, y[:, :, :, :3])))
            second_disc_on_test_mean = np.mean(second_disc_on_test)
            second_disc_on_test_std = np.std(second_disc_on_test)
            tf.logging.warning("{}/{}: First disc on training: {:.3f}+-{:.3f}, on test: {:.3f}+-{:.3f}, diff: {:.3f}".format(
                epoch + 1, epochs, first_disc_on_training_mean, first_disc_on_training_std, first_disc_on_test_mean, first_disc_on_test_std,
                first_disc_on_training_mean - first_disc_on_test_mean))
            tf.logging.warning("{}/{}: Second disc on training: {:.3f}+-{:.3f}, on test: {:.3f}+-{:.3f}, diff: {:.3f}".format(
                epoch + 1, epochs, second_disc_on_training_mean, second_disc_on_training_std, second_disc_on_test_mean, second_disc_on_test_std,
                second_disc_on_training_mean - second_disc_on_test_mean))
            metrics.add_disc_on_training_test((first_disc_on_training_mean, second_disc_on_training_mean), (first_disc_on_training_std,
                second_disc_on_training_std), (first_disc_on_test_mean, second_disc_on_test_mean), (first_disc_on_test_std, second_disc_on_test_std))
            tf.contrib.summary.scalar("first_disc_on_training_mean", first_disc_on_training_mean, "disc_overfitting", epoch)
            tf.contrib.summary.scalar("first_disc_on_training_std", first_disc_on_training_std, "disc_overfitting", epoch)
            tf.contrib.summary.scalar("first_disc_on_test_mean", first_disc_on_test_mean, "disc_overfitting", epoch)
            tf.contrib.summary.scalar("first_disc_on_test_std", first_disc_on_test_std, "disc_overfitting", epoch)
            tf.contrib.summary.scalar("second_disc_on_training_mean", second_disc_on_training_mean, "disc_overfitting", epoch)
            tf.contrib.summary.scalar("second_disc_on_training_std", second_disc_on_training_std, "disc_overfitting", epoch)
            tf.contrib.summary.scalar("second_disc_on_test_mean", second_disc_on_test_mean, "disc_overfitting", epoch)
            tf.contrib.summary.scalar("second_disc_on_test_std", second_disc_on_test_std, "disc_overfitting", epoch)
        metrics_writer.writerow(metrics.get_row_data())
@abstractmethod
def _plot_epoch_samples(self, generator, discriminator):
    """Plot the current epoch's sample outputs of ``generator``, scored by ``discriminator``.

    Implemented by subclasses.
    """
    pass
@abstractmethod
def _plot_hq_epoch_samples(self, generated_samples, discriminator_probabilities):
    """Plot already-generated high-quality samples with their discriminator probabilities.

    Implemented by subclasses.
    """
    pass
| 61.215103 | 148 | 0.735001 |
import os
import pickle
import time
from abc import abstractmethod
from datetime import datetime, timedelta
import numpy as np
import tensorflow as tf
from evaluation import Evaluation
from generator_loss_args import GeneratorLossArgs
from perceptual_scores import PerceptualScores
from two_way_metrics import TwoWayMetrics
from utils import get_memory_usage_string, logistic
class TwoWayEvaluation(Evaluation):
def __init__(self, model, config):
    """Initialize attribute placeholders; the actual networks, optimizers and
    checkpoints are created later in set_up_model()."""
    assert not config.has_noise_input, "we don't want to translate back to noise"
    # placeholders for both generator/discriminator pairs and their optimizers
    for role in ("generator", "discriminator"):
        for side in ("first", "second"):
            setattr(self, "_{}_{}".format(side, role), None)
            setattr(self, "_{}_{}_optimizer".format(side, role), None)
    self._checkpoint = None
    self._final_checkpoint = None
    # perceptual scoring is only meaningful when the respective domain consists of images
    self._perceptual_scores = PerceptualScores(config) if config.target_type == "image" else None
    self._reverse_perceptual_scores = PerceptualScores(config) if config.input_type == "image" else None
    super(TwoWayEvaluation, self).__init__(model, config)
def set_up_model(self):
    """Build both generator/discriminator pairs, wrap them with defun, create
    their Adam optimizers and checkpoint managers, and initialize the
    perceptual-score helpers.

    Fixes: the identical Checkpoint keyword set was duplicated for the regular
    and final checkpoint (maintenance hazard) and the log message misspelled
    "learning".
    """
    tf.logging.info("Setting up models with learning rate {} for G, {} for D".format(
        self._model.gen_learning, self._model.disc_learning))
    self._first_generator = self._model.get_generator()
    self._first_discriminator = self._model.get_discriminator()
    # the second generator maps back into the input domain, so its output
    # colour mode must follow the input's; temporarily swap the config flag
    has_colored_target = self._config.has_colored_target
    self._config.has_colored_target = self._config.has_colored_input
    self._second_generator = self._model.get_generator()
    self._config.has_colored_target = has_colored_target
    self._second_discriminator = self._model.get_discriminator()
    # defun gives 10 secs/epoch performance boost
    self._first_generator.call = tf.contrib.eager.defun(self._first_generator.call)
    self._first_discriminator.call = tf.contrib.eager.defun(self._first_discriminator.call)
    self._second_generator.call = tf.contrib.eager.defun(self._second_generator.call)
    self._second_discriminator.call = tf.contrib.eager.defun(self._second_discriminator.call)
    self._first_generator_optimizer = tf.train.AdamOptimizer(self._model.gen_learning)
    self._first_discriminator_optimizer = tf.train.AdamOptimizer(self._model.disc_learning)
    self._second_generator_optimizer = tf.train.AdamOptimizer(self._model.gen_learning)
    self._second_discriminator_optimizer = tf.train.AdamOptimizer(self._model.disc_learning)
    # one shared kwargs set for both checkpoint managers (previously duplicated inline)
    checkpoint_kwargs = dict(
        first_generator_optimizer=self._first_generator_optimizer,
        first_discriminator_optimizer=self._first_discriminator_optimizer,
        first_generator=self._first_generator,
        first_discriminator=self._first_discriminator,
        second_generator_optimizer=self._second_generator_optimizer,
        second_discriminator_optimizer=self._second_discriminator_optimizer,
        second_generator=self._second_generator,
        second_discriminator=self._second_discriminator)
    max_to_keep = None if self._config.keep_all_checkpoints else 5
    self._checkpoint = tf.contrib.checkpoint.CheckpointManager(
        tf.train.Checkpoint(**checkpoint_kwargs), self._config.checkpoint_dir, max_to_keep=max_to_keep)
    if self._config.keep_final_checkpoints:
        # separate manager (with its own Checkpoint object) for the final-epochs directory
        self._final_checkpoint = tf.contrib.checkpoint.CheckpointManager(
            tf.train.Checkpoint(**checkpoint_kwargs), self._config.final_checkpoint_dir, max_to_keep=max_to_keep)
    try:
        self._model.print_model_summary(self._first_generator, self._second_discriminator, self.epoch_sample_input)
    except Exception as ex:  # best effort only: a failed summary must not abort training
        tf.logging.warning("Unable to print model summary ({}: {})".format(ex.__class__.__name__, ex))
    if self._perceptual_scores:
        self._perceptual_scores.initialize()
    if self._reverse_perceptual_scores:
        self._reverse_perceptual_scores.initialize(self._config.data_dir)
@property
@abstractmethod
def data_set(self):
    """The training data set; each batch is a tuple of generator inputs for the first and second domain."""
    pass
@property
@abstractmethod
def extra_discriminator_data_set(self):
    """Additional real samples for the second discriminator; only used for non-conditioned discriminators."""
    pass
@property
@abstractmethod
def test_data_set(self):
    """Held-out data set in the same batch format as ``data_set``."""
    pass
@property
@abstractmethod
def epoch_sample_input(self):
    """Generator input used to produce the per-epoch sample images."""
    pass
class TrainingResult:
    """Value object bundling the losses, discriminator outputs and gradients of one training step."""

    def __init__(self, gen_loss, gen_losses, disc_loss, disc_on_real, disc_on_generated,
                 gen_gradients, disc_gradients):
        # pylint: disable=too-many-arguments
        self.gen_loss = gen_loss                    # total generator loss (None for disc-only steps)
        self.gen_losses = gen_losses                # dict of individual generator loss terms (None for disc-only steps)
        self.disc_loss = disc_loss                  # discriminator loss
        self.disc_on_real = disc_on_real            # discriminator logits on real samples
        self.disc_on_generated = disc_on_generated  # discriminator logits on generated samples (None if no generator)
        self.gen_gradients = gen_gradients          # generator gradients (None for disc-only steps)
        self.disc_gradients = disc_gradients        # discriminator gradients
def train_generator_discriminator_pair(self, generator, discriminator, reverse_generator, batch_input, batch_target):
    """Run one joint G/D step in the forward direction plus a (non-trained)
    cycle reconstruction through ``reverse_generator``; returns a
    TrainingResult with losses, discriminator outputs and gradients."""
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # forward direction: generate, then discriminate real and generated targets
        generated_images = generator(batch_input, training=True)
        if batch_input.shape[-1] == 4:
            # 4-channel input: only the first three target channels are discriminated
            assert batch_target.shape[-1] == 4
            disc_input_real = tf.concat([batch_input, batch_target[:, :, :, :3]], axis=-1) \
                if self._config.conditioned_discriminator else batch_target[:, :, :, :3]
            disc_input_generated = tf.concat([batch_input, generated_images], axis=-1) \
                if self._config.conditioned_discriminator else generated_images
        else:
            assert batch_target.shape[-1] in [1, 3] and batch_input.shape[-1] in [1, 3]
            disc_input_real = tf.concat([batch_input, batch_target], axis=-1) \
                if self._config.conditioned_discriminator else batch_target
            disc_input_generated = tf.concat([batch_input, generated_images], axis=-1) \
                if self._config.conditioned_discriminator else generated_images
        disc_on_real = discriminator(disc_input_real, training=True)
        disc_on_generated = discriminator(disc_input_generated, training=True)
        # backward direction: reconstruction is neither trained nor discriminated;
        # re-attach the extra input channel(s) when present
        reconstruction_input = tf.concat([generated_images, batch_input[:, :, :, 3:]], axis=-1) if batch_input.shape[-1] > 3 else generated_images
        reconstructed_images = reverse_generator(reconstruction_input, training=False)
        if self._config.loss_identity:
            targets = batch_target
            identity_images = generator(batch_target, training=True)
        else:
            targets = None
            identity_images = None
        gen_losses = self._model.gen_loss(disc_on_generated, GeneratorLossArgs(generated_images, batch_input,
            targets=targets, reconstructed_images=reconstructed_images, identity_images=identity_images))
        gen_loss = sum(gen_losses.values())
        disc_loss = self._model.disc_loss(disc_on_real, disc_on_generated)
    # gradients are computed after leaving the tape contexts
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.variables)
    return TwoWayEvaluation.TrainingResult(gen_loss, gen_losses, disc_loss, disc_on_real, disc_on_generated,
        gradients_of_generator, gradients_of_discriminator)
def train_discriminator(self, generator, discriminator, batch_input, batch_target):
    """Extra discriminator-only step; when ``generator`` is None only real
    samples are discriminated. Only discriminator gradients are returned."""
    with tf.GradientTape() as disc_tape:
        if generator:
            generated_images = generator(batch_input, training=True)
        if batch_input.shape[-1] == 4:
            # 4-channel input: only the first three target channels are discriminated
            assert batch_target.shape[-1] == 4
            disc_input_real = tf.concat([batch_input, batch_target[:, :, :, :3]], axis=-1) \
                if self._config.conditioned_discriminator else batch_target[:, :, :, :3]
            if generator:
                disc_input_generated = tf.concat([batch_input, generated_images], axis=-1) \
                    if self._config.conditioned_discriminator else generated_images
        else:
            assert batch_target.shape[-1] == 3 and batch_input.shape[-1] == 3
            disc_input_real = tf.concat([batch_input, batch_target], axis=-1) \
                if self._config.conditioned_discriminator else batch_target
            if generator:
                disc_input_generated = tf.concat([batch_input, generated_images], axis=-1) \
                    if self._config.conditioned_discriminator else generated_images
        disc_on_real = discriminator(disc_input_real, training=True)
        if generator:
            disc_on_generated = discriminator(disc_input_generated, training=True)
        else:
            disc_on_generated = None
        disc_loss = self._model.disc_loss(disc_on_real, disc_on_generated)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.variables)
    return TwoWayEvaluation.TrainingResult(None, None, disc_loss, disc_on_real, disc_on_generated,
        None, gradients_of_discriminator)
    def train(self, epochs, metrics_writer):
        """Train both generator/discriminator pairs for ``epochs`` epochs.

        Per epoch: optional discriminator-only pre-steps, one joint training
        pass over ``self.data_set``, TensorBoard summaries, periodic
        checkpointing, periodic perceptual scores and discriminator
        train/test-gap statistics, and one row appended to ``metrics_writer``.

        :param epochs: total number of epochs to run.
        :param metrics_writer: csv-writer-like object (``writerow`` is called
            once per epoch with ``metrics.get_row_data()``).
        """
        # pylint: disable=too-many-locals,too-many-branches,too-many-statements
        try:
            tf.logging.info("Memory usage before training: {}".format(get_memory_usage_string()))
        except: # pylint: disable=bare-except
            tf.logging.warning("Unable to get memory usage, no GPU available?")
        # NOTE(review): these two config options are rejected up front because
        # this loop (unlike other evaluations, presumably) never implements them.
        assert not self._config.train_disc_on_previous_images \
            and not self._config.real_image_noise_stdev, "not implemented"
        checkpoint_interval = 25 # always have same interval for easier epoch number -> checkpoint-number conversion
        gradients_interval = epochs // 5 // 25 * 25 # aim for at least 5 gradients in total but have it a multiple of 25
        gradients_interval = 25 if gradients_interval == 0 else min(gradients_interval, 150)
        if self._config.scores_every_epoch:
            scores_interval = 1
        else:
            scores_interval = epochs // 10 // 10 * 10 # aim for at least 10 percentual scores in total but have it a multiple of 10
            scores_interval = 10 if scores_interval == 0 else min(scores_interval, 25)
        tf.logging.info("Intervals: checkpoint {}, scores {}, gradients {}".format(checkpoint_interval, scores_interval, gradients_interval))
        for epoch in range(epochs):
            start = time.time()
            metrics = TwoWayMetrics(epoch+1, 4)
            # NOTE, KEEP IN MIND: "first"/"second" refers to the input domain
            # ie "first generator" is the generator that receives input from the first domain (and generates for the second)
            # Optional discriminator-only pass over the whole data set before
            # the joint generator+discriminator updates of this epoch.
            if self._config.extra_disc_step_real or self._config.extra_disc_step_both:
                for batch_number, batch in enumerate(self.data_set):
                    batch_first_domain, batch_second_domain = batch
                    forward_result = self.train_discriminator(self._first_generator if self._config.extra_disc_step_both else None,
                        self._second_discriminator, batch_first_domain, batch_second_domain)
                    backward_result = self.train_discriminator(self._second_generator if self._config.extra_disc_step_both else None,
                        self._first_discriminator, batch_second_domain, batch_first_domain)
                    self._second_discriminator_optimizer.apply_gradients(zip(forward_result.disc_gradients, self._second_discriminator.variables))
                    self._first_discriminator_optimizer.apply_gradients(zip(backward_result.disc_gradients, self._first_discriminator.variables))
            # Main joint training pass: forward = first domain -> second,
            # backward = second domain -> first.
            for batch_number, batch in enumerate(self.data_set):
                batch_first_domain, batch_second_domain = batch
                # evaluate models
                forward_result = self.train_generator_discriminator_pair(self._first_generator, self._second_discriminator,
                    self._second_generator, batch_first_domain, batch_second_domain)
                backward_result = self.train_generator_discriminator_pair(self._second_generator, self._first_discriminator,
                    self._first_generator, batch_second_domain, batch_first_domain)
                # store results
                metrics.add_losses((forward_result.gen_losses, backward_result.gen_losses), (backward_result.disc_loss, forward_result.disc_loss))
                metrics.add_discriminations((logistic(backward_result.disc_on_real), logistic(forward_result.disc_on_real)),
                    (logistic(backward_result.disc_on_generated), logistic(forward_result.disc_on_generated)))
                # train
                self._first_generator_optimizer.apply_gradients(zip(forward_result.gen_gradients, self._first_generator.variables))
                self._second_discriminator_optimizer.apply_gradients(zip(forward_result.disc_gradients, self._second_discriminator.variables))
                self._second_generator_optimizer.apply_gradients(zip(backward_result.gen_gradients, self._second_generator.variables))
                self._first_discriminator_optimizer.apply_gradients(zip(backward_result.disc_gradients, self._first_discriminator.variables))
                if batch_number == 0:
                    # work with the gradients of the first (rather than last) batch since here, the batch is full for sure
                    # Gradient histograms per variable (batch-norm vars skipped).
                    for i, variable in enumerate(self._first_generator.variables):
                        if "batch_normalization" in variable.name or forward_result.gen_gradients[i] is None:
                            continue
                        tf.contrib.summary.histogram(variable.name.replace(":", "_"), forward_result.gen_gradients[i], "gradients/first_gen", epoch)
                    for i, variable in enumerate(self._first_discriminator.variables):
                        if "batch_normalization" in variable.name or backward_result.disc_gradients[i] is None:
                            continue
                        tf.contrib.summary.histogram(variable.name.replace(":", "_"), backward_result.disc_gradients[i], "gradients/first_disc", epoch)
                    for i, variable in enumerate(self._second_generator.variables):
                        if "batch_normalization" in variable.name or backward_result.gen_gradients[i] is None:
                            continue
                        tf.contrib.summary.histogram(variable.name.replace(":", "_"), backward_result.gen_gradients[i], "gradients/second_gen", epoch)
                    for i, variable in enumerate(self._second_discriminator.variables):
                        if "batch_normalization" in variable.name or forward_result.disc_gradients[i] is None:
                            continue
                        tf.contrib.summary.histogram(variable.name.replace(":", "_"), forward_result.disc_gradients[i], "gradients/second_disc", epoch)
                    # Periodically pickle raw gradients of all four networks to disk.
                    if (epoch+1) % gradients_interval == 0 or epoch == epochs - 1:
                        first_generator_gradients = [(variable.name, forward_result.gen_gradients[i].numpy()) \
                            for i, variable in enumerate(self._first_generator.variables) if "batch_normalization" not in variable.name]
                        first_discriminator_gradients = [(variable.name, backward_result.disc_gradients[i].numpy()) \
                            for i, variable in enumerate(self._first_discriminator.variables) if "batch_normalization" not in variable.name]
                        second_generator_gradients = [(variable.name, backward_result.gen_gradients[i].numpy()) \
                            for i, variable in enumerate(self._second_generator.variables) if "batch_normalization" not in variable.name]
                        second_discriminator_gradients = [(variable.name, forward_result.disc_gradients[i].numpy()) \
                            for i, variable in enumerate(self._second_discriminator.variables) if "batch_normalization" not in variable.name]
                        with open(os.path.join(self._config.gradients_dir, "gradients_at_epoch_{:04d}.pkl".format(epoch+1)), "wb") as fh:
                            pickle.dump((first_generator_gradients, forward_result.gen_loss, second_generator_gradients, backward_result.gen_loss,
                                first_discriminator_gradients, backward_result.disc_loss, second_discriminator_gradients, forward_result.disc_loss), fh)
            # Optional extra real-only data for the second discriminator.
            if self.extra_discriminator_data_set:
                assert not self._config.conditioned_discriminator and \
                    not self._config.train_disc_on_previous_images and \
                    not self._config.real_image_noise_stdev, "not implemented"
                for disc_input in self.extra_discriminator_data_set:
                    with tf.GradientTape() as disc_tape:
                        disc_on_real = self._second_discriminator(disc_input, training=True)
                        disc_loss = self._model.disc_loss(disc_on_real, []) # no generated samples
                    gradients_of_discriminator = disc_tape.gradient(disc_loss, self._second_discriminator.variables)
                    self._second_discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, self._second_discriminator.variables))
            # Sample images every epoch; high-quality plots every 5th epoch.
            _ = self.save_epoch_samples((self._first_generator, self._second_generator),
                (self._first_discriminator, self._second_discriminator), epoch+1, (epoch+1) % 5 == 0)
            # Per-epoch TensorBoard summaries of losses and predictions.
            tf.contrib.summary.histogram("first_gen", metrics.first_gen_loss, "loss", epoch)
            tf.contrib.summary.histogram("first_disc", metrics.first_disc_loss, "loss", epoch)
            tf.contrib.summary.histogram("second_gen", metrics.second_gen_loss, "loss", epoch)
            tf.contrib.summary.histogram("second_disc", metrics.second_disc_loss, "loss", epoch)
            tf.contrib.summary.histogram("first_on_real", metrics.first_disc_on_real, "predictions", epoch)
            tf.contrib.summary.histogram("first_on_gen", metrics.first_disc_on_generated, "predictions", epoch)
            tf.contrib.summary.histogram("second_on_real", metrics.second_disc_on_real, "predictions", epoch)
            tf.contrib.summary.histogram("second_on_gen", metrics.second_disc_on_generated, "predictions", epoch)
            # Regular checkpoints; near the very end, checkpoint every epoch
            # into the separate "final" checkpoint (if configured).
            if (epoch+1) % checkpoint_interval == 0 or epoch == epochs - 1:
                self._checkpoint.save()
            elif epoch > epochs - 6 and self._final_checkpoint:
                self._final_checkpoint.save()
            if epoch == 0 or epoch == 4 or (epoch+1) % 10 == 0:
                memory_usage = ""
                if epoch == 0 or epoch == 4 or (epoch+1) % 50 == 0:
                    try:
                        memory_usage = " - memory: " + get_memory_usage_string()
                    except: # pylint: disable=bare-except
                        memory_usage = " - Unable to get memory usage, no GPU available?"
                # ETA: average epoch duration so far times remaining epochs (minutes).
                time_remaining = (time.time()-self._config.start_time)/(epoch+1)*(epochs-epoch-1)/60
                tf.logging.info("{}/{}: Round time: {:.1f}m - ETA {:%H:%M} ({:.1f}h){}".format(epoch + 1, epochs,
                    (time.time()-start)/60, datetime.now() + timedelta(minutes=time_remaining), time_remaining/60,
                    memory_usage))
                tf.logging.info("{}/{} FWD: Loss G {:.2f}+-{:.3f}, D {:.2f}+-{:.3f}; D on real {:.3f}+-{:.3f}, on fake {:.3f}+-{:.3f}".format(
                    epoch + 1, epochs,
                    np.mean(metrics.first_gen_loss), np.std(metrics.first_gen_loss),
                    np.mean(metrics.second_disc_loss), np.std(metrics.second_disc_loss),
                    np.mean(metrics.second_disc_on_real), np.std(metrics.second_disc_on_real),
                    np.mean(metrics.second_disc_on_generated), np.std(metrics.second_disc_on_generated)))
                tf.logging.info("{}/{} BWD: Loss G {:.2f}+-{:.3f}, D {:.2f}+-{:.3f}; D on real {:.3f}+-{:.3f}, on fake {:.3f}+-{:.3f}".format(
                    epoch + 1, epochs,
                    np.mean(metrics.second_gen_loss), np.std(metrics.second_gen_loss),
                    np.mean(metrics.first_disc_loss), np.std(metrics.first_disc_loss),
                    np.mean(metrics.first_disc_on_real), np.std(metrics.first_disc_on_real),
                    np.mean(metrics.first_disc_on_generated), np.std(metrics.first_disc_on_generated)))
            # Expensive scores run on epochs adjacent to the interval and in
            # the final five epochs (or every epoch if configured).
            is_near_interval = \
                (epoch+0) % scores_interval == 0 or \
                (epoch+1) % scores_interval == 0 or \
                (epoch+2) % scores_interval == 0
            if (epoch > 1 or self._config.scores_every_epoch) and (epoch > epochs - 6 or is_near_interval):
                # Default all scores to NaN so the metrics row stays complete
                # even when a scores object is not configured.
                first_fid = first_mmd = first_clustering_high = first_clustering_low = first_combined_fid = tf.convert_to_tensor(np.nan)
                first_low_level_fids = [tf.convert_to_tensor(np.nan)] * metrics.n_low_level_fids
                second_fid = second_mmd = second_clustering_high = second_clustering_low = second_combined_fid = tf.convert_to_tensor(np.nan)
                second_low_level_fids = [tf.convert_to_tensor(np.nan)] * metrics.n_low_level_fids
                if self._perceptual_scores:
                    first_fid, first_mmd, first_clustering_high, first_clustering_low, first_low_level_fids, first_combined_fid = \
                        self._perceptual_scores.compute_scores_from_generator(self._first_generator, self.data_set.map(lambda x, y: x))
                    tf.logging.warning("{}/{}: FWD: Computed perceptual scores: FID={:.1f}, MMD={:.3f}, clustering-high={:.3f}, clustering-low={:.3f}".format(
                        epoch + 1, epochs, first_fid, first_mmd, first_clustering_high, first_clustering_low))
                if self._reverse_perceptual_scores:
                    second_fid, second_mmd, second_clustering_high, second_clustering_low, second_low_level_fids, second_combined_fid = \
                        self._reverse_perceptual_scores.compute_scores_from_generator(self._second_generator, self.data_set.map(lambda x, y: y))
                    tf.logging.warning("{}/{}: BWD: Computed perceptual scores: FID={:.1f}, MMD={:.3f}, clustering-high={:.3f}, clustering-low={:.3f}".format(
                        epoch + 1, epochs, second_fid, second_mmd, second_clustering_high, second_clustering_low))
                metrics.add_perceptual_scores((first_fid, second_fid), (first_mmd, second_mmd), (first_clustering_high, second_clustering_high),
                    (first_clustering_low, second_clustering_low), (first_low_level_fids, second_low_level_fids), (first_combined_fid, second_combined_fid))
                tf.contrib.summary.scalar("first_fid", first_fid, "perceptual", epoch)
                tf.contrib.summary.scalar("first_mmd", first_mmd, "perceptual", epoch)
                tf.contrib.summary.scalar("first_clustering_high", first_clustering_high, "perceptual", epoch)
                tf.contrib.summary.scalar("first_clustering_low", first_clustering_low, "perceptual", epoch)
                # not adding low-level FIDs to TB since I'm not using it anyway
                tf.contrib.summary.scalar("first_combined_fid", first_combined_fid, "perceptual", epoch)
                tf.contrib.summary.scalar("second_fid", second_fid, "perceptual", epoch)
                tf.contrib.summary.scalar("second_mmd", second_mmd, "perceptual", epoch)
                tf.contrib.summary.scalar("second_clustering_high", second_clustering_high, "perceptual", epoch)
                tf.contrib.summary.scalar("second_clustering_low", second_clustering_low, "perceptual", epoch)
                tf.contrib.summary.scalar("second_combined_fid", second_combined_fid, "perceptual", epoch)
            # Discriminator train-vs-test gap: a large difference indicates
            # the discriminator overfitting the training set.
            if self.test_data_set and (epoch > 1 or self._config.scores_every_epoch) and (epoch > epochs - 6 or is_near_interval):
                first_disc_on_training = self._discriminate_data_set(self._first_discriminator, self.data_set.map(lambda x, y: (y, x[:, :, :, :3])))
                first_disc_on_training_mean = np.mean(first_disc_on_training)
                first_disc_on_training_std = np.std(first_disc_on_training)
                first_disc_on_test = self._discriminate_data_set(self._first_discriminator, self.test_data_set.map(lambda x, y: (y, x[:, :, :, :3])))
                first_disc_on_test_mean = np.mean(first_disc_on_test)
                first_disc_on_test_std = np.std(first_disc_on_test)
                second_disc_on_training = self._discriminate_data_set(self._second_discriminator, self.data_set.map(lambda x, y: (x, y[:, :, :, :3])))
                second_disc_on_training_mean = np.mean(second_disc_on_training)
                second_disc_on_training_std = np.std(second_disc_on_training)
                second_disc_on_test = self._discriminate_data_set(self._second_discriminator, self.test_data_set.map(lambda x, y: (x, y[:, :, :, :3])))
                second_disc_on_test_mean = np.mean(second_disc_on_test)
                second_disc_on_test_std = np.std(second_disc_on_test)
                tf.logging.warning("{}/{}: First disc on training: {:.3f}+-{:.3f}, on test: {:.3f}+-{:.3f}, diff: {:.3f}".format(
                    epoch + 1, epochs, first_disc_on_training_mean, first_disc_on_training_std, first_disc_on_test_mean, first_disc_on_test_std,
                    first_disc_on_training_mean - first_disc_on_test_mean))
                tf.logging.warning("{}/{}: Second disc on training: {:.3f}+-{:.3f}, on test: {:.3f}+-{:.3f}, diff: {:.3f}".format(
                    epoch + 1, epochs, second_disc_on_training_mean, second_disc_on_training_std, second_disc_on_test_mean, second_disc_on_test_std,
                    second_disc_on_training_mean - second_disc_on_test_mean))
                metrics.add_disc_on_training_test((first_disc_on_training_mean, second_disc_on_training_mean), (first_disc_on_training_std,
                    second_disc_on_training_std), (first_disc_on_test_mean, second_disc_on_test_mean), (first_disc_on_test_std, second_disc_on_test_std))
                tf.contrib.summary.scalar("first_disc_on_training_mean", first_disc_on_training_mean, "disc_overfitting", epoch)
                tf.contrib.summary.scalar("first_disc_on_training_std", first_disc_on_training_std, "disc_overfitting", epoch)
                tf.contrib.summary.scalar("first_disc_on_test_mean", first_disc_on_test_mean, "disc_overfitting", epoch)
                tf.contrib.summary.scalar("first_disc_on_test_std", first_disc_on_test_std, "disc_overfitting", epoch)
                tf.contrib.summary.scalar("second_disc_on_training_mean", second_disc_on_training_mean, "disc_overfitting", epoch)
                tf.contrib.summary.scalar("second_disc_on_training_std", second_disc_on_training_std, "disc_overfitting", epoch)
                tf.contrib.summary.scalar("second_disc_on_test_mean", second_disc_on_test_mean, "disc_overfitting", epoch)
                tf.contrib.summary.scalar("second_disc_on_test_std", second_disc_on_test_std, "disc_overfitting", epoch)
            metrics_writer.writerow(metrics.get_row_data())
    @abstractmethod
    def _plot_epoch_samples(self, generator, discriminator):
        """Plot samples for one generator/discriminator pair; subclass hook."""
        pass
    @abstractmethod
    def _plot_hq_epoch_samples(self, generated_samples, discriminator_probabilities):
        """Plot high-quality samples with their discriminator scores; subclass hook."""
        pass
| true | true |
1c317b68158e61fbbc1215cca374fee0c808c151 | 1,719 | py | Python | SSD/SSD_FPN_GIoU/model/head/build_head.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 12 | 2020-03-25T01:24:22.000Z | 2021-09-18T06:40:16.000Z | SSD/SSD_FPN_GIoU/model/head/build_head.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 1 | 2020-04-22T07:52:36.000Z | 2020-04-22T07:52:36.000Z | SSD/SSD_FPN_GIoU/model/head/build_head.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 4 | 2020-03-25T01:24:26.000Z | 2020-09-20T11:29:09.000Z | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class SSDHead(nn.Module):
    """SSD detection head.

    Applies one 3x3 box-regression conv and one 3x3 classification conv to
    each feature map of the pyramid.

    Args:
        num_classes (int): number of classes (background included).
        in_channels (sequence[int]): channels of each input feature map.
        aspect_ratios (sequence[sequence[int]]): anchor aspect ratios per
            level; level ``i`` uses ``len(aspect_ratios[i]) * 2 + 2`` anchors.
            Entries beyond ``len(in_channels)`` are ignored.
    """

    def __init__(self,
                 num_classes=81,
                 in_channels=(256, 256, 256, 256, 256),
                 aspect_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2])):
        # Tuple default instead of the original mutable list default
        # (a shared mutable default is a classic Python pitfall).
        super(SSDHead, self).__init__()
        self.num_classes = num_classes
        self.in_channels = in_channels
        # Two anchors per ratio plus two extra anchors per location.
        num_anchors = [len(ratios) * 2 + 2 for ratios in aspect_ratios]
        reg_convs = []
        cls_convs = []
        # Keep reg/cls construction interleaved per level, as before.
        for i in range(len(in_channels)):
            reg_convs.append(
                nn.Conv2d(
                    in_channels[i],
                    num_anchors[i] * 4,  # 4 box offsets per anchor
                    kernel_size=3,
                    padding=1))
            cls_convs.append(
                nn.Conv2d(
                    in_channels[i],
                    num_anchors[i] * num_classes,  # class scores per anchor
                    kernel_size=3,
                    padding=1))
        self.reg_convs = nn.ModuleList(reg_convs)
        self.cls_convs = nn.ModuleList(cls_convs)
        self.init_weights()

    def init_weights(self):
        """Xavier-initialize the weights of every conv layer in the head."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.xavier_uniform_(m.weight)

    def forward(self, feats):
        """Run the head on each feature map.

        Args:
            feats (sequence[Tensor]): per-level feature maps in NCHW layout.

        Returns:
            tuple(list[Tensor], list[Tensor]): per-level class scores and box
            predictions, both permuted to [N, H, W, anchors * C] layout.
        """
        cls_scores = []
        bbox_preds = []
        for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
                                            self.cls_convs):
            # [num_featuremap, w, h, c] after the permute
            cls_scores.append(cls_conv(feat).permute(0, 2, 3, 1).contiguous())
            bbox_preds.append(reg_conv(feat).permute(0, 2, 3, 1).contiguous())
        return cls_scores, bbox_preds
import torch
import torch.nn as nn
import torch.nn.functional as F
class SSDHead(nn.Module):
    """SSD head: per-level 3x3 convs producing box regressions and class scores."""

    def __init__(self,
                 num_classes=81,
                 in_channels=[256,256,256,256,256],
                 aspect_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2])):
        super(SSDHead, self).__init__()
        self.num_classes = num_classes
        self.in_channels = in_channels
        # Each ratio contributes two anchors, plus two extra per location.
        anchors_per_level = [len(ratios) * 2 + 2 for ratios in aspect_ratios]
        reg_layers = []
        cls_layers = []
        # zip truncates to len(in_channels), matching the original range loop.
        for channels, anchors in zip(in_channels, anchors_per_level):
            reg_layers.append(
                nn.Conv2d(channels, anchors * 4, kernel_size=3, padding=1))
            cls_layers.append(
                nn.Conv2d(channels, anchors * num_classes, kernel_size=3, padding=1))
        self.reg_convs = nn.ModuleList(reg_layers)
        self.cls_convs = nn.ModuleList(cls_layers)
        self.init_weights()

    def init_weights(self):
        """Apply Xavier-uniform initialization to every conv weight."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                torch.nn.init.xavier_uniform_(module.weight)

    def forward(self, feats):
        """Return ([per-level class scores], [per-level box preds]) in NHWC layout."""
        cls_scores = [conv(feat).permute(0, 2, 3, 1).contiguous()
                      for feat, conv in zip(feats, self.cls_convs)]
        bbox_preds = [conv(feat).permute(0, 2, 3, 1).contiguous()
                      for feat, conv in zip(feats, self.reg_convs)]
        return cls_scores, bbox_preds
1c317ba330fa078e9471b953a8a6d9cec7efb0e9 | 390 | py | Python | scripts/skinning/gui/widgets/divider.py | robertjoosten/skinning-tools | 1f1ec6c092fdc1e39aa82a711a13a0041f9d5730 | [
"MIT"
] | 31 | 2018-09-08T16:42:01.000Z | 2022-03-31T12:31:21.000Z | scripts/skinning/gui/widgets/divider.py | robertjoosten/skinning-tools | 1f1ec6c092fdc1e39aa82a711a13a0041f9d5730 | [
"MIT"
] | null | null | null | scripts/skinning/gui/widgets/divider.py | robertjoosten/skinning-tools | 1f1ec6c092fdc1e39aa82a711a13a0041f9d5730 | [
"MIT"
] | 11 | 2018-10-01T09:57:53.000Z | 2022-03-19T06:53:02.000Z | from PySide2 import QtCore, QtWidgets, QtGui
__all__ = [
"DividerWidget",
]
class DividerWidget(QtWidgets.QFrame):
    """Thin sunken frame line used to visually separate sections of a layout."""

    def __init__(self, parent, horizontal=True):
        """
        :param parent: Parent widget.
        :param bool horizontal: Draw a horizontal line when True, a vertical
            line otherwise.
        """
        super(DividerWidget, self).__init__(parent)
        line = QtWidgets.QFrame.HLine if horizontal else QtWidgets.QFrame.VLine
        self.setFrameShape(line)
        self.setFrameShadow(QtWidgets.QFrame.Sunken)
| 26 | 79 | 0.720513 | from PySide2 import QtCore, QtWidgets, QtGui
__all__ = [
"DividerWidget",
]
class DividerWidget(QtWidgets.QFrame):
    """A sunken QFrame line acting as a visual divider between sections."""

    def __init__(self, parent, horizontal=True):
        super(DividerWidget, self).__init__(parent)
        # Pick the line orientation, then render it with a sunken shadow.
        if horizontal:
            shape = QtWidgets.QFrame.HLine
        else:
            shape = QtWidgets.QFrame.VLine
        self.setFrameShape(shape)
        self.setFrameShadow(QtWidgets.QFrame.Sunken)
| true | true |
1c317bab1af4a5ca0bb4085be1fec417dd138ee4 | 20,915 | py | Python | Week3/test_code1.py | johndolotko/pynet_course | 55372a0977994fd26ef59885f6068d831ccdeac4 | [
"Apache-2.0"
] | null | null | null | Week3/test_code1.py | johndolotko/pynet_course | 55372a0977994fd26ef59885f6068d831ccdeac4 | [
"Apache-2.0"
] | 6 | 2020-02-26T20:21:27.000Z | 2021-12-13T19:59:14.000Z | Week3/test_code1.py | johndolotko/pynet_course | 55372a0977994fd26ef59885f6068d831ccdeac4 | [
"Apache-2.0"
] | null | null | null | interfaces = [
{
"command": "show interfaces",
"result": {
"interfaces": {
"Management1": {
"lastStatusChangeTimestamp": 1538591522.8171098,
"name": "Management1",
"interfaceStatus": "disabled",
"autoNegotiate": "off",
"burnedInAddress": "52:54:ab:02:a1:10",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 1500,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 1000000000,
"forwardingModel": "routed",
"lineProtocolStatus": "down",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 2,
"totalOutErrors": 0,
"inMulticastPkts": 0,
"counterRefreshTime": 1539281777.827566,
"inBroadcastPkts": 0,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 0,
"outDiscards": 0,
"outOctets": 0,
"inUcastPkts": 0,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 0,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:10",
"description": "",
},
"Vlan1": {
"lastStatusChangeTimestamp": 1538591527.373837,
"name": "Vlan1",
"interfaceStatus": "connected",
"burnedInAddress": "52:54:ab:be:5b:7b",
"mtu": 1500,
"hardware": "vlan",
"bandwidth": 0,
"forwardingModel": "routed",
"lineProtocolStatus": "up",
"interfaceAddress": [
{
"secondaryIpsOrderedList": [],
"broadcastAddress": "255.255.255.255",
"secondaryIps": {},
"primaryIp": {"maskLen": 24, "address": "10.220.88.29"},
"virtualIp": {"maskLen": 0, "address": "0.0.0.0"},
}
],
"physicalAddress": "52:54:ab:be:5b:7b",
"description": "",
},
"Ethernet2": {
"lastStatusChangeTimestamp": 1538591527.2428443,
"name": "Ethernet2",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:12",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498763,
"counterRefreshTime": 1539281777.849249,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78083049,
"outDiscards": 0,
"outOctets": 4027630,
"inUcastPkts": 38214,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23018,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:12",
"description": "",
},
"Ethernet3": {
"lastStatusChangeTimestamp": 1538591527.243047,
"name": "Ethernet3",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:13",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498769,
"counterRefreshTime": 1539281777.875926,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78084075,
"outDiscards": 0,
"outOctets": 4026892,
"inUcastPkts": 38215,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23012,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:13",
"description": "",
},
"Ethernet1": {
"lastStatusChangeTimestamp": 1538591527.2426362,
"name": "Ethernet1",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:11",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 1,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498763,
"counterRefreshTime": 1539281777.867376,
"inBroadcastPkts": 4170,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 76679169,
"outDiscards": 0,
"outOctets": 5431798,
"inUcastPkts": 22895,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 15320,
"outMulticastPkts": 23018,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:11",
"description": "",
},
"Ethernet6": {
"lastStatusChangeTimestamp": 1538591527.2436152,
"name": "Ethernet6",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:16",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498768,
"counterRefreshTime": 1539281777.897336,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78083936,
"outDiscards": 0,
"outOctets": 4026892,
"inUcastPkts": 38215,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23012,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:16",
"description": "",
},
"Ethernet7": {
"lastStatusChangeTimestamp": 1538591527.243805,
"name": "Ethernet7",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:17",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498769,
"counterRefreshTime": 1539281777.837162,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78083771,
"outDiscards": 0,
"outOctets": 4026769,
"inUcastPkts": 38214,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23011,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:17",
"description": "",
},
"Ethernet4": {
"lastStatusChangeTimestamp": 1538591527.243236,
"name": "Ethernet4",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:14",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"lastClear": 1538591421.972857,
"inMulticastPkts": 498767,
"counterRefreshTime": 1539281777.858641,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78083541,
"outDiscards": 0,
"outOctets": 4027138,
"inUcastPkts": 38214,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23014,
"totalInErrors": 0,
"inDiscards": 0,
"totalOutErrors": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:14",
"description": "",
},
"Ethernet5": {
"lastStatusChangeTimestamp": 1538591527.2434251,
"name": "Ethernet5",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:15",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498770,
"counterRefreshTime": 1539281777.884441,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78084198,
"outDiscards": 0,
"outOctets": 4026769,
"inUcastPkts": 38215,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23011,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:15",
"description": "",
},
}
},
"encoding": "json",
}
]
# Drill down to the per-interface dictionaries: the first command's parsed
# output lives under result -> interfaces.  (Replaces the previous three
# stepwise reassignments and the commented-out duplicate of this loop.)
new_interfaces = interfaces[0]["result"]["interfaces"]
for intf_name, intf_value in new_interfaces.items():
    print(intf_name)
    print(intf_value["interfaceStatus"])
    print()
| 43.302277 | 84 | 0.346067 | interfaces = [
{
"command": "show interfaces",
"result": {
"interfaces": {
"Management1": {
"lastStatusChangeTimestamp": 1538591522.8171098,
"name": "Management1",
"interfaceStatus": "disabled",
"autoNegotiate": "off",
"burnedInAddress": "52:54:ab:02:a1:10",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 1500,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 1000000000,
"forwardingModel": "routed",
"lineProtocolStatus": "down",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 2,
"totalOutErrors": 0,
"inMulticastPkts": 0,
"counterRefreshTime": 1539281777.827566,
"inBroadcastPkts": 0,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 0,
"outDiscards": 0,
"outOctets": 0,
"inUcastPkts": 0,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 0,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:10",
"description": "",
},
"Vlan1": {
"lastStatusChangeTimestamp": 1538591527.373837,
"name": "Vlan1",
"interfaceStatus": "connected",
"burnedInAddress": "52:54:ab:be:5b:7b",
"mtu": 1500,
"hardware": "vlan",
"bandwidth": 0,
"forwardingModel": "routed",
"lineProtocolStatus": "up",
"interfaceAddress": [
{
"secondaryIpsOrderedList": [],
"broadcastAddress": "255.255.255.255",
"secondaryIps": {},
"primaryIp": {"maskLen": 24, "address": "10.220.88.29"},
"virtualIp": {"maskLen": 0, "address": "0.0.0.0"},
}
],
"physicalAddress": "52:54:ab:be:5b:7b",
"description": "",
},
"Ethernet2": {
"lastStatusChangeTimestamp": 1538591527.2428443,
"name": "Ethernet2",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:12",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498763,
"counterRefreshTime": 1539281777.849249,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78083049,
"outDiscards": 0,
"outOctets": 4027630,
"inUcastPkts": 38214,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23018,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:12",
"description": "",
},
"Ethernet3": {
"lastStatusChangeTimestamp": 1538591527.243047,
"name": "Ethernet3",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:13",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498769,
"counterRefreshTime": 1539281777.875926,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78084075,
"outDiscards": 0,
"outOctets": 4026892,
"inUcastPkts": 38215,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23012,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:13",
"description": "",
},
"Ethernet1": {
"lastStatusChangeTimestamp": 1538591527.2426362,
"name": "Ethernet1",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:11",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 1,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498763,
"counterRefreshTime": 1539281777.867376,
"inBroadcastPkts": 4170,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 76679169,
"outDiscards": 0,
"outOctets": 5431798,
"inUcastPkts": 22895,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 15320,
"outMulticastPkts": 23018,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:11",
"description": "",
},
"Ethernet6": {
"lastStatusChangeTimestamp": 1538591527.2436152,
"name": "Ethernet6",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:16",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498768,
"counterRefreshTime": 1539281777.897336,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78083936,
"outDiscards": 0,
"outOctets": 4026892,
"inUcastPkts": 38215,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23012,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:16",
"description": "",
},
"Ethernet7": {
"lastStatusChangeTimestamp": 1538591527.243805,
"name": "Ethernet7",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:17",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498769,
"counterRefreshTime": 1539281777.837162,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78083771,
"outDiscards": 0,
"outOctets": 4026769,
"inUcastPkts": 38214,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23011,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:17",
"description": "",
},
"Ethernet4": {
"lastStatusChangeTimestamp": 1538591527.243236,
"name": "Ethernet4",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:14",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"lastClear": 1538591421.972857,
"inMulticastPkts": 498767,
"counterRefreshTime": 1539281777.858641,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78083541,
"outDiscards": 0,
"outOctets": 4027138,
"inUcastPkts": 38214,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23014,
"totalInErrors": 0,
"inDiscards": 0,
"totalOutErrors": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:14",
"description": "",
},
"Ethernet5": {
"lastStatusChangeTimestamp": 1538591527.2434251,
"name": "Ethernet5",
"interfaceStatus": "connected",
"autoNegotiate": "unknown",
"burnedInAddress": "52:54:ab:02:a1:15",
"loopbackMode": "loopbackNone",
"interfaceStatistics": {
"inBitsRate": 0.0,
"inPktsRate": 0.0,
"outBitsRate": 0.0,
"updateInterval": 300.0,
"outPktsRate": 0.0,
},
"mtu": 9214,
"hardware": "ethernet",
"duplex": "duplexFull",
"bandwidth": 0,
"forwardingModel": "bridged",
"lineProtocolStatus": "up",
"interfaceCounters": {
"outBroadcastPkts": 0,
"linkStatusChanges": 1,
"totalOutErrors": 0,
"inMulticastPkts": 498770,
"counterRefreshTime": 1539281777.884441,
"inBroadcastPkts": 4171,
"outputErrorsDetail": {
"deferredTransmissions": 0,
"txPause": 0,
"collisions": 0,
"lateCollisions": 0,
},
"inOctets": 78084198,
"outDiscards": 0,
"outOctets": 4026769,
"inUcastPkts": 38215,
"inputErrorsDetail": {
"runtFrames": 0,
"rxPause": 0,
"fcsErrors": 0,
"alignmentErrors": 0,
"giantFrames": 0,
"symbolErrors": 0,
},
"outUcastPkts": 0,
"outMulticastPkts": 23011,
"totalInErrors": 0,
"inDiscards": 0,
},
"interfaceAddress": [],
"physicalAddress": "52:54:ab:02:a1:15",
"description": "",
},
}
},
"encoding": "json",
}
]
# Drill straight into the first command result and print one status report
# per interface (name, operational status, blank separator line).
new_interfaces = interfaces[0]["result"]["interfaces"]
for intf_name, intf_value in new_interfaces.items():
    print(intf_name)
    print(intf_value["interfaceStatus"])
    print()
| true | true |
1c317c067123100a50afe795f1809da2ad9767ea | 467 | py | Python | apps/base/views/website_config.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 115 | 2019-08-18T16:12:54.000Z | 2022-03-29T14:17:20.000Z | apps/base/views/website_config.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 22 | 2019-09-09T01:34:54.000Z | 2022-03-12T00:33:40.000Z | apps/base/views/website_config.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 83 | 2019-08-17T17:09:20.000Z | 2022-03-25T04:46:53.000Z | # Django Library
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
# Localfolder Library
from ..models.website_config import PyWebsiteConfig
from .web_father import FatherUpdateView
class UpdateWebsiteConfigView(LoginRequiredMixin, FatherUpdateView):
    """Login-protected edit view for the PyWebsiteConfig record.

    Form handling comes from FatherUpdateView; this class only declares the
    model, the template and which feature-toggle fields may be edited.
    """
    model = PyWebsiteConfig
    template_name = 'base/form.html'
    # NOTE(review): toggles appear to control public site features (blog, shop,
    # chat, prices, registration, maintenance banner) — confirm against templates
    fields = ['show_blog', 'show_shop', 'under_construction', 'show_chat','show_price','user_register']
| 33.357143 | 103 | 0.802998 |
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from ..models.website_config import PyWebsiteConfig
from .web_father import FatherUpdateView
class UpdateWebsiteConfigView(LoginRequiredMixin, FatherUpdateView):
    """Login-protected edit view for the PyWebsiteConfig record.

    Form handling comes from FatherUpdateView; this class only declares the
    model, the template and which feature-toggle fields may be edited.
    """
    model = PyWebsiteConfig
    template_name = 'base/form.html'
    fields = ['show_blog', 'show_shop', 'under_construction', 'show_chat','show_price','user_register']
| true | true |
1c317c6f990469410000fc123e1ab7a1df626cbd | 314 | py | Python | LC/70.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | 2 | 2018-02-24T17:20:02.000Z | 2018-02-24T17:25:43.000Z | LC/70.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | LC/70.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
class Solution(object):
    """LeetCode 70 — Climbing Stairs.

    Count the distinct ways to climb ``n`` steps taking 1 or 2 steps at a
    time: ``ways(n) = ways(n-1) + ways(n-2)`` (the Fibonacci recurrence).
    """

    # Memo shared across calls/instances; seeds are the base cases
    # (1 way to reach step 1, 2 ways to reach step 2).
    # NOTE: the name shadows the builtin `dict`; kept for backward compatibility.
    dict = {1: 1, 2: 2}

    def climbStairs(self, n):
        """
        :type n: int
        :rtype: int
        """
        if n in self.dict:
            return self.dict[n]
        # Bug fix: original read `self.climbStairs(n-2) + njh` where `njh` is
        # an undefined name (NameError for any n > 2); the missing term is the
        # (n-1) branch of the recurrence.
        self.dict[n] = self.climbStairs(n - 1) + self.climbStairs(n - 2)
        return self.dict[n]
1c317cfa8a107f005e2676b9980a1ba3186d2250 | 34,103 | py | Python | alveus/ribbon.py | FrederikLehn/alveus | 71a858d0cdd8a4bbd06a28eb35fa7a8a7bd4814b | [
"MIT"
] | null | null | null | alveus/ribbon.py | FrederikLehn/alveus | 71a858d0cdd8a4bbd06a28eb35fa7a8a7bd4814b | [
"MIT"
] | null | null | null | alveus/ribbon.py | FrederikLehn/alveus | 71a858d0cdd8a4bbd06a28eb35fa7a8a7bd4814b | [
"MIT"
] | null | null | null | # generic imports ------------------------------------------------------------------------------------------------------
import types
# wxPython imports -----------------------------------------------------------------------------------------------------
import wx
import wx.lib.agw.ribbon as rb
from wx.lib.agw.gradientbutton import GradientButton
from wx.lib.agw.ribbon.art import RIBBON_BAR_SHOW_PAGE_LABELS, RIBBON_BAR_SHOW_PAGE_ICONS
# Alveus imports -------------------------------------------------------------------------------------------------------
from _ids import *
import _icons as ico
from widgets.customized_menu import CustomMenuItem, CustomMenu
# ----------------------------------------------------------------------------------------------------------------------
class Ribbon(rb.RibbonBar):
    """Main application ribbon with a gradient 'File' tab button and two
    pages ('Home' and 'Entities') built from RibbonButtonBar panels.

    Every button bar is exposed as an attribute (window, generic_chart,
    custom_chart, export, correlation, summary, folder, portfolio,
    subsurface, facility, concession, simulation) so the owning frame can
    bind handlers and toggle availability via EnableButtons().
    """

    def __init__(self, parent):
        """
        :param parent: parent window (the application frame)
        """
        super().__init__(parent=parent, id=wx.ID_ANY, agwStyle=rb.RIBBON_BAR_DEFAULT_STYLE | rb.RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS)
        # File tab------------------------------------------------------------------------------------------------------
        # 'File' is not a real ribbon page: it is a gradient button overlaid on
        # the tab row whose click pops up a flat menu (see OnFileTabMenu)
        self.file_page = RibbonFileTab(self)
        self.file_menu = RibbonFileMenu()
        self.file_page.Bind(wx.EVT_BUTTON, self.OnFileTabMenu)
        # Home tab------------------------------------------------------------------------------------------------------
        home = rb.RibbonPage(self, wx.ID_ANY, 'Home')
        window_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Window')
        self.window = rb.RibbonButtonBar(window_panel)
        self.window.AddHybridButton(ID_WINDOW, 'Window', ico.window_32x32.GetBitmap(), 'Add new window')
        self.window.AddSimpleButton(ID_WINDOW_REFRESH, 'Refresh', ico.window_refresh_32x32.GetBitmap(), 'Refresh active window')
        self.window.AddToggleButton(ID_WINDOW_PRESENT, 'Present', ico.window_32x32.GetBitmap(), 'Change to presentation mode')
        generic_chart_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Generic charts')
        self.generic_chart = rb.RibbonButtonBar(generic_chart_panel)
        self.generic_chart.AddSimpleButton(ID_CHART_CARTESIAN, 'Cartesian', ico.cartesian_chart_32x32.GetBitmap(), 'Add new cartesian chart')
        self.generic_chart.AddSimpleButton(ID_CHART_STACKED, 'Stacked', ico.stacked_chart_32x32.GetBitmap(), 'Add new stacked chart')
        self.generic_chart.AddSimpleButton(ID_CHART_BAR, 'Bar', ico.bar_chart_32x32.GetBitmap(), 'Add new bar chart')
        self.generic_chart.AddSimpleButton(ID_CHART_BUBBLE, 'Bubble', ico.bubble_chart_32x32.GetBitmap(), 'Add new bubble chart')
        self.generic_chart.AddSimpleButton(ID_CHART_HISTOGRAM, 'Histogram', ico.histogram_chart_32x32.GetBitmap(), 'Add new histogram')
        self.generic_chart.AddSimpleButton(ID_CHART_MAP, 'Map', ico.map_chart_32x32.GetBitmap(), 'Add new map')
        self.generic_chart.AddSimpleButton(ID_CHART_3D, '3D', ico.threeD_chart_32x32.GetBitmap(), 'Add 3D chart')
        custom_chart_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Custom charts')
        self.custom_chart = rb.RibbonButtonBar(custom_chart_panel)
        self.custom_chart.AddSimpleButton(ID_CHART_FIT, 'Fits', ico.fit_chart_32x32.GetBitmap(), 'Add new fit chart')
        self.custom_chart.AddSimpleButton(ID_CHART_TREND, 'Trends', ico.trend_chart_32x32.GetBitmap(), 'Add new trend chart')
        self.custom_chart.AddSimpleButton(ID_CHART_INCREMENT, 'Increments', ico.increment_chart_32x32.GetBitmap(), 'Add new increment chart')
        self.custom_chart.AddSimpleButton(ID_CHART_PROFILES, 'Profiles', ico.profiles_chart_32x32.GetBitmap(), 'Add new profiles chart')
        export_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Export')
        self.export = rb.RibbonButtonBar(export_panel)
        self.export.AddSimpleButton(ID_EXPORT_EXCEL, 'Export', ico.export_spreadsheet_32x32.GetBitmap(), 'Open profile export frame')
        correlation_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Correlation')
        self.correlation = rb.RibbonButtonBar(correlation_panel)
        self.correlation.AddSimpleButton(ID_CORRELATION_ENT, 'Entity', ico.correlation_entity_32x32.GetBitmap(), 'Open entity correlation frame')
        self.correlation.AddSimpleButton(ID_CORRELATION_VAR, 'Variable', ico.correlation_variable_32x32.GetBitmap(), 'Open variable correlation frame')
        summary_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Summary')
        self.summary = rb.RibbonButtonBar(summary_panel)
        self.summary.AddSimpleButton(ID_SUMMARY, 'Summary', ico.summary_32x32.GetBitmap(), 'Add new summary variable')
        # Entities tab -------------------------------------------------------------------------------------------------
        entities = rb.RibbonPage(self, wx.ID_ANY, 'Entities')
        folder_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Folders')
        self.folder = rb.RibbonButtonBar(folder_panel)
        self.folder.AddSimpleButton(ID_FOLDER, 'Folder', ico.folder_closed_32x32.GetBitmap(), 'Add new folder')
        portfolio_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Portfolio')
        self.portfolio = rb.RibbonButtonBar(portfolio_panel)
        self.portfolio.AddSimpleButton(ID_ANALOGUE, 'Analogue', ico.analogue_32x32.GetBitmap(), 'Add new analogue')
        self.portfolio.AddSimpleButton(ID_TYPECURVE, 'Typecurve', ico.trend_chart_32x32.GetBitmap(), 'Add new typecurve')
        self.portfolio.AddSimpleButton(ID_SCALING, 'Scaling', ico.scaling_chart_32x32.GetBitmap(), 'Add new scaling')
        subsurface_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Subsurface')
        self.subsurface = rb.RibbonButtonBar(subsurface_panel)
        self.subsurface.AddSimpleButton(ID_RESERVOIR, 'Reservoir', ico.reservoir_32x32.GetBitmap(), 'Add new reservoir')
        self.subsurface.AddSimpleButton(ID_THEME, 'Theme', ico.theme_32x32.GetBitmap(), 'Add new theme')
        self.subsurface.AddSimpleButton(ID_POLYGON, 'Polygon', ico.polygon_32x32.GetBitmap(), 'Add new polygon')
        self.subsurface.AddSimpleButton(ID_PRODUCER, 'Producer', ico.producer_oil_gas_32x32.GetBitmap(), 'Add new producer')
        self.subsurface.AddSimpleButton(ID_INJECTOR, 'Injector', ico.injector_wag_32x32.GetBitmap(), 'Add new injector')
        facility_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Facility')
        self.facility = rb.RibbonButtonBar(facility_panel)
        self.facility.AddSimpleButton(ID_PLATFORM, 'Platform', ico.platforms_32x32.GetBitmap(), 'Add new platform')
        self.facility.AddSimpleButton(ID_PROCESSOR, 'Processor', ico.processor_32x32.GetBitmap(), 'Add new processor')
        self.facility.AddSimpleButton(ID_PIPELINE, 'Pipeline', ico.pipeline_32x32.GetBitmap(), 'Add new pipeline')
        concession_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Concession')
        self.concession = rb.RibbonButtonBar(concession_panel)
        self.concession.AddSimpleButton(ID_FIELD, 'Field', ico.field_32x32.GetBitmap(), 'Add new field')
        self.concession.AddSimpleButton(ID_BLOCK, 'Block', ico.block_32x32.GetBitmap(), 'Add new block')
        simulation_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Simulation')
        self.simulation = rb.RibbonButtonBar(simulation_panel)
        self.simulation.AddSimpleButton(ID_PROJECT, 'Project', ico.project_32x32.GetBitmap(), 'Add new project')
        self.simulation.AddSimpleButton(ID_HISTORY, 'History', ico.history_match_32x32.GetBitmap(), 'Add new history')
        self.simulation.AddSimpleButton(ID_SCENARIO, 'Scenario', ico.scenario_32x32.GetBitmap(), 'Add new scenario')
        self.simulation.AddSimpleButton(ID_PREDICTION, 'Prediction', ico.prediction_32x32.GetBitmap(), 'Add new prediction')
        self.ChangeArtProvider()
        self.Realize()

    # ==================================================================================================================
    # Events
    # ==================================================================================================================
    # comes from: https://github.com/wxWidgets/wxPython/blob/master/demo/agw/FlatMenu.py (26-08-2019)
    # lines: 538-561
    def OnFileTabMenu(self, event):
        """Pop the File flat-menu up directly below the 'File' tab button.

        :param event: wx.EVT_BUTTON event fired by the RibbonFileTab button
        """
        button = event.GetEventObject()
        button_size = button.GetSize()
        # convert the button's client position to screen coordinates so the
        # popup lands under the button regardless of frame position
        button_pos = button.GetPosition()
        button_pos = button.GetParent().ClientToScreen(button_pos)

        self.file_menu.SetOwnerHeight(button_size.y)
        self.file_menu.Popup(wx.Point(button_pos.x, button_pos.y), self)

    # ==================================================================================================================
    # External Methods
    # ==================================================================================================================
    def EnableButtons(self, state, entity_mgr=None):
        """
        Enables or disables ribbon buttons. If state is False, all buttons are
        disabled. If state is True, the enabling of lower-hierarchy entity buttons
        additionally requires that at least one entity of the parent type exists in
        entity_mgr (e.g. no typecurves without analogues, no polygons without themes).
        :param state: bool
        :param entity_mgr: class EntityManager (required when state is True)
        :return:
        """
        # Enable file menu
        self.file_menu.save.Enable(state)
        self.file_menu.save_as.Enable(state)
        self.file_menu.close.Enable(state)
        self.file_menu.settings.Enable(state)

        # Enable ribbon
        self.folder.EnableButton(ID_FOLDER, state)
        self.window.EnableButton(ID_WINDOW, state)
        self.window.EnableButton(ID_WINDOW_REFRESH, state)
        self.window.EnableButton(ID_WINDOW_PRESENT, state)
        for id_ in (ID_CHART_CARTESIAN, ID_CHART_STACKED, ID_CHART_BAR, ID_CHART_BUBBLE,
                    ID_CHART_HISTOGRAM, ID_CHART_MAP, ID_CHART_3D):
            self.generic_chart.EnableButton(id_, state)

        # TODO: Once charts are created, replace False with state
        self.custom_chart.EnableButton(ID_CHART_FIT, state)
        self.custom_chart.EnableButton(ID_CHART_TREND, False)
        self.custom_chart.EnableButton(ID_CHART_INCREMENT, False)
        self.custom_chart.EnableButton(ID_CHART_PROFILES, False)

        self.export.EnableButton(ID_EXPORT_EXCEL, state)
        self.correlation.EnableButton(ID_CORRELATION_ENT, state)
        self.correlation.EnableButton(ID_CORRELATION_VAR, state)
        self.summary.EnableButton(ID_SUMMARY, state)

        # Entities tab -------------------------------------------------------------------------------------------------
        # Gate booleans: `and` short-circuits, so entity_mgr is only touched when
        # state is True. Unlike the previous nested if-ladders, every button is
        # always assigned, which fixes buttons being left in a stale enabled state
        # (e.g. producers/injectors staying enabled when no reservoirs exist).
        has_analogues = bool(state and entity_mgr.GetAnalogues())
        has_reservoirs = bool(state and entity_mgr.GetReservoirs())
        has_themes = bool(has_reservoirs and entity_mgr.GetThemes())
        has_polygons = bool(has_themes and entity_mgr.GetPolygons())
        has_platforms = bool(state and entity_mgr.GetPlatforms())
        has_projects = bool(state and entity_mgr.GetProjects())
        has_scenarios = bool(has_projects and entity_mgr.GetScenarios())

        # portfolio: typecurves require at least one analogue
        self.portfolio.EnableButton(ID_ANALOGUE, state)
        self.portfolio.EnableButton(ID_SCALING, state)
        self.portfolio.EnableButton(ID_TYPECURVE, has_analogues)

        # subsurface: each level requires at least one entity on the level above
        self.subsurface.EnableButton(ID_RESERVOIR, state)
        self.subsurface.EnableButton(ID_THEME, has_reservoirs)
        self.subsurface.EnableButton(ID_POLYGON, has_themes)
        self.subsurface.EnableButton(ID_PRODUCER, has_polygons)
        self.subsurface.EnableButton(ID_INJECTOR, has_polygons)

        # facilities: processors require at least one platform
        self.facility.EnableButton(ID_PLATFORM, state)
        self.facility.EnableButton(ID_PIPELINE, state)
        self.facility.EnableButton(ID_PROCESSOR, has_platforms)

        # concessions (fields and blocks)
        self.concession.EnableButton(ID_FIELD, state)
        self.concession.EnableButton(ID_BLOCK, state)

        # simulation: histories/scenarios require a project, predictions a scenario
        self.simulation.EnableButton(ID_PROJECT, state)
        self.simulation.EnableButton(ID_HISTORY, has_projects)
        self.simulation.EnableButton(ID_SCENARIO, has_projects)
        self.simulation.EnableButton(ID_PREDICTION, has_scenarios)

    # Based on: https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/ribbon/art_msw.py (16-07-2019)
    def ChangeArtProvider(self):
        """Re-skin the default ribbon art provider: patch its draw methods with
        the module-level replacements and override spacing/colour attributes."""
        art = self.GetArtProvider()

        # add changes to drawing methods
        art.DrawTab = types.MethodType(DrawTab, art)
        art.DrawPanelBackground = types.MethodType(DrawPanelBackground, art)
        art.DrawPanelBorder = types.MethodType(DrawPanelBorder, art)
        art.DrawPageBackground = types.MethodType(DrawPageBackground, art)

        # ==============================================================================================================
        # drawing distances
        # ==============================================================================================================
        art._cached_tab_separator_visibility = -10.0  # valid visibilities are in range [0, 1]
        art._tab_separation_size = 0
        art._page_border_left = 1
        art._page_border_top = 0
        art._page_border_right = 0
        art._page_border_bottom = 2
        art._panel_x_separation_size = -1
        art._panel_y_separation_size = 0
        art._cached_tab_separator = wx.NullBitmap

        # ==============================================================================================================
        # colours
        # ==============================================================================================================
        # Tabs ---------------------------------------------------------------------------------------------------------
        # sets the colour of tab labels (created by Andrea Gavana)
        # art._tab_label_colour = wx.Colour(255, 255, 255)

        # Adjusted by Frederik Lehn to allow for different colour of active tab, hovered tab and passive tab
        art._tab_label_colour = wx.Colour(255, 255, 255)
        art._tab_active_label_colour = wx.Colour(0, 0, 0)
        art._tab_hover_label_colour = wx.Colour(255, 255, 255)

        # dont know
        # art._tab_separator_colour = wx.Colour(255, 0, 0)
        # art._tab_separator_gradient_colour = wx.Colour(200, 0, 0)

        # sets the colour of the active tab
        art._tab_active_background_colour = wx.Colour(255, 255, 255)
        art._tab_active_background_gradient_colour = wx.Colour(230, 230, 230)

        # sets colour of the hovered tab
        art._tab_hover_background_top_colour = wx.Colour(100, 100, 100)
        art._tab_hover_background_top_gradient_colour = wx.Colour(105, 105, 105)
        art._tab_hover_background_colour = wx.Colour(105, 105, 105)
        art._tab_hover_background_gradient_colour = wx.Colour(110, 110, 110)

        # Sets the colour behind the tabs
        art._tab_ctrl_background_brush = wx.Brush(wx.Colour(55, 55, 55))

        # sets the colour of the border around the active tabs
        art._tab_border_pen = wx.Pen(wx.Colour(55, 55, 55))

        # Panels -------------------------------------------------------------------------------------------------------
        # sets the colour of the label of the panel
        art._panel_label_colour = wx.Colour(0, 0, 0)
        art._panel_hover_label_colour = wx.Colour(0, 0, 0)
        art._panel_minimised_label_colour = wx.Colour(0, 0, 0)

        # don't know
        # art._panel_active_background_colour = wx.Colour(255, 0, 0)  # aux.COLOUR_DEFAULT
        # art._panel_active_background_gradient_colour = wx.Colour(255, 0, 0)  # aux.COLOUR_DEFAULT
        # art._panel_active_background_top_colour = wx.Colour(255, 0, 0)  # aux.COLOUR_DEFAULT
        # art._panel_active_background_top_gradient_colour = wx.Colour(255, 0, 0)  # aux.COLOUR_DEFAULT

        # sets the colour of the background of the panel label
        art._panel_label_background_brush = wx.Brush(wx.Colour(230, 230, 230))
        art._panel_hover_label_background_brush = wx.Brush(wx.Colour(230, 230, 230))

        # dont' know
        # art._panel_hover_button_background_brush = wx.Brush(wx.Colour(255, 0, 0))

        # sets the colour of the border around the panel
        art._panel_border_pen = wx.Pen(wx.Colour(143, 143, 143))
        art._panel_border_gradient_pen = wx.Pen(wx.Colour(143, 143, 143))

        # Pages --------------------------------------------------------------------------------------------------------
        # Sets the colour of the tab pages
        art._page_background_top_colour = wx.Colour(230, 230, 230)
        art._page_background_top_gradient_colour = wx.Colour(242, 242, 242)
        art._page_background_colour = wx.Colour(242, 242, 242)
        art._page_background_gradient_colour = wx.Colour(255, 255, 255)

        # sets the colour of the background of the panels when hovering on them (not the pages)
        art._page_hover_background_top_colour = art._page_background_top_colour
        art._page_hover_background_top_gradient_colour = art._page_background_top_gradient_colour
        art._page_hover_background_colour = art._page_background_colour
        art._page_hover_background_gradient_colour = art._page_background_gradient_colour

        # sets the colour of the border around the pages,
        art._page_border_pen = wx.Pen(wx.Colour(83, 83, 83))

        # introduced by Frederik Lehn to allow for a different coloured top border
        art._page_border_top_pen = wx.Pen(wx.Colour(244, 170, 0))

        # Buttons ------------------------------------------------------------------------------------------------------
        # Sets the colour of the label of a button
        art._button_bar_label_colour = wx.Colour(0, 0, 0)

        # Sets the colour when clicking on a button
        art._button_bar_active_background_top_colour = wx.Colour(255, 218, 109)
        art._button_bar_active_background_top_gradient_colour = wx.Colour(255, 218, 109)
        art._button_bar_active_background_colour = wx.Colour(255, 218, 109)
        art._button_bar_active_background_gradient_colour = wx.Colour(255, 218, 109)

        # Sets the colour when hovering on a button
        art._button_bar_hover_background_top_colour = wx.Colour(255, 227, 125)
        art._button_bar_hover_background_top_gradient_colour = wx.Colour(254, 233, 157)
        art._button_bar_hover_background_colour = wx.Colour(254, 233, 157)
        art._button_bar_hover_background_gradient_colour = wx.Colour(253, 243, 204)

        # Sets the colour of the border when clicking and hovering on a button
        art._button_bar_active_border_pen = wx.Pen(wx.Colour(194, 150, 61))
        art._button_bar_hover_border_pen = wx.Pen(wx.Colour(242, 201, 88))

        self.SetArtProvider(art)
class RibbonFileMenu(CustomMenu):
    """Flat drop-down menu shown when the ribbon's 'File' tab button is clicked.

    The items are exposed as attributes (save, save_as, open, close, new,
    settings) so the owning frame can bind handlers and enable/disable them.
    """

    def __init__(self):
        super().__init__()

        def _item(label, bitmap):
            # all entries share id/help/kind; only label and bitmap differ
            return CustomMenuItem(self, id=wx.ID_ANY, label=label, helpString='',
                                  kind=wx.ITEM_NORMAL, normalBmp=bitmap)

        self.save = _item('Save project', ico.save_32x32.GetBitmap())
        self.save_as = _item('Save project as', ico.save_as_32x32.GetBitmap())
        self.open = _item('Open project', ico.project_open_32x32.GetBitmap())
        self.close = _item('Close project', ico.project_close_32x32.GetBitmap())
        self.new = _item('New project', wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_OTHER, wx.Size(32, 32)))
        self.settings = _item('Settings', ico.settings_32x32.GetBitmap())

        # None marks a separator between the save / project / settings groups
        layout = (self.save, self.save_as, None,
                  self.open, self.close, self.new, None,
                  self.settings)
        for entry in layout:
            if entry is None:
                self.AppendSeparator()
            else:
                self.AppendItem(entry)
class RibbonFileTab(GradientButton):
    """Amber gradient button that stands in for the ribbon's 'File' tab."""

    def __init__(self, parent):
        super().__init__(parent=parent, id=wx.ID_ANY, label='File', pos=(1, 2), size=(49, 24))

        # swap in the custom outline path defined at module level
        self.GetPath = types.MethodType(GetPathGradientButton, self)

        # yellow-to-amber gradient; pressed state is flat amber, label is black
        palette = (
            (self.SetTopStartColour, (236, 201, 10)),
            (self.SetTopEndColour, (250, 192, 0)),
            (self.SetBottomStartColour, (250, 192, 0)),
            (self.SetBottomEndColour, (244, 170, 0)),
            (self.SetPressedTopColour, (244, 170, 0)),
            (self.SetPressedBottomColour, (244, 170, 0)),
            (self.SetForegroundColour, (0, 0, 0)),
        )
        for setter, rgb in palette:
            setter(wx.Colour(*rgb))
# ======================================================================================================================
# Functions used to change the ArtProvider of the ribbon
# ======================================================================================================================
# Taken from https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/ribbon/art_msw.py (17-07-2019)
# Changes are made to lines (in the link): 993-1007 in order to remove the curved edges at the bottom of the tabs
# Changes are made to lines (in the link): 982-991 in order to remove the curved edges at the top of the tabs
# Changes are made to lines (in the link): 1023 to have black colour for active tab and white for inactive
def DrawTab(self, dc, wnd, tab):
    """Draw a single ribbon tab; bound onto the art provider at runtime.

    Adapted from wx.lib.agw.ribbon.art_msw.RibbonMSWArtProvider.DrawTab with
    the curved top/bottom tab edges removed and flat gradient fills for the
    active and hovered states (see the adaptation notes above this function).

    :param dc: the `wx.DC` to draw on.
    :param wnd: the owning window (unused here; kept for the provider API).
    :param tab: tab info object exposing ``rect``, ``active``, ``hovered``
        and ``page`` attributes.
    """
    if tab.rect.height <= 2:
        # Tab strip is collapsed to (almost) nothing; nothing to draw.
        return
    if tab.active or tab.hovered:
        if tab.active:
            # Fill the tab interior, inset 2px from the tab rectangle.
            background = wx.Rect(*tab.rect)
            background.SetX(background.GetX() + 2)
            background.SetY(background.GetY() + 2)
            background.SetWidth(background.GetWidth() - 4)
            background.SetHeight(background.GetHeight() - 2)
            dc.GradientFillLinear(background, self._tab_active_background_colour,
                                  self._tab_active_background_gradient_colour, wx.SOUTH)
        # TODO(upstream): the combined active-and-hovered state is not drawn
        # distinctly; the active branch above takes precedence.
        elif tab.hovered:
            # Hovered tabs use a two-part gradient: one for the top half,
            # one for the remainder.
            background = wx.Rect(*tab.rect)
            background.SetX(background.GetX() + 2)
            background.SetY(background.GetY() + 2)
            background.SetWidth(background.GetWidth() - 4)
            background.SetHeight(background.GetHeight() - 3)
            h = background.GetHeight()
            # NOTE(review): true division yields a float; older wxPython
            # accepts it but newer builds may warn or raise — confirm.
            background.SetHeight(background.GetHeight() / 2)
            dc.GradientFillLinear(background, self._tab_hover_background_top_colour,
                                  self._tab_hover_background_top_gradient_colour, wx.SOUTH)
            background.SetY(background.GetY() + background.GetHeight())
            background.SetHeight(h - background.GetHeight())
            dc.GradientFillLinear(background, self._tab_hover_background_colour,
                                  self._tab_hover_background_gradient_colour, wx.SOUTH)
        # Draw the (square) outline of the tab's top-left corner.
        dc.SetPen(self._tab_border_pen)
        dc.DrawLine(wx.Point(1, 1), wx.Point(3, 1))
        dc.DrawLine(wx.Point(3, 1), wx.Point(3, 3))
        dc.DrawLine(wx.Point(3, 3), wx.Point(1, 3))
        dc.DrawLine(wx.Point(1, 3), wx.Point(1, 1))
    if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
        # Optional page icon, left-aligned (or centred when labels are off).
        icon = tab.page.GetIcon()
        if icon.IsOk():
            x = tab.rect.x + 4
            if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS == 0:
                x = tab.rect.x + (tab.rect.width - icon.GetWidth()) / 2
            dc.DrawBitmap(icon, x, tab.rect.y + 1 + (tab.rect.height - 1 - icon.GetHeight()) / 2, True)
    if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS:
        label = tab.page.GetLabel()
        if label.strip():
            dc.SetFont(self._tab_label_font)
            # Label colour tracks the tab state (active > hovered > normal).
            if tab.active:
                dc.SetTextForeground(self._tab_active_label_colour)
            elif tab.hovered:
                dc.SetTextForeground(self._tab_hover_label_colour)
            else:
                dc.SetTextForeground(self._tab_label_colour)
            dc.SetBackgroundMode(wx.TRANSPARENT)
            text_width, text_height = dc.GetTextExtent(label)
            width = tab.rect.width - 5
            x = tab.rect.x + 3
            if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
                # Shift the text area right of the icon.
                x += 3 + tab.page.GetIcon().GetWidth()
                width -= 3 + tab.page.GetIcon().GetWidth()
            y = tab.rect.y + (tab.rect.height - text_height) / 2
            if width <= text_width:
                # Label doesn't fit: clip it to the available width.
                dc.SetClippingRegion(x, tab.rect.y, width, tab.rect.height)
                dc.DrawText(label, x, y)
            else:
                # Label fits: centre it in the remaining width.
                dc.DrawText(label, x + (width - text_width) / 2 + 1, y)
# Taken from https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/ribbon/art_msw.py (16-07-2019)
# Changes are made to lines (in the link): 1691-1719 in order to remove wrap-around border of the panels
def DrawPanelBorder(self, dc, rect, primary_colour, secondary_colour):
    """Draw the right-hand separator and top accent line of a ribbon panel.

    Replacement for RibbonMSWArtProvider.DrawPanelBorder that omits the
    wrap-around border: only the panel's right edge (in *primary_colour*)
    and the top line (in the page's top-border pen) are drawn.
    *secondary_colour* is accepted to preserve the original signature but
    is not used.
    """
    right_edge = rect.width
    lower_end = rect.height - 1
    # Vertical separator along the panel's right edge.
    dc.SetPen(primary_colour)
    dc.DrawLine(wx.Point(right_edge, 2), wx.Point(right_edge, lower_end))
    # Accent line across the top, matching the page's top-border colour.
    dc.SetPen(self._page_border_top_pen)
    dc.DrawLine(wx.Point(0, 0), wx.Point(right_edge + 1, 0))
# Taken from https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/ribbon/art_msw.py (18-07-2019)
# Changes are made to lines (in the link): 1450-1451 in order to extend panel colouring slightly to allow for a single border
# Changes are made to lines (in the link): 1480 due to an error with dc.DrawRectangleRect (changed to dc.DrawRectangle)
# notice this solution results in a slight flickering when moving the mouse between panels
def DrawPanelBackground(self, dc, wnd, rect):
    """Draw a ribbon panel's background and its bottom label strip.

    Adapted from RibbonMSWArtProvider.DrawPanelBackground: the panel
    colouring is extended slightly so only a single border is drawn, and
    dc.DrawRectangleRect was replaced by dc.DrawRectangle (per the
    adaptation notes above this function). Known cosmetic side effect:
    slight flicker when the mouse moves between panels.

    :param dc: the `wx.DC` to draw on.
    :param wnd: the `RibbonPanel` being drawn (queried for label, hover
        state and the extension button).
    :param rect: the panel's full rectangle including padding.
    """
    self.DrawPartialPageBackground(dc, wnd, rect, False)
    true_rect = wx.Rect(*rect)
    true_rect = self.RemovePanelPadding(true_rect)
    dc.SetFont(self._panel_label_font)
    dc.SetPen(wx.TRANSPARENT_PEN)
    has_ext_button = wnd.HasExtButton()
    # Label strip colours depend on whether the panel is hovered.
    if wnd.IsHovered():
        dc.SetBrush(self._panel_hover_label_background_brush)
        dc.SetTextForeground(self._panel_hover_label_colour)
    else:
        dc.SetBrush(self._panel_label_background_brush)
        dc.SetTextForeground(self._panel_label_colour)
    label_rect = wx.Rect(*true_rect)
    label = wnd.GetLabel().strip()
    clip_label = False
    label_size = wx.Size(*dc.GetTextExtent(label))
    # Deliberate no-ops: upstream offsets by +1 / -2 here; kept neutral so
    # the label strip spans the full panel width (single-border look).
    label_rect.SetX(label_rect.GetX())  # + 1
    label_rect.SetWidth(label_rect.GetWidth())  # - 2
    label_rect.SetHeight(label_size.GetHeight() + 2)
    label_rect.SetY(true_rect.GetBottom() - label_rect.GetHeight())
    label_height = label_rect.GetHeight()
    label_bg_rect = wx.Rect(*label_rect)
    if has_ext_button:
        # Reserve 13px on the right for the panel extension button.
        label_rect.SetWidth(label_rect.GetWidth() - 13)
    if label_size.GetWidth() > label_rect.GetWidth():
        # Test if there is enough length for 3 letters and ...
        new_label = label[0:3] + "..."
        label_size = wx.Size(*dc.GetTextExtent(new_label))
        if label_size.GetWidth() > label_rect.GetWidth():
            # Not enough room for three characters and ...
            # Display the entire label and just crop it
            clip_label = True
        else:
            # Room for some characters and ...
            # Display as many characters as possible and append ...
            for l in range(len(label) - 1, 3, -1):
                new_label = label[0:l] + "..."
                label_size = wx.Size(*dc.GetTextExtent(new_label))
                if label_size.GetWidth() <= label_rect.GetWidth():
                    label = new_label
                    break
    dc.DrawRectangle(label_rect)
    if clip_label:
        # DCClipper clips for the lifetime of 'clip' (released on return).
        clip = wx.DCClipper(dc, label_rect)
        dc.DrawText(label, label_rect.GetX(), label_rect.GetY() + (label_rect.GetHeight() - label_size.GetHeight()) / 2)
    else:
        dc.DrawText(label, label_rect.GetX() + (label_rect.GetWidth() - label_size.GetWidth()) / 2,
                    label_rect.GetY() + (label_rect.GetHeight() - label_size.GetHeight()) / 2)
    if has_ext_button:
        # Extension ("more options") button in the label strip's right corner.
        if wnd.IsExtButtonHovered():
            dc.SetPen(self._panel_hover_button_border_pen)
            dc.SetBrush(self._panel_hover_button_background_brush)
            dc.DrawRoundedRectangle(label_rect.GetRight(), label_rect.GetBottom() - 13, 13, 13, 1)
            dc.DrawBitmap(self._panel_extension_bitmap[1], label_rect.GetRight() + 3, label_rect.GetBottom() - 10, True)
        else:
            dc.DrawBitmap(self._panel_extension_bitmap[0], label_rect.GetRight() + 3, label_rect.GetBottom() - 10, True)
    if wnd.IsHovered():
        # Repaint the client area with the hover variant of the page background.
        client_rect = wx.Rect(*true_rect)
        client_rect.SetX(client_rect.GetX() + 1)
        client_rect.SetWidth(client_rect.GetWidth() - 2)
        client_rect.SetY(client_rect.GetY() + 1)
        # NOTE(review): height derives from label_height only (-2), unlike
        # upstream which subtracts it from the panel height — confirm intended.
        client_rect.SetHeight( - 2 + label_height)
        self.DrawPartialPageBackground(dc, wnd, client_rect, True)
    self.DrawPanelBorder(dc, true_rect, self._panel_border_pen, self._panel_border_gradient_pen)
# Taken from https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/ribbon/art_msw.py (17-07-2019)
# Changes are made to lines (in the link): 1229-1240 in order to remove rounded pages and allow for a coloured top line
def DrawPageBackground(self, dc, wnd, rect):
    """Draw a ribbon page's background, border and top accent line.

    Adapted from RibbonMSWArtProvider.DrawPageBackground: rounded page
    corners are removed and a coloured top line is added (see the
    adaptation notes above this function).

    :param dc: the `wx.DC` to draw on.
    :param wnd: the page window (unused here; kept for the provider API).
    :param rect: the page's rectangle.
    """
    # Paint the 2px side and bottom margins in the tab-strip colour.
    dc.SetPen(wx.TRANSPARENT_PEN)
    dc.SetBrush(self._tab_ctrl_background_brush)
    edge = wx.Rect(*rect)
    edge.SetWidth(2)
    dc.DrawRectangle(edge.GetX(), edge.GetY(), edge.GetWidth(), edge.GetHeight())
    edge.SetX(edge.GetX() + rect.GetWidth() - 2)
    dc.DrawRectangle(edge.GetX(), edge.GetY(), edge.GetWidth(), edge.GetHeight())
    edge = wx.Rect(*rect)
    edge.SetHeight(2)
    edge.SetY(edge.GetY() + rect.GetHeight() - edge.GetHeight())
    dc.DrawRectangle(edge.GetX(), edge.GetY(), edge.GetWidth(), edge.GetHeight())
    # Two stacked gradients: the top fifth, then the remainder of the page.
    background = wx.Rect(*rect)
    background.SetX(background.GetX() + 2)
    background.SetWidth(background.GetWidth() - 4)
    background.SetHeight(background.GetHeight() - 2)
    # NOTE(review): true division yields a float; confirm newer wxPython
    # accepts it for wx.Rect.SetHeight.
    background.SetHeight(background.GetHeight() / 5)
    dc.GradientFillLinear(background, self._page_background_top_colour,
                          self._page_background_top_gradient_colour, wx.SOUTH)
    background.SetY(background.GetY() + background.GetHeight())
    background.SetHeight(rect.GetHeight() - 2 - background.GetHeight())
    dc.GradientFillLinear(background, self._page_background_colour,
                          self._page_background_gradient_colour, wx.SOUTH)
    # draw bottom and the sides
    dc.SetPen(self._page_border_pen)
    border_points = [wx.Point() for i in range(4)]
    border_points[0] = wx.Point(0, 0)  # upper left
    border_points[1] = wx.Point(0, rect.height - 1)  # lower left
    border_points[2] = wx.Point(rect.width + 1, rect.height - 1)  # lower right
    border_points[3] = wx.Point(rect.width + 1, 0)  # upper right corner
    dc.DrawLines(border_points, rect.x, rect.y)
    # draw top line in the accent colour
    dc.SetPen(self._page_border_top_pen)
    dc.DrawLine(border_points[0], border_points[3])
# Taken from https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/gradientbutton.py (17-07-2019)
# Changes are made to line (in the link): 476-489 in order to remove the rounding of the button (added zero radius)
def GetPathGradientButton(self, gc, rc, r):
    """Build the button outline path with square corners.

    Replacement for GradientButton.GetPath: the supplied corner radius *r*
    is ignored and forced to zero, turning the rounded rectangle into a
    plain, sharp-cornered one.

    :param gc: the graphics context used to create the path.
    :param rc: the button rectangle as an ``(x, y, width, height)`` tuple.
    :param r: requested corner radius (ignored).
    :returns: the closed rectangular path.
    """
    left, top, width, height = rc
    outline = gc.CreatePath()
    # Radius 0 removes the rounding entirely.
    outline.AddRoundedRectangle(left, top, width, height, 0)
    outline.CloseSubpath()
    return outline
| 51.515106 | 152 | 0.616544 |
import types
import wx.lib.agw.ribbon as rb
from wx.lib.agw.gradientbutton import GradientButton
from wx.lib.agw.ribbon.art import RIBBON_BAR_SHOW_PAGE_LABELS, RIBBON_BAR_SHOW_PAGE_ICONS
from _ids import *
import _icons as ico
from widgets.customized_menu import CustomMenuItem, CustomMenu
class Ribbon(rb.RibbonBar):
def __init__(self, parent):
super().__init__(parent=parent, id=wx.ID_ANY, agwStyle=rb.RIBBON_BAR_DEFAULT_STYLE | rb.RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS)
self.file_page = RibbonFileTab(self)
self.file_menu = RibbonFileMenu()
self.file_page.Bind(wx.EVT_BUTTON, self.OnFileTabMenu)
home = rb.RibbonPage(self, wx.ID_ANY, 'Home')
window_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Window')
self.window = rb.RibbonButtonBar(window_panel)
self.window.AddHybridButton(ID_WINDOW, 'Window', ico.window_32x32.GetBitmap(), 'Add new window')
self.window.AddSimpleButton(ID_WINDOW_REFRESH, 'Refresh', ico.window_refresh_32x32.GetBitmap(), 'Refresh active window')
self.window.AddToggleButton(ID_WINDOW_PRESENT, 'Present', ico.window_32x32.GetBitmap(), 'Change to presentation mode')
generic_chart_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Generic charts')
self.generic_chart = rb.RibbonButtonBar(generic_chart_panel)
self.generic_chart.AddSimpleButton(ID_CHART_CARTESIAN, 'Cartesian', ico.cartesian_chart_32x32.GetBitmap(), 'Add new cartesian chart')
self.generic_chart.AddSimpleButton(ID_CHART_STACKED, 'Stacked', ico.stacked_chart_32x32.GetBitmap(), 'Add new stacked chart')
self.generic_chart.AddSimpleButton(ID_CHART_BAR, 'Bar', ico.bar_chart_32x32.GetBitmap(), 'Add new bar chart')
self.generic_chart.AddSimpleButton(ID_CHART_BUBBLE, 'Bubble', ico.bubble_chart_32x32.GetBitmap(), 'Add new bubble chart')
self.generic_chart.AddSimpleButton(ID_CHART_HISTOGRAM, 'Histogram', ico.histogram_chart_32x32.GetBitmap(), 'Add new histogram')
self.generic_chart.AddSimpleButton(ID_CHART_MAP, 'Map', ico.map_chart_32x32.GetBitmap(), 'Add new map')
self.generic_chart.AddSimpleButton(ID_CHART_3D, '3D', ico.threeD_chart_32x32.GetBitmap(), 'Add 3D chart')
custom_chart_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Custom charts')
self.custom_chart = rb.RibbonButtonBar(custom_chart_panel)
self.custom_chart.AddSimpleButton(ID_CHART_FIT, 'Fits', ico.fit_chart_32x32.GetBitmap(), 'Add new fit chart')
self.custom_chart.AddSimpleButton(ID_CHART_TREND, 'Trends', ico.trend_chart_32x32.GetBitmap(), 'Add new trend chart')
self.custom_chart.AddSimpleButton(ID_CHART_INCREMENT, 'Increments', ico.increment_chart_32x32.GetBitmap(), 'Add new increment chart')
self.custom_chart.AddSimpleButton(ID_CHART_PROFILES, 'Profiles', ico.profiles_chart_32x32.GetBitmap(), 'Add new profiles chart')
export_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Export')
self.export = rb.RibbonButtonBar(export_panel)
self.export.AddSimpleButton(ID_EXPORT_EXCEL, 'Export', ico.export_spreadsheet_32x32.GetBitmap(), 'Open profile export frame')
correlation_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Correlation')
self.correlation = rb.RibbonButtonBar(correlation_panel)
self.correlation.AddSimpleButton(ID_CORRELATION_ENT, 'Entity', ico.correlation_entity_32x32.GetBitmap(), 'Open entity correlation frame')
self.correlation.AddSimpleButton(ID_CORRELATION_VAR, 'Variable', ico.correlation_variable_32x32.GetBitmap(), 'Open variable correlation frame')
summary_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Summary')
self.summary = rb.RibbonButtonBar(summary_panel)
self.summary.AddSimpleButton(ID_SUMMARY, 'Summary', ico.summary_32x32.GetBitmap(), 'Add new summary variable')
entities = rb.RibbonPage(self, wx.ID_ANY, 'Entities')
folder_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Folders')
self.folder = rb.RibbonButtonBar(folder_panel)
self.folder.AddSimpleButton(ID_FOLDER, 'Folder', ico.folder_closed_32x32.GetBitmap(), 'Add new folder')
portfolio_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Portfolio')
self.portfolio = rb.RibbonButtonBar(portfolio_panel)
self.portfolio.AddSimpleButton(ID_ANALOGUE, 'Analogue', ico.analogue_32x32.GetBitmap(), 'Add new analogue')
self.portfolio.AddSimpleButton(ID_TYPECURVE, 'Typecurve', ico.trend_chart_32x32.GetBitmap(), 'Add new typecurve')
self.portfolio.AddSimpleButton(ID_SCALING, 'Scaling', ico.scaling_chart_32x32.GetBitmap(), 'Add new scaling')
subsurface_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Subsurface')
self.subsurface = rb.RibbonButtonBar(subsurface_panel)
self.subsurface.AddSimpleButton(ID_RESERVOIR, 'Reservoir', ico.reservoir_32x32.GetBitmap(), 'Add new reservoir')
self.subsurface.AddSimpleButton(ID_THEME, 'Theme', ico.theme_32x32.GetBitmap(), 'Add new theme')
self.subsurface.AddSimpleButton(ID_POLYGON, 'Polygon', ico.polygon_32x32.GetBitmap(), 'Add new polygon')
self.subsurface.AddSimpleButton(ID_PRODUCER, 'Producer', ico.producer_oil_gas_32x32.GetBitmap(), 'Add new producer')
self.subsurface.AddSimpleButton(ID_INJECTOR, 'Injector', ico.injector_wag_32x32.GetBitmap(), 'Add new injector')
facility_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Facility')
self.facility = rb.RibbonButtonBar(facility_panel)
self.facility.AddSimpleButton(ID_PLATFORM, 'Platform', ico.platforms_32x32.GetBitmap(), 'Add new platform')
self.facility.AddSimpleButton(ID_PROCESSOR, 'Processor', ico.processor_32x32.GetBitmap(), 'Add new processor')
self.facility.AddSimpleButton(ID_PIPELINE, 'Pipeline', ico.pipeline_32x32.GetBitmap(), 'Add new pipeline')
concession_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Concession')
self.concession = rb.RibbonButtonBar(concession_panel)
self.concession.AddSimpleButton(ID_FIELD, 'Field', ico.field_32x32.GetBitmap(), 'Add new field')
self.concession.AddSimpleButton(ID_BLOCK, 'Block', ico.block_32x32.GetBitmap(), 'Add new block')
simulation_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Simulation')
self.simulation = rb.RibbonButtonBar(simulation_panel)
self.simulation.AddSimpleButton(ID_PROJECT, 'Project', ico.project_32x32.GetBitmap(), 'Add new project')
self.simulation.AddSimpleButton(ID_HISTORY, 'History', ico.history_match_32x32.GetBitmap(), 'Add new history')
self.simulation.AddSimpleButton(ID_SCENARIO, 'Scenario', ico.scenario_32x32.GetBitmap(), 'Add new scenario')
self.simulation.AddSimpleButton(ID_PREDICTION, 'Prediction', ico.prediction_32x32.GetBitmap(), 'Add new prediction')
self.ChangeArtProvider()
self.Realize()
def OnFileTabMenu(self, event):
button = event.GetEventObject()
button_size = button.GetSize()
button_pos = button.GetPosition()
button_pos = button.GetParent().ClientToScreen(button_pos)
self.file_menu.SetOwnerHeight(button_size.y)
self.file_menu.Popup(wx.Point(button_pos.x, button_pos.y), self)
def EnableButtons(self, state, entity_mgr=None):
self.file_menu.save.Enable(state)
self.file_menu.save_as.Enable(state)
self.file_menu.close.Enable(state)
self.file_menu.settings.Enable(state)
self.folder.EnableButton(ID_FOLDER, state)
self.window.EnableButton(ID_WINDOW, state)
self.window.EnableButton(ID_WINDOW_REFRESH, state)
self.window.EnableButton(ID_WINDOW_PRESENT, state)
self.generic_chart.EnableButton(ID_CHART_CARTESIAN, state)
self.generic_chart.EnableButton(ID_CHART_STACKED, state)
self.generic_chart.EnableButton(ID_CHART_BAR, state)
self.generic_chart.EnableButton(ID_CHART_BUBBLE, state)
self.generic_chart.EnableButton(ID_CHART_HISTOGRAM, state)
self.generic_chart.EnableButton(ID_CHART_MAP, state)
self.generic_chart.EnableButton(ID_CHART_3D, state)
self.custom_chart.EnableButton(ID_CHART_FIT, state)
self.custom_chart.EnableButton(ID_CHART_TREND, False)
self.custom_chart.EnableButton(ID_CHART_INCREMENT, False)
self.custom_chart.EnableButton(ID_CHART_PROFILES, False)
self.export.EnableButton(ID_EXPORT_EXCEL, state)
self.correlation.EnableButton(ID_CORRELATION_ENT, state)
self.correlation.EnableButton(ID_CORRELATION_VAR, state)
self.summary.EnableButton(ID_SUMMARY, state)
self.portfolio.EnableButton(ID_ANALOGUE, state)
self.portfolio.EnableButton(ID_SCALING, state)
if state:
if entity_mgr.GetAnalogues():
self.portfolio.EnableButton(ID_TYPECURVE, state)
else:
self.portfolio.EnableButton(ID_TYPECURVE, False)
else:
self.portfolio.EnableButton(ID_TYPECURVE, state)
self.subsurface.EnableButton(ID_RESERVOIR, state)
if state:
if entity_mgr.GetReservoirs():
self.subsurface.EnableButton(ID_THEME, state)
if entity_mgr.GetThemes():
self.subsurface.EnableButton(ID_POLYGON, state)
if entity_mgr.GetPolygons():
self.subsurface.EnableButton(ID_PRODUCER, state)
self.subsurface.EnableButton(ID_INJECTOR, state)
else:
self.subsurface.EnableButton(ID_PRODUCER, False)
self.subsurface.EnableButton(ID_INJECTOR, False)
else:
self.subsurface.EnableButton(ID_POLYGON, False)
else:
self.subsurface.EnableButton(ID_THEME, False)
else:
self.subsurface.EnableButton(ID_THEME, state)
self.subsurface.EnableButton(ID_POLYGON, state)
self.subsurface.EnableButton(ID_PRODUCER, state)
self.subsurface.EnableButton(ID_INJECTOR, state)
self.facility.EnableButton(ID_PLATFORM, state)
self.facility.EnableButton(ID_PIPELINE, state)
if state:
if entity_mgr.GetPlatforms():
self.facility.EnableButton(ID_PROCESSOR, state)
else:
self.facility.EnableButton(ID_PROCESSOR, False)
else:
self.facility.EnableButton(ID_PROCESSOR, state)
self.concession.EnableButton(ID_FIELD, state)
self.concession.EnableButton(ID_BLOCK, state)
self.simulation.EnableButton(ID_PROJECT, state)
if state:
if entity_mgr.GetProjects():
self.simulation.EnableButton(ID_HISTORY, state)
self.simulation.EnableButton(ID_SCENARIO, state)
if entity_mgr.GetScenarios():
self.simulation.EnableButton(ID_PREDICTION, state)
else:
self.simulation.EnableButton(ID_PREDICTION, False)
else:
self.simulation.EnableButton(ID_HISTORY, False)
self.simulation.EnableButton(ID_SCENARIO, False)
else:
self.simulation.EnableButton(ID_HISTORY, state)
self.simulation.EnableButton(ID_SCENARIO, state)
self.simulation.EnableButton(ID_PREDICTION, state)
def ChangeArtProvider(self):
art = self.GetArtProvider()
art.DrawTab = types.MethodType(DrawTab, art)
art.DrawPanelBackground = types.MethodType(DrawPanelBackground, art)
art.DrawPanelBorder = types.MethodType(DrawPanelBorder, art)
art.DrawPageBackground = types.MethodType(DrawPageBackground, art)
art._cached_tab_separator_visibility = -10.0
art._tab_separation_size = 0
art._page_border_left = 1
art._page_border_top = 0
art._page_border_right = 0
art._page_border_bottom = 2
art._panel_x_separation_size = -1
art._panel_y_separation_size = 0
art._cached_tab_separator = wx.NullBitmap
art._tab_label_colour = wx.Colour(255, 255, 255)
art._tab_active_label_colour = wx.Colour(0, 0, 0)
art._tab_hover_label_colour = wx.Colour(255, 255, 255)
art._tab_active_background_colour = wx.Colour(255, 255, 255)
art._tab_active_background_gradient_colour = wx.Colour(230, 230, 230)
art._tab_hover_background_top_colour = wx.Colour(100, 100, 100)
art._tab_hover_background_top_gradient_colour = wx.Colour(105, 105, 105)
art._tab_hover_background_colour = wx.Colour(105, 105, 105)
art._tab_hover_background_gradient_colour = wx.Colour(110, 110, 110)
art._tab_ctrl_background_brush = wx.Brush(wx.Colour(55, 55, 55))
art._tab_border_pen = wx.Pen(wx.Colour(55, 55, 55))
art._panel_label_colour = wx.Colour(0, 0, 0)
art._panel_hover_label_colour = wx.Colour(0, 0, 0)
art._panel_minimised_label_colour = wx.Colour(0, 0, 0)
# art._panel_active_background_colour = wx.Colour(255, 0, 0) # aux.COLOUR_DEFAULT
# art._panel_active_background_gradient_colour = wx.Colour(255, 0, 0) # aux.COLOUR_DEFAULT
# art._panel_active_background_top_colour = wx.Colour(255, 0, 0) # aux.COLOUR_DEFAULT
# art._panel_active_background_top_gradient_colour = wx.Colour(255, 0, 0) # aux.COLOUR_DEFAULT
# sets the colour of the background of the panel label
art._panel_label_background_brush = wx.Brush(wx.Colour(230, 230, 230))
art._panel_hover_label_background_brush = wx.Brush(wx.Colour(230, 230, 230))
# dont' know
art._panel_border_pen = wx.Pen(wx.Colour(143, 143, 143))
art._panel_border_gradient_pen = wx.Pen(wx.Colour(143, 143, 143))
art._page_background_top_colour = wx.Colour(230, 230, 230)
art._page_background_top_gradient_colour = wx.Colour(242, 242, 242)
art._page_background_colour = wx.Colour(242, 242, 242)
art._page_background_gradient_colour = wx.Colour(255, 255, 255)
art._page_hover_background_top_colour = art._page_background_top_colour
art._page_hover_background_top_gradient_colour = art._page_background_top_gradient_colour
art._page_hover_background_colour = art._page_background_colour
art._page_hover_background_gradient_colour = art._page_background_gradient_colour
art._page_border_pen = wx.Pen(wx.Colour(83, 83, 83))
art._page_border_top_pen = wx.Pen(wx.Colour(244, 170, 0))
art._button_bar_label_colour = wx.Colour(0, 0, 0)
art._button_bar_active_background_top_colour = wx.Colour(255, 218, 109)
art._button_bar_active_background_top_gradient_colour = wx.Colour(255, 218, 109)
art._button_bar_active_background_colour = wx.Colour(255, 218, 109)
art._button_bar_active_background_gradient_colour = wx.Colour(255, 218, 109)
art._button_bar_hover_background_top_colour = wx.Colour(255, 227, 125)
art._button_bar_hover_background_top_gradient_colour = wx.Colour(254, 233, 157)
art._button_bar_hover_background_colour = wx.Colour(254, 233, 157)
art._button_bar_hover_background_gradient_colour = wx.Colour(253, 243, 204)
art._button_bar_active_border_pen = wx.Pen(wx.Colour(194, 150, 61))
art._button_bar_hover_border_pen = wx.Pen(wx.Colour(242, 201, 88))
self.SetArtProvider(art)
class RibbonFileMenu(CustomMenu):
def __init__(self):
super().__init__()
self.save = CustomMenuItem(self, id=wx.ID_ANY, label='Save project', helpString='', kind=wx.ITEM_NORMAL,
normalBmp=ico.save_32x32.GetBitmap())
self.save_as = CustomMenuItem(self, id=wx.ID_ANY, label='Save project as', helpString='', kind=wx.ITEM_NORMAL,
normalBmp=ico.save_as_32x32.GetBitmap())
self.open = CustomMenuItem(self, id=wx.ID_ANY, label='Open project', helpString='', kind=wx.ITEM_NORMAL,
normalBmp=ico.project_open_32x32.GetBitmap())
self.close = CustomMenuItem(self, id=wx.ID_ANY, label='Close project', helpString='', kind=wx.ITEM_NORMAL,
normalBmp=ico.project_close_32x32.GetBitmap())
self.new = CustomMenuItem(self, id=wx.ID_ANY, label='New project', helpString='', kind=wx.ITEM_NORMAL,
normalBmp=wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_OTHER, wx.Size(32, 32)))
self.settings = CustomMenuItem(self, id=wx.ID_ANY, label='Settings', helpString='', kind=wx.ITEM_NORMAL,
normalBmp=ico.settings_32x32.GetBitmap())
self.AppendItem(self.save)
self.AppendItem(self.save_as)
self.AppendSeparator()
self.AppendItem(self.open)
self.AppendItem(self.close)
self.AppendItem(self.new)
self.AppendSeparator()
self.AppendItem(self.settings)
class RibbonFileTab(GradientButton):
def __init__(self, parent):
super().__init__(parent=parent, id=wx.ID_ANY, label='File', pos=(1, 2), size=(49, 24))
self.GetPath = types.MethodType(GetPathGradientButton, self)
self.SetTopStartColour(wx.Colour(236, 201, 10))
self.SetTopEndColour(wx.Colour(250, 192, 0))
self.SetBottomStartColour(wx.Colour(250, 192, 0))
self.SetBottomEndColour(wx.Colour(244, 170, 0))
self.SetPressedTopColour(wx.Colour(244, 170, 0))
self.SetPressedBottomColour(wx.Colour(244, 170, 0))
self.SetForegroundColour(wx.Colour(0, 0, 0))
def DrawTab(self, dc, wnd, tab):
if tab.rect.height <= 2:
return
if tab.active or tab.hovered:
if tab.active:
background = wx.Rect(*tab.rect)
background.SetX(background.GetX() + 2)
background.SetY(background.GetY() + 2)
background.SetWidth(background.GetWidth() - 4)
background.SetHeight(background.GetHeight() - 2)
dc.GradientFillLinear(background, self._tab_active_background_colour,
self._tab_active_background_gradient_colour, wx.SOUTH)
elif tab.hovered:
background = wx.Rect(*tab.rect)
background.SetX(background.GetX() + 2)
background.SetY(background.GetY() + 2)
background.SetWidth(background.GetWidth() - 4)
background.SetHeight(background.GetHeight() - 3)
h = background.GetHeight()
background.SetHeight(background.GetHeight() / 2)
dc.GradientFillLinear(background, self._tab_hover_background_top_colour,
self._tab_hover_background_top_gradient_colour, wx.SOUTH)
background.SetY(background.GetY() + background.GetHeight())
background.SetHeight(h - background.GetHeight())
dc.GradientFillLinear(background, self._tab_hover_background_colour,
self._tab_hover_background_gradient_colour, wx.SOUTH)
dc.SetPen(self._tab_border_pen)
dc.DrawLine(wx.Point(1, 1), wx.Point(3, 1))
dc.DrawLine(wx.Point(3, 1), wx.Point(3, 3))
dc.DrawLine(wx.Point(3, 3), wx.Point(1, 3))
dc.DrawLine(wx.Point(1, 3), wx.Point(1, 1))
if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
icon = tab.page.GetIcon()
if icon.IsOk():
x = tab.rect.x + 4
if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS == 0:
x = tab.rect.x + (tab.rect.width - icon.GetWidth()) / 2
dc.DrawBitmap(icon, x, tab.rect.y + 1 + (tab.rect.height - 1 - icon.GetHeight()) / 2, True)
if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS:
label = tab.page.GetLabel()
if label.strip():
dc.SetFont(self._tab_label_font)
if tab.active:
dc.SetTextForeground(self._tab_active_label_colour)
elif tab.hovered:
dc.SetTextForeground(self._tab_hover_label_colour)
else:
dc.SetTextForeground(self._tab_label_colour)
dc.SetBackgroundMode(wx.TRANSPARENT)
text_width, text_height = dc.GetTextExtent(label)
width = tab.rect.width - 5
x = tab.rect.x + 3
if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
x += 3 + tab.page.GetIcon().GetWidth()
width -= 3 + tab.page.GetIcon().GetWidth()
y = tab.rect.y + (tab.rect.height - text_height) / 2
if width <= text_width:
dc.SetClippingRegion(x, tab.rect.y, width, tab.rect.height)
dc.DrawText(label, x, y)
else:
dc.DrawText(label, x + (width - text_width) / 2 + 1, y)
def DrawPanelBorder(self, dc, rect, primary_colour, secondary_colour):
dc.SetPen(primary_colour)
dc.DrawLine(wx.Point(rect.width, 2), wx.Point(rect.width, rect.height - 1))
dc.SetPen(self._page_border_top_pen)
dc.DrawLine(wx.Point(0, 0), wx.Point(rect.width + 1, 0))
def DrawPanelBackground(self, dc, wnd, rect):
self.DrawPartialPageBackground(dc, wnd, rect, False)
true_rect = wx.Rect(*rect)
true_rect = self.RemovePanelPadding(true_rect)
dc.SetFont(self._panel_label_font)
dc.SetPen(wx.TRANSPARENT_PEN)
has_ext_button = wnd.HasExtButton()
if wnd.IsHovered():
dc.SetBrush(self._panel_hover_label_background_brush)
dc.SetTextForeground(self._panel_hover_label_colour)
else:
dc.SetBrush(self._panel_label_background_brush)
dc.SetTextForeground(self._panel_label_colour)
label_rect = wx.Rect(*true_rect)
label = wnd.GetLabel().strip()
clip_label = False
label_size = wx.Size(*dc.GetTextExtent(label))
label_rect.SetX(label_rect.GetX())
label_rect.SetWidth(label_rect.GetWidth())
label_rect.SetHeight(label_size.GetHeight() + 2)
label_rect.SetY(true_rect.GetBottom() - label_rect.GetHeight())
label_height = label_rect.GetHeight()
label_bg_rect = wx.Rect(*label_rect)
if has_ext_button:
label_rect.SetWidth(label_rect.GetWidth() - 13)
if label_size.GetWidth() > label_rect.GetWidth():
new_label = label[0:3] + "..."
label_size = wx.Size(*dc.GetTextExtent(new_label))
if label_size.GetWidth() > label_rect.GetWidth():
clip_label = True
else:
for l in range(len(label) - 1, 3, -1):
new_label = label[0:l] + "..."
label_size = wx.Size(*dc.GetTextExtent(new_label))
if label_size.GetWidth() <= label_rect.GetWidth():
label = new_label
break
dc.DrawRectangle(label_rect)
if clip_label:
clip = wx.DCClipper(dc, label_rect)
dc.DrawText(label, label_rect.GetX(), label_rect.GetY() + (label_rect.GetHeight() - label_size.GetHeight()) / 2)
else:
dc.DrawText(label, label_rect.GetX() + (label_rect.GetWidth() - label_size.GetWidth()) / 2,
label_rect.GetY() + (label_rect.GetHeight() - label_size.GetHeight()) / 2)
if has_ext_button:
if wnd.IsExtButtonHovered():
dc.SetPen(self._panel_hover_button_border_pen)
dc.SetBrush(self._panel_hover_button_background_brush)
dc.DrawRoundedRectangle(label_rect.GetRight(), label_rect.GetBottom() - 13, 13, 13, 1)
dc.DrawBitmap(self._panel_extension_bitmap[1], label_rect.GetRight() + 3, label_rect.GetBottom() - 10, True)
else:
dc.DrawBitmap(self._panel_extension_bitmap[0], label_rect.GetRight() + 3, label_rect.GetBottom() - 10, True)
if wnd.IsHovered():
client_rect = wx.Rect(*true_rect)
client_rect.SetX(client_rect.GetX() + 1)
client_rect.SetWidth(client_rect.GetWidth() - 2)
client_rect.SetY(client_rect.GetY() + 1)
client_rect.SetHeight( - 2 + label_height)
self.DrawPartialPageBackground(dc, wnd, client_rect, True)
self.DrawPanelBorder(dc, true_rect, self._panel_border_pen, self._panel_border_gradient_pen)
def DrawPageBackground(self, dc, wnd, rect):
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._tab_ctrl_background_brush)
edge = wx.Rect(*rect)
edge.SetWidth(2)
dc.DrawRectangle(edge.GetX(), edge.GetY(), edge.GetWidth(), edge.GetHeight())
edge.SetX(edge.GetX() + rect.GetWidth() - 2)
dc.DrawRectangle(edge.GetX(), edge.GetY(), edge.GetWidth(), edge.GetHeight())
edge = wx.Rect(*rect)
edge.SetHeight(2)
edge.SetY(edge.GetY() + rect.GetHeight() - edge.GetHeight())
dc.DrawRectangle(edge.GetX(), edge.GetY(), edge.GetWidth(), edge.GetHeight())
background = wx.Rect(*rect)
background.SetX(background.GetX() + 2)
background.SetWidth(background.GetWidth() - 4)
background.SetHeight(background.GetHeight() - 2)
background.SetHeight(background.GetHeight() / 5)
dc.GradientFillLinear(background, self._page_background_top_colour,
self._page_background_top_gradient_colour, wx.SOUTH)
background.SetY(background.GetY() + background.GetHeight())
background.SetHeight(rect.GetHeight() - 2 - background.GetHeight())
dc.GradientFillLinear(background, self._page_background_colour,
self._page_background_gradient_colour, wx.SOUTH)
dc.SetPen(self._page_border_pen)
border_points = [wx.Point() for i in range(4)]
border_points[0] = wx.Point(0, 0)
border_points[1] = wx.Point(0, rect.height - 1)
border_points[2] = wx.Point(rect.width + 1, rect.height - 1)
border_points[3] = wx.Point(rect.width + 1, 0)
dc.DrawLines(border_points, rect.x, rect.y)
dc.SetPen(self._page_border_top_pen)
dc.DrawLine(border_points[0], border_points[3])
def GetPathGradientButton(self, gc, rc, r):
x, y, w, h = rc
r = 0
path = gc.CreatePath()
path.AddRoundedRectangle(x, y, w, h, r)
path.CloseSubpath()
return path
| true | true |
1c317d7a4c0490b75b23a06e759f39580cfe1cc2 | 4,309 | py | Python | config/configs/inbound.py | cming091/datasync | f526804ff00931ff39af2b9e1e14c28e5afb7c43 | [
"Apache-2.0"
] | 1 | 2019-10-31T02:07:02.000Z | 2019-10-31T02:07:02.000Z | config/configs/inbound.py | cming091/datasync | f526804ff00931ff39af2b9e1e14c28e5afb7c43 | [
"Apache-2.0"
] | null | null | null | config/configs/inbound.py | cming091/datasync | f526804ff00931ff39af2b9e1e14c28e5afb7c43 | [
"Apache-2.0"
] | null | null | null | from .base import BaseConfig
class Config(BaseConfig):
    """Sync configuration for WMS inbound orders.

    Defines the destination DDL (``sql``) and the fetch/load pipelines
    (``config``) that copy MongoDB ``wms.inbound_orders`` documents into two
    flat SQL tables: order headers (``inboundOrdersBase``) and per-box
    material rows (``inboundOrdersBoxesMaterials``).
    """

    # Identifier used to select this configuration.
    name = 'inbound'
    # DDL executed on the output side to create the destination tables.
    sql = ["""
    CREATE TABLE inboundOrdersBase
    (
       _id VARCHAR(100) PRIMARY KEY
      , ito VARCHAR(100)
      , stock_type VARCHAR(100)
      , purchase_id VARCHAR(100)
      , code VARCHAR(100)
      , contract VARCHAR(100)
      , status INT
      , utime int default 0
    )
    ;
    """,
           """CREATE TABLE inboundOrdersBoxesMaterials
    (
        _id VARCHAR(100)
       , boxesCode VARCHAR(100)
       , mid VARCHAR(100)
       , package VARCHAR(100)
       , weight VARCHAR(100)
       , volume VARCHAR(100)
       , boxesStatus INT
       , sapid VARCHAR(100)
       , code VARCHAR(100)
       , numCount INT
       , status INT
       , carriage_time VARCHAR(100)
       , utime int default 0
    )
    ;""",
    ]
    @property
    def config(self):
        """Return the two pipeline descriptions (header rows, then box rows).

        Each pipeline is a dict with ``fetcher`` (change-stream watcher on
        ``wms.inbound_orders``), ``input`` (Mongo read + column mapping of
        ``(source_field, type, target_column)`` tuples) and ``output``
        (SQL insert target). Connection parameters come from
        ``self.setting`` (``fParams``/``iParams``/``oParams``), which is
        presumably populated by BaseConfig — confirm against the base class.
        """
        # Pipeline 1: one row per order document -> inboundOrdersBase.
        return [{
            'fetcher': {
                'name': 'mClient',
                'params':self.setting['fParams'][0:3],
                'pattern':{"ns": "{}.wms.inbound_orders".format(self.setting['fParams'][3])},
                'freq': 24
            },
            'input': [
                {'db': {'name': 'mClient', 'params':[*self.setting['iParams'],'wms.inbound_orders'] },
                 'op': {'fun': 'mFind', 'params': {}},
                 'columns': [
                     ('to', 'string', 'ito'),
                     ('stock_type', 'string', 'stock_type'),
                     ('purchase_id', 'string', 'purchase_id'),
                     ('code', 'string', 'code'),
                     ('contract', 'string', 'contract'),
                     ('status', 'int', 'status'),
                     ('_id', 'string', '_id'),
                 ],
                 }
            ],
            'output':
                {'db':
                     {'name': 'mssConnAndCur',
                      'params': self.setting['oParams'],
                      'table': 'inboundOrdersBase',
                      'unique':'_id'
                      },
                 'op':
                     {'fun': 'mssInsert'},
                 }
        },
            # Pipeline 2: unwind boxes/materials -> inboundOrdersBoxesMaterials
            # (one row per material per box per order).
            {
                'fetcher': {
                    'name': 'mClient',
                    'params': self.setting['fParams'][0:3],
                    'pattern': {"ns": "{}.wms.inbound_orders".format(self.setting['fParams'][3])},
                    'freq': 24
                },
                'input': [
                    {'db': {'name': 'mClient', 'params': [*self.setting['iParams'], 'wms.inbound_orders']},
                     'op': {'fun': 'mAggregate', 'params': [{"$unwind":"$boxes"},{"$unwind":"$boxes.materials"},{"$project":{"_id":1,'code':1,'status':1,'boxes':1}}]},
                     'columns': [
                         ('_id', 'string', '_id'),
                         ('code', 'string', 'code'),
                         ('status', 'int', 'status'),
                         ('boxes.status', 'int', 'boxesStatus'),
                         ('boxes.code', 'string', 'boxesCode'),
                         ('boxes.carriage_time', 'string', 'carriage_time'),
                         ('boxes.materials.mid', 'string', 'mid'),
                         ('boxes.materials.sapid', 'string', 'sapid'),
                         ('boxes.materials.count', 'int', 'numCount'),
                         ('boxes.physics.package', 'string', 'package'),
                         ('boxes.physics.weight', 'string', 'weight'),
                         ('boxes.physics.volume', 'string', 'volume'),
                     ],
                     }
                ],
                'output':
                    {'db':
                         {'name': 'mssConnAndCur',
                          'params': self.setting['oParams'],
                          'table': 'inboundOrdersBoxesMaterials',
                          'unique': '_id'
                          },
                     'op':
                         {'fun': 'mssInsert'},
                     }
            }
        ]
| 38.132743 | 163 | 0.35832 | from .base import BaseConfig
class Config(BaseConfig):
    """Sync configuration mirroring ``wms.inbound_orders`` into two MSSQL tables."""
    name = 'inbound'
    # DDL for the destination tables.
    sql = ["""
    CREATE TABLE inboundOrdersBase
    (
    _id VARCHAR(100) PRIMARY KEY
    , ito VARCHAR(100)
    , stock_type VARCHAR(100)
    , purchase_id VARCHAR(100)
    , code VARCHAR(100)
    , contract VARCHAR(100)
    , status INT
    , utime int default 0
    )
    ;
    """,
    """CREATE TABLE inboundOrdersBoxesMaterials
    (
    _id VARCHAR(100)
    , boxesCode VARCHAR(100)
    , mid VARCHAR(100)
    , package VARCHAR(100)
    , weight VARCHAR(100)
    , volume VARCHAR(100)
    , boxesStatus INT
    , sapid VARCHAR(100)
    , code VARCHAR(100)
    , numCount INT
    , status INT
    , carriage_time VARCHAR(100)
    , utime int default 0
    )
    ;""",
    ]
    @property
    def config(self):
        """Two pipeline entries: order headers, then unwound box materials."""
        return [{
            'fetcher': {
                'name': 'mClient',
                'params':self.setting['fParams'][0:3],
                'pattern':{"ns": "{}.wms.inbound_orders".format(self.setting['fParams'][3])},
                'freq': 24
            },
            'input': [
                {'db': {'name': 'mClient', 'params':[*self.setting['iParams'],'wms.inbound_orders'] },
                 'op': {'fun': 'mFind', 'params': {}},
                 'columns': [
                     ('to', 'string', 'ito'),
                     ('stock_type', 'string', 'stock_type'),
                     ('purchase_id', 'string', 'purchase_id'),
                     ('code', 'string', 'code'),
                     ('contract', 'string', 'contract'),
                     ('status', 'int', 'status'),
                     ('_id', 'string', '_id'),
                 ],
                 }
            ],
            'output':
                {'db':
                    {'name': 'mssConnAndCur',
                     'params': self.setting['oParams'],
                     'table': 'inboundOrdersBase',
                     'unique':'_id'
                     },
                 'op':
                    {'fun': 'mssInsert'},
                 }
        },
            {
                'fetcher': {
                    'name': 'mClient',
                    'params': self.setting['fParams'][0:3],
                    'pattern': {"ns": "{}.wms.inbound_orders".format(self.setting['fParams'][3])},
                    'freq': 24
                },
                'input': [
                    {'db': {'name': 'mClient', 'params': [*self.setting['iParams'], 'wms.inbound_orders']},
                     'op': {'fun': 'mAggregate', 'params': [{"$unwind":"$boxes"},{"$unwind":"$boxes.materials"},{"$project":{"_id":1,'code':1,'status':1,'boxes':1}}]},
                     'columns': [
                         ('_id', 'string', '_id'),
                         ('code', 'string', 'code'),
                         ('status', 'int', 'status'),
                         ('boxes.status', 'int', 'boxesStatus'),
                         ('boxes.code', 'string', 'boxesCode'),
                         ('boxes.carriage_time', 'string', 'carriage_time'),
                         ('boxes.materials.mid', 'string', 'mid'),
                         ('boxes.materials.sapid', 'string', 'sapid'),
                         ('boxes.materials.count', 'int', 'numCount'),
                         ('boxes.physics.package', 'string', 'package'),
                         ('boxes.physics.weight', 'string', 'weight'),
                         ('boxes.physics.volume', 'string', 'volume'),
                     ],
                     }
                ],
                'output':
                    {'db':
                        {'name': 'mssConnAndCur',
                         'params': self.setting['oParams'],
                         'table': 'inboundOrdersBoxesMaterials',
                         'unique': '_id'
                         },
                     'op':
                        {'fun': 'mssInsert'},
                     }
            }
        ]
| true | true |
1c317d7fb27c2fd6f493b6f63b942ab8f103a516 | 9,481 | py | Python | src/GUI/GUIRegressionLineComparisonController.py | AndreaG93/CPS-Project | e4821aace39b04f8504f3f878fa605d99aaacc8e | [
"MIT"
] | null | null | null | src/GUI/GUIRegressionLineComparisonController.py | AndreaG93/CPS-Project | e4821aace39b04f8504f3f878fa605d99aaacc8e | [
"MIT"
] | null | null | null | src/GUI/GUIRegressionLineComparisonController.py | AndreaG93/CPS-Project | e4821aace39b04f8504f3f878fa605d99aaacc8e | [
"MIT"
] | null | null | null | import threading
from src.Application import Application
from src.GUI.GUICommon import GUICommon
from src.GUI.GUIRegressionLineComparison import GUIRegressionLineComparison
from src.Statistics.UnivariateRegressionLine import UnivariateRegressionLine
from src.TimeSeriesDataset.TimeSeriesDatasetGlobalClimateChange import TimeSeriesDatasetGlobalClimateChange
class GUIRegressionLineComparisonController(GUIRegressionLineComparison):
    """
    This class is used as controller for all 'ipywidgets widget' inside 'GUIRegressionLineComparison' class.
    """

    def __init__(self, application):
        super().__init__()

        if application is None:
            raise ValueError("[ERROR]: 'application' cannot be 'None'!")

        if type(application) is not Application:
            raise TypeError("[ERROR]: 'application' must be 'Application' type!")

        # Set needed 'dataset'
        application.set_current_selected_dataset("GlobalLandTemperaturesByCountry.csv")

        self.__application = application
        self.__application_options = application.get_current_application_options()
        self.__current_selected_dataset = application.get_current_selected_dataset()

        self.__lock = threading.Lock()

    def display(self):
        """
        This function refreshes widget state and shows the user interface.

        NOTE(review): 'on_click' adds a handler on every call and
        'compare_event' calls 'display' again, so repeated comparisons may
        stack duplicate button handlers -- verify against ipywidgets behaviour.
        """
        self.update()

        # Populate button event...
        self._widget_compare_button.on_click(self.compare_event)
        self._widgets_remove_state_comparison_list_button.on_click(self.remove_state)
        self._widgets_insert_state_comparison_list_button.on_click(self.add_state)

        super(GUIRegressionLineComparisonController, self).display()

    def add_state(self, button):
        """
        This function add a state to the list for 'Regression Line Comparison'
        """
        state = self._widgets_state_for_comparison_combobox.value

        # Check for errors...
        if state is None or state not in self.__current_selected_dataset.get_state_list():
            self._widget_error_label.value = "$\\textbf{[ERROR] Specified 'State' does not exist!}$"
            return
        else:
            self._widget_error_label.value = ""

        if state not in self.__application_options.states:
            self.__application_options.states.append(state)
            self.update()
            self._widgets_state_list_comparison_select.value = self.__application_options.states[0]

    def remove_state(self, button):
        """
        This function remove a state from the list for 'Regression Line Comparison'
        """
        state = self._widgets_state_list_comparison_select.value

        if state in self.__application_options.states:
            self.__application_options.states.remove(state)

            if len(self.__application_options.states) != 0:
                self._widgets_state_list_comparison_select.value = self.__application_options.states[0]
            self.update()

    def compare_event(self, button):
        """
        This function is used to perform the plot of data according to user specified options.
        """
        self._widget_error_label.value = ""
        self.display()

        try:
            if len(self.__application_options.states) == 0:
                raise ValueError("[ERROR]: No 'State' specified!!")

            if len(self.__application_options.states) == 1:
                raise ValueError("[ERROR]: Please, specify at least 2 states!!")

            state_regression_line_list = list()

            # Build regression lines belonging to each state...
            # ========================================= #
            for state in self.__application_options.states:

                data = self.__current_selected_dataset.get_filtered_data(self.__application_options.month_name,
                                                                         self.__application_options.month_filter_enabled,
                                                                         self.__application_options.city,
                                                                         state,
                                                                         self.__application_options.plot_time_range_as_years,
                                                                         self.__application_options.active_columns)

                if (len(data)) == 1:
                    raise ValueError("[ERROR]: Nothing to plot; only 1 record of data!!")

                regression_line = TimeSeriesDatasetGlobalClimateChange.compute_univariate_regression_line(data,
                                                                                                          name=state)
                state_regression_line_list.append(regression_line)

            # Print a title...
            # ========================================= #
            if self.__application_options.month_filter_enabled:
                print("Regression Line Rank ({} - {} - {})".format(self.__application_options.month_name,
                                                                   self.__application_options.plot_time_range_as_years,
                                                                   self.__application_options.active_columns))
            else:
                print("Regression Line Rank (Every Months - {} - {})".format(
                    self.__application_options.plot_time_range_as_years,
                    self.__application_options.active_columns))

            # Rank...
            # ========================================= #
            UnivariateRegressionLine.rank_regression_lines(state_regression_line_list)

        except ValueError as error:
            self._widget_error_label.value = "$\\textbf{" + "{}".format(error) + "}$"

    def update(self):
        """
        This function is used to update any information displayed by the user interface.
        """
        self.__lock.acquire()
        self.__unregister_callback()

        # Section: "Month Selection"
        # ========================================= #
        self._widget_month_checkbox.value = self.__application_options.month_filter_enabled
        self._widget_month_combobox.value = self.__application_options.month_name
        self._widget_month_combobox.disabled = not self.__application_options.month_filter_enabled

        # Section: "State Selection"
        # ========================================= #
        self._widgets_state_for_comparison_combobox.options = self.__current_selected_dataset.get_state_list()
        self._widgets_state_list_comparison_select.options = self.__application_options.states

        # Section: "Time Range Selection"
        # ========================================= #
        self._widget_time_int_range_slider.max = self.__application_options.available_time_range_as_years[1]
        self._widget_time_int_range_slider.min = self.__application_options.available_time_range_as_years[0]
        self._widget_time_int_range_slider.value = self.__application_options.plot_time_range_as_years

        # Section: "Active Column Selection"
        # ========================================= #
        self._widget_active_columns_select.options = self.__application_options.available_columns
        self._widget_active_columns_select.value = self.__application_options.active_columns[0]

        self.__register_callback()
        self.__lock.release()

    # Callbacks...
    # ================================================================================================================ #
    # NOTE: the former private '__is_widget_value_changed' staticmethod
    # duplicated 'GUICommon.is_widget_value_changed' and was never called;
    # it has been removed -- every handler below uses the shared helper.

    def _on_change_widget_month_combobox(self, change):
        if GUICommon.is_widget_value_changed(change):
            self.__application_options.month_name = change['new']

    def _on_change_widget_month_checkbox(self, change):
        if GUICommon.is_widget_value_changed(change):
            self.__application_options.month_filter_enabled = change['new']
            self._widget_month_combobox.disabled = not self.__application_options.month_filter_enabled

    def _on_change_widget_time_int_range_slider(self, change):
        if GUICommon.is_widget_value_changed(change):
            self.__application_options.plot_time_range_as_years = [change['new'][0], change['new'][1]]

    def _on_change_widget_active_columns_select(self, change):
        if GUICommon.is_widget_value_changed(change):
            self.__application_options.active_columns = [change['new']]

    def __register_callback(self):
        """
        Function used to register callbacks handlers
        """
        self._widget_month_combobox.observe(self._on_change_widget_month_combobox)
        self._widget_month_checkbox.observe(self._on_change_widget_month_checkbox)
        self._widget_time_int_range_slider.observe(self._on_change_widget_time_int_range_slider)
        self._widget_active_columns_select.observe(self._on_change_widget_active_columns_select)

    def __unregister_callback(self):
        """
        Function used to unregister callbacks handlers
        """
        self._widget_month_combobox.unobserve(self._on_change_widget_month_combobox)
        self._widget_month_checkbox.unobserve(self._on_change_widget_month_checkbox)
        self._widget_time_int_range_slider.unobserve(self._on_change_widget_time_int_range_slider)
        self._widget_active_columns_select.unobserve(self._on_change_widget_active_columns_select)
| 46.935644 | 125 | 0.632317 | import threading
from src.Application import Application
from src.GUI.GUICommon import GUICommon
from src.GUI.GUIRegressionLineComparison import GUIRegressionLineComparison
from src.Statistics.UnivariateRegressionLine import UnivariateRegressionLine
from src.TimeSeriesDataset.TimeSeriesDatasetGlobalClimateChange import TimeSeriesDatasetGlobalClimateChange
class GUIRegressionLineComparisonController(GUIRegressionLineComparison):
    """Controller for the ipywidgets widgets of 'GUIRegressionLineComparison'."""
    def __init__(self, application):
        super().__init__()
        if application is None:
            raise ValueError("[ERROR]: 'application' cannot be 'None'!")
        if type(application) is not Application:
            raise TypeError("[ERROR]: 'application' must be 'Application' type!")
        # Select the dataset this view works on before reading options from it.
        application.set_current_selected_dataset("GlobalLandTemperaturesByCountry.csv")
        self.__application = application
        self.__application_options = application.get_current_application_options()
        self.__current_selected_dataset = application.get_current_selected_dataset()
        self.__lock = threading.Lock()
    def display(self):
        """Refresh widget state, wire the button handlers and show the UI."""
        self.update()
        self._widget_compare_button.on_click(self.compare_event)
        self._widgets_remove_state_comparison_list_button.on_click(self.remove_state)
        self._widgets_insert_state_comparison_list_button.on_click(self.add_state)
        super(GUIRegressionLineComparisonController, self).display()
    def add_state(self, button):
        """Add the combobox's state to the comparison list (button handler)."""
        state = self._widgets_state_for_comparison_combobox.value
        if state is None or state not in self.__current_selected_dataset.get_state_list():
            self._widget_error_label.value = "$\\textbf{[ERROR] Specified 'State' does not exist!}$"
            return
        else:
            self._widget_error_label.value = ""
        if state not in self.__application_options.states:
            self.__application_options.states.append(state)
            self.update()
            self._widgets_state_list_comparison_select.value = self.__application_options.states[0]
    def remove_state(self, button):
        """Remove the selected state from the comparison list (button handler)."""
        state = self._widgets_state_list_comparison_select.value
        if state in self.__application_options.states:
            self.__application_options.states.remove(state)
            if len(self.__application_options.states) != 0:
                self._widgets_state_list_comparison_select.value = self.__application_options.states[0]
            self.update()
    def compare_event(self, button):
        """Build one regression line per selected state and rank them."""
        self._widget_error_label.value = ""
        self.display()
        try:
            if len(self.__application_options.states) == 0:
                raise ValueError("[ERROR]: No 'State' specified!!")
            if len(self.__application_options.states) == 1:
                raise ValueError("[ERROR]: Please, specify at least 2 states!!")
            state_regression_line_list = list()
            # One regression line per state, built from the filtered dataset.
            for state in self.__application_options.states:
                data = self.__current_selected_dataset.get_filtered_data(self.__application_options.month_name,
                                                                         self.__application_options.month_filter_enabled,
                                                                         self.__application_options.city,
                                                                         state,
                                                                         self.__application_options.plot_time_range_as_years,
                                                                         self.__application_options.active_columns)
                if (len(data)) == 1:
                    raise ValueError("[ERROR]: Nothing to plot; only 1 record of data!!")
                regression_line = TimeSeriesDatasetGlobalClimateChange.compute_univariate_regression_line(data,
                                                                                                          name=state)
                state_regression_line_list.append(regression_line)
            # Title, then the ranking itself.
            if self.__application_options.month_filter_enabled:
                print("Regression Line Rank ({} - {} - {})".format(self.__application_options.month_name,
                                                                   self.__application_options.plot_time_range_as_years,
                                                                   self.__application_options.active_columns))
            else:
                print("Regression Line Rank (Every Months - {} - {})".format(
                    self.__application_options.plot_time_range_as_years,
                    self.__application_options.active_columns))
            UnivariateRegressionLine.rank_regression_lines(state_regression_line_list)
        except ValueError as error:
            # Any validation failure is surfaced through the error label.
            self._widget_error_label.value = "$\\textbf{" + "{}".format(error) + "}$"
    def update(self):
        """Push option values into the widgets (callbacks paused meanwhile)."""
        self.__lock.acquire()
        self.__unregister_callback()
        # Month selection widgets.
        self._widget_month_checkbox.value = self.__application_options.month_filter_enabled
        self._widget_month_combobox.value = self.__application_options.month_name
        self._widget_month_combobox.disabled = not self.__application_options.month_filter_enabled
        # State selection widgets.
        self._widgets_state_for_comparison_combobox.options = self.__current_selected_dataset.get_state_list()
        self._widgets_state_list_comparison_select.options = self.__application_options.states
        # Time range slider.
        self._widget_time_int_range_slider.max = self.__application_options.available_time_range_as_years[1]
        self._widget_time_int_range_slider.min = self.__application_options.available_time_range_as_years[0]
        self._widget_time_int_range_slider.value = self.__application_options.plot_time_range_as_years
        # Active column selection.
        self._widget_active_columns_select.options = self.__application_options.available_columns
        self._widget_active_columns_select.value = self.__application_options.active_columns[0]
        self.__register_callback()
        self.__lock.release()
    # NOTE(review): this private helper duplicates
    # GUICommon.is_widget_value_changed and appears unused within the class.
    @staticmethod
    def __is_widget_value_changed(change):
        return change['type'] == 'change' and change['name'] == 'value'
    def _on_change_widget_month_combobox(self, change):
        if GUICommon.is_widget_value_changed(change):
            self.__application_options.month_name = change['new']
    def _on_change_widget_month_checkbox(self, change):
        if GUICommon.is_widget_value_changed(change):
            self.__application_options.month_filter_enabled = change['new']
            self._widget_month_combobox.disabled = not self.__application_options.month_filter_enabled
    def _on_change_widget_time_int_range_slider(self, change):
        if GUICommon.is_widget_value_changed(change):
            self.__application_options.plot_time_range_as_years = [change['new'][0], change['new'][1]]
    def _on_change_widget_active_columns_select(self, change):
        if GUICommon.is_widget_value_changed(change):
            self.__application_options.active_columns = [change['new']]
    def __register_callback(self):
        """Register all widget observers."""
        self._widget_month_combobox.observe(self._on_change_widget_month_combobox)
        self._widget_month_checkbox.observe(self._on_change_widget_month_checkbox)
        self._widget_time_int_range_slider.observe(self._on_change_widget_time_int_range_slider)
        self._widget_active_columns_select.observe(self._on_change_widget_active_columns_select)
    def __unregister_callback(self):
        """Unregister all widget observers."""
        self._widget_month_combobox.unobserve(self._on_change_widget_month_combobox)
        self._widget_month_checkbox.unobserve(self._on_change_widget_month_checkbox)
        self._widget_time_int_range_slider.unobserve(self._on_change_widget_time_int_range_slider)
        self._widget_active_columns_select.unobserve(self._on_change_widget_active_columns_select)
| true | true |
1c317ea0b7f59c761709c3d97506b34be9b6dbbe | 4,914 | py | Python | kubernetes/client/models/v1_token_request_status.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 1 | 2019-10-07T13:54:36.000Z | 2019-10-07T13:54:36.000Z | kubernetes/client/models/v1_token_request_status.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 8 | 2020-10-28T01:18:36.000Z | 2021-06-11T01:06:15.000Z | kubernetes/client/models/v1_token_request_status.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 1 | 2021-03-16T16:05:33.000Z | 2021-03-16T16:05:33.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1TokenRequestStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI type.
    openapi_types = {
        'expiration_timestamp': 'datetime',
        'token': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'expiration_timestamp': 'expirationTimestamp',
        'token': 'token'
    }

    def __init__(self, expiration_timestamp=None, token=None, local_vars_configuration=None):  # noqa: E501
        """V1TokenRequestStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._expiration_timestamp = None
        self._token = None
        self.discriminator = None

        # Assign through the properties so client-side validation runs.
        self.expiration_timestamp = expiration_timestamp
        self.token = token

    @property
    def expiration_timestamp(self):
        """Time of expiration of the returned token.

        :rtype: datetime
        """
        return self._expiration_timestamp

    @expiration_timestamp.setter
    def expiration_timestamp(self, expiration_timestamp):
        """Set the expiration timestamp; rejects None when validation is enabled."""
        validating = self.local_vars_configuration.client_side_validation
        if validating and expiration_timestamp is None:  # noqa: E501
            raise ValueError("Invalid value for `expiration_timestamp`, must not be `None`")  # noqa: E501
        self._expiration_timestamp = expiration_timestamp

    @property
    def token(self):
        """The opaque bearer token.

        :rtype: str
        """
        return self._token

    @token.setter
    def token(self, token):
        """Set the bearer token; rejects None when validation is enabled."""
        validating = self.local_vars_configuration.client_side_validation
        if validating and token is None:  # noqa: E501
            raise ValueError("Invalid value for `token`, must not be `None`")  # noqa: E501
        self._token = token

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _plain(value):
            # Recursively convert nested models/containers to builtins.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {name: _plain(getattr(self, name))
                for name, _ in six.iteritems(self.openapi_types)}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, V1TokenRequestStatus) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return (not isinstance(other, V1TokenRequestStatus)) or self.to_dict() != other.to_dict()
| 32.117647 | 124 | 0.620472 |
import pprint
import re
import six
from kubernetes.client.configuration import Configuration
class V1TokenRequestStatus(object):
    """Auto-generated OpenAPI model: the status part of a token request."""
    # Attribute name -> OpenAPI type.
    openapi_types = {
        'expiration_timestamp': 'datetime',
        'token': 'str'
    }
    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'expiration_timestamp': 'expirationTimestamp',
        'token': 'token'
    }
    def __init__(self, expiration_timestamp=None, token=None, local_vars_configuration=None):
        """V1TokenRequestStatus - a model defined in OpenAPI"""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._expiration_timestamp = None
        self._token = None
        self.discriminator = None
        # Assign through the properties so client-side validation runs.
        self.expiration_timestamp = expiration_timestamp
        self.token = token
    @property
    def expiration_timestamp(self):
        """Time of expiration of the returned token."""
        return self._expiration_timestamp
    @expiration_timestamp.setter
    def expiration_timestamp(self, expiration_timestamp):
        """Set the expiration timestamp; rejects None when validation is on."""
        if self.local_vars_configuration.client_side_validation and expiration_timestamp is None:
            raise ValueError("Invalid value for `expiration_timestamp`, must not be `None`")
        self._expiration_timestamp = expiration_timestamp
    @property
    def token(self):
        """The opaque bearer token."""
        return self._token
    @token.setter
    def token(self, token):
        """Set the bearer token; rejects None when validation is on."""
        if self.local_vars_configuration.client_side_validation and token is None:
            raise ValueError("Invalid value for `token`, must not be `None`")
        self._token = token
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # Recursively convert nested models/containers to builtins.
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1TokenRequestStatus):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1TokenRequestStatus):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
1c317f4fd515e3436cae2120e0c79f18c6c754ab | 828 | py | Python | src/derivatives.py | zhooda/learn-ml | e35765fdc3b27fd923cff89cc086d5093eeee25b | [
"MIT"
] | null | null | null | src/derivatives.py | zhooda/learn-ml | e35765fdc3b27fd923cff89cc086d5093eeee25b | [
"MIT"
] | null | null | null | src/derivatives.py | zhooda/learn-ml | e35765fdc3b27fd923cff89cc086d5093eeee25b | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
def f(x):
    """Example curve f(x) = 2 * x squared (the function being differentiated)."""
    return 2 * x * x
# Dense sample of f over [0, 5) used to draw the smooth curve.
x = np.arange(0, 5, 0.001)
y = f(x)
plt.plot(x, y)
# One plot colour per tangent-line sample point drawn below.
colors = ['k', 'g', 'r', 'b', 'c']
def approximate_tangent_line(x, approximate_derivative):
    # NOTE(review): the intercept `b` is read from a module-level global that
    # is assigned inside the plotting loop below -- this function is only
    # valid after that loop has set `b` for the current sample point.
    return (approximate_derivative*x) + b
# For each integer sample point x1 = 0..4, approximate f'(x1) by a finite
# difference and draw a short tangent segment through (x1, f(x1)).
for i in range(5):
    p2_delta = 0.0001  # tiny step for the finite-difference slope
    x1 = i
    x2 = x1 + p2_delta
    y1 = f(x1)
    y2 = f(x2)
    print((x1, y1), (x2, y2))
    # Finite-difference approximation of the derivative at x1.
    approximate_derivative = (y2 - y1) / (x2 - x1)
    # Intercept for y = m*x + b; read (as a global) by approximate_tangent_line.
    b = y2 - approximate_derivative * x2
    to_plot = [x1 - 0.9, x1, x1 + 0.9]
    plt.scatter(x1, y1, c=colors[i])
    plt.plot([point for point in to_plot],
             [approximate_tangent_line(point, approximate_derivative) for point in to_plot],
             c=colors[i])
    # Typo fix: 'derivtive' -> 'derivative' in the printed message.
    print('approximate derivative for f(x)', f'where x = {x1} is {approximate_derivative}')
plt.show() | 22.378378 | 92 | 0.613527 | import matplotlib.pyplot as plt
import numpy as np
def f(x):
    """Example curve f(x) = 2*x**2 whose tangents are approximated below."""
    return 2*x**2
# Dense sample of f over [0, 5) used to draw the smooth curve.
x = np.arange(0, 5, 0.001)
y = f(x)
plt.plot(x, y)
# One plot colour per tangent-line sample point drawn below.
colors = ['k', 'g', 'r', 'b', 'c']
def approximate_tangent_line(x, approximate_derivative):
    # NOTE(review): the intercept `b` is a module-level global assigned inside
    # the plotting loop below; calling this before that loop runs would fail.
    return (approximate_derivative*x) + b
# For each integer sample point x1 = 0..4, approximate f'(x1) by a finite
# difference and draw a short tangent segment through (x1, f(x1)).
for i in range(5):
    p2_delta = 0.0001  # tiny step for the finite-difference slope
    x1 = i
    x2 = x1 + p2_delta
    y1 = f(x1)
    y2 = f(x2)
    print((x1, y1), (x2, y2))
    # Finite-difference approximation of the derivative at x1.
    approximate_derivative = (y2 - y1) / (x2 - x1)
    # Intercept for y = m*x + b; read (as a global) by approximate_tangent_line.
    b = y2 - approximate_derivative * x2
    to_plot = [x1 - 0.9, x1, x1 + 0.9]
    plt.scatter(x1, y1, c=colors[i])
    plt.plot([point for point in to_plot],
             [approximate_tangent_line(point, approximate_derivative) for point in to_plot],
             c=colors[i])
    # Typo fix: 'derivtive' -> 'derivative' in the printed message.
    print('approximate derivative for f(x)', f'where x = {x1} is {approximate_derivative}')
plt.show() | true | true |
1c317f6c0f1e463ad8d1cbe96e1c7e414a8b3b57 | 956 | py | Python | model/nnet/ocr.py | wahtak/ascii-net | 8930a6fce7a6c51dd90231ebc418fdab53e5aca2 | [
"MIT"
] | 22 | 2016-10-02T12:10:26.000Z | 2022-03-10T04:21:18.000Z | model/nnet/ocr.py | wahtak/ascii-net | 8930a6fce7a6c51dd90231ebc418fdab53e5aca2 | [
"MIT"
] | null | null | null | model/nnet/ocr.py | wahtak/ascii-net | 8930a6fce7a6c51dd90231ebc418fdab53e5aca2 | [
"MIT"
] | 5 | 2017-05-12T21:53:06.000Z | 2022-01-10T10:21:19.000Z | import numpy as np
from .mlp import SoftmaxMLP
class OcrModel(object):
def __init__(self, shape_pixels, num_classes):
# flattend input shape
self.num_pixels = shape_pixels[0] * shape_pixels[1]
self.model = SoftmaxMLP(num_inputs=self.num_pixels,
num_hidden=self.num_pixels,
num_outputs=num_classes)
def flatten_pixels(self, inputs):
return inputs.reshape((-1, self.num_pixels))
def train(self, inputs, labels, epochs=1):
for i in range(epochs):
train_error = self.model.train_batch(
self.flatten_pixels(inputs), labels, learning_rate=0.2)
return train_error
def predict(self, inputs):
res = np.argmax(
np.apply_along_axis(
self.model.evaluate,
axis=1,
arr=self.flatten_pixels(inputs)),
axis=1)
return res | 28.969697 | 71 | 0.58159 | import numpy as np
from .mlp import SoftmaxMLP
class OcrModel(object):
    """OCR classifier: a softmax MLP over flattened pixel grids."""
    def __init__(self, shape_pixels, num_classes):
        # (rows, cols) -> rows * cols flattened input units.
        self.num_pixels = shape_pixels[0] * shape_pixels[1]
        self.model = SoftmaxMLP(num_inputs=self.num_pixels,
                                num_hidden=self.num_pixels,
                                num_outputs=num_classes)
    def flatten_pixels(self, inputs):
        # Reshape the pixel batch to (n_samples, num_pixels).
        return inputs.reshape((-1, self.num_pixels))
    def train(self, inputs, labels, epochs=1):
        # Runs `epochs` full-batch updates; returns the error of the last one.
        for i in range(epochs):
            train_error = self.model.train_batch(
                self.flatten_pixels(inputs), labels, learning_rate=0.2)
        return train_error
    def predict(self, inputs):
        # Evaluate the net per sample row, then argmax over class scores.
        res = np.argmax(
            np.apply_along_axis(
                self.model.evaluate,
                axis=1,
                arr=self.flatten_pixels(inputs)),
            axis=1)
return res | true | true |
1c317fcf19c952a9da772cb9807e81b8d1b5a974 | 2,878 | py | Python | tests_python/tests/test_proto_demo_counter.py | Piotr170687/tezos | c7e84dfae2837096a4188e835fb780ad514ec2d3 | [
"MIT"
] | 1 | 2021-11-03T08:21:08.000Z | 2021-11-03T08:21:08.000Z | tests_python/tests/test_proto_demo_counter.py | Piotr170687/tezos | c7e84dfae2837096a4188e835fb780ad514ec2d3 | [
"MIT"
] | null | null | null | tests_python/tests/test_proto_demo_counter.py | Piotr170687/tezos | c7e84dfae2837096a4188e835fb780ad514ec2d3 | [
"MIT"
] | 4 | 2021-04-27T15:00:34.000Z | 2021-09-26T21:50:01.000Z | import time
import pytest
from client.client import Client
PROTO = 'ProtoDemoCounterDemoCounterDemoCounterDemoCou4LSpdT'
PROTO_GENESIS = 'ProtoGenesisGenesisGenesisGenesisGenesisGenesk612im'
PARAMS = ['-p', PROTO_GENESIS]
@pytest.fixture(scope="class")
def client(sandbox):
"""One node with genesis."""
sandbox.add_node(0)
client = sandbox.client(0)
yield client
@pytest.mark.incremental
class TestProtoDemo:
"""Activate protocol demo_counter, inject operations and bake block.
This test relies on the fixture client which launches a single
sandboxed node.
"""
def test_proto_known(self, client: Client):
res = client.list_protocols()
assert PROTO in res
def test_proto_client_known(self, client: Client):
res = client.list_understood_protocols()
assert 'ProtoDemoCou' in res
def test_first_protocol(self, client: Client):
proto = 'PrihK96nBAFSxVL1GLJTVhu9YnzkMFiBeuJRPA8NwuZVZCE1L6i'
assert client.get_protocol() == proto
def test_activate_proto(self, client: Client):
parameters = {'init_a': 100, 'init_b': 100}
res = client.activate_protocol_json(PROTO, parameters, key='activator',
fitness='1')
assert res.block_hash
def test_level1(self, client: Client):
assert client.get_level() == 1
def test_protocol_genesis(self, client: Client):
assert client.get_protocol() == PROTO_GENESIS
def test_bake_command(self, client: Client):
time.sleep(1)
client.run(['bake', 'This is block 2'])
def test_level2(self, client: Client):
head = client.rpc('get', '/chains/main/blocks/head/')
assert head['header']['level'] == 2
def test_inject_operations(self, client: Client):
client.run(['increment', 'a'])
client.run(['increment', 'b'])
client.run(['transfer', '10'])
def test_mempool(self, client: Client):
ops = client.get_mempool()
assert len(ops['applied']) == 3
def test_bake_command_2(self, client: Client):
time.sleep(1)
client.run(['bake', 'This is block 3'])
def test_level3(self, client: Client):
head = client.rpc('get', '/chains/main/blocks/head/')
assert head['header']['level'] == 3
def test_rpc_counter_a(self, client: Client):
head = client.rpc('get', '/chains/main/blocks/head/counter/a')
assert head == 91
def test_rpc_counter_b(self, client: Client):
head = client.rpc('get', '/chains/main/blocks/head/counter/b')
assert head == 111
def test_get_counter_commands(self, client: Client):
message_a = client.run(['get', 'a'])
assert message_a == "The counter value is 91\n"
message_b = client.run(['get', 'b'])
assert message_b == "The counter value is 111\n"
| 32.704545 | 79 | 0.648019 | import time
import pytest
from client.client import Client
# Protocol hash of the demo_counter protocol under test.
PROTO = 'ProtoDemoCounterDemoCounterDemoCounterDemoCou4LSpdT'
# Protocol hash of the genesis protocol the sandboxed node boots on.
PROTO_GENESIS = 'ProtoGenesisGenesisGenesisGenesisGenesisGenesk612im'
# Extra client arguments; '-p' presumably selects the protocol — TODO confirm against client CLI.
PARAMS = ['-p', PROTO_GENESIS]
@pytest.fixture(scope="class")
def client(sandbox):
    """One node with genesis: start sandbox node 0 and yield its client."""
    sandbox.add_node(0)
    client = sandbox.client(0)
    yield client
@pytest.mark.incremental
class TestProtoDemo:
    """Activate protocol demo_counter, inject operations and bake blocks.

    Relies on the ``client`` fixture, which launches a single sandboxed
    node; the incremental marker means tests must run in order.
    """
    def test_proto_known(self, client: Client):
        res = client.list_protocols()
        assert PROTO in res
    def test_proto_client_known(self, client: Client):
        res = client.list_understood_protocols()
        assert 'ProtoDemoCou' in res
    def test_first_protocol(self, client: Client):
        # Before activation the chain still runs the built-in first protocol.
        proto = 'PrihK96nBAFSxVL1GLJTVhu9YnzkMFiBeuJRPA8NwuZVZCE1L6i'
        assert client.get_protocol() == proto
    def test_activate_proto(self, client: Client):
        parameters = {'init_a': 100, 'init_b': 100}
        res = client.activate_protocol_json(PROTO, parameters, key='activator',
                                            fitness='1')
        assert res.block_hash
    def test_level1(self, client: Client):
        assert client.get_level() == 1
    def test_protocol_genesis(self, client: Client):
        assert client.get_protocol() == PROTO_GENESIS
    def test_bake_command(self, client: Client):
        time.sleep(1)
        client.run(['bake', 'This is block 2'])
    def test_level2(self, client: Client):
        head = client.rpc('get', '/chains/main/blocks/head/')
        assert head['header']['level'] == 2
    def test_inject_operations(self, client: Client):
        client.run(['increment', 'a'])
        client.run(['increment', 'b'])
        client.run(['transfer', '10'])
    def test_mempool(self, client: Client):
        # All three injected operations should be applied in the mempool.
        ops = client.get_mempool()
        assert len(ops['applied']) == 3
    def test_bake_command_2(self, client: Client):
        time.sleep(1)
        client.run(['bake', 'This is block 3'])
    def test_level3(self, client: Client):
        head = client.rpc('get', '/chains/main/blocks/head/')
        assert head['header']['level'] == 3
    def test_rpc_counter_a(self, client: Client):
        head = client.rpc('get', '/chains/main/blocks/head/counter/a')
        assert head == 91
    def test_rpc_counter_b(self, client: Client):
        head = client.rpc('get', '/chains/main/blocks/head/counter/b')
        assert head == 111
    def test_get_counter_commands(self, client: Client):
        message_a = client.run(['get', 'a'])
        assert message_a == "The counter value is 91\n"
        message_b = client.run(['get', 'b'])
        assert message_b == "The counter value is 111\n"
| true | true |
1c318164acf7e698b7f26a2cd24daddb4ab19760 | 9,488 | py | Python | src/lslidar_ws/devel/lib/python2.7/dist-packages/lslidar_c16_msgs/msg/_LslidarC16ScanUnified.py | Louis-AD-git/racecar_ws | 3c5cb561d1aee11d80a7f3847e0334e93f345513 | [
"MIT"
] | null | null | null | src/lslidar_ws/devel/lib/python2.7/dist-packages/lslidar_c16_msgs/msg/_LslidarC16ScanUnified.py | Louis-AD-git/racecar_ws | 3c5cb561d1aee11d80a7f3847e0334e93f345513 | [
"MIT"
] | null | null | null | src/lslidar_ws/devel/lib/python2.7/dist-packages/lslidar_c16_msgs/msg/_LslidarC16ScanUnified.py | Louis-AD-git/racecar_ws | 3c5cb561d1aee11d80a7f3847e0334e93f345513 | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from lslidar_c16_msgs/LslidarC16ScanUnified.msg. Do not edit."""
import codecs
import sys
# True when running under Python 3; serialization code branches on it for str/bytes handling.
python3 = sys.hexversion > 0x03000000
import genpy
import struct
import genpy
import lslidar_c16_msgs.msg
import std_msgs.msg
class LslidarC16ScanUnified(genpy.Message):
  # Metadata emitted by the genpy generator; _md5sum/_full_text must stay verbatim.
  _md5sum = "a02f26cda99b9e0189aac08ed1065a71"
  _type = "lslidar_c16_msgs/LslidarC16ScanUnified"
  _has_header = True # flag to mark the presence of a Header object
  _full_text = """#lslidar C16 scan packets
Header header #standard ros message header
uint64 basetime #time to the top of hour
LslidarC16Packet[] packets #vector of raw packets
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: lslidar_c16_msgs/LslidarC16Packet
# Raw Leishen LIDAR packet.
time stamp # packet timestamp
uint8[1206] data # packet contents
"""
  __slots__ = ['header','basetime','packets']
  _slot_types = ['std_msgs/Header','uint64','lslidar_c16_msgs/LslidarC16Packet[]']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       header,basetime,packets
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(LslidarC16ScanUnified, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.basetime is None:
        self.basetime = 0
      if self.packets is None:
        self.packets = []
    else:
      self.header = std_msgs.msg.Header()
      self.basetime = 0
      self.packets = []
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.basetime
      buff.write(_get_struct_Q().pack(_x))
      length = len(self.packets)
      buff.write(_struct_I.pack(length))
      for val1 in self.packets:
        _v1 = val1.stamp
        _x = _v1
        buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
        _x = val1.data
        # - if encoded as a list instead, serialize as bytes instead of string
        if type(_x) in [list, tuple]:
          buff.write(_get_struct_1206B().pack(*_x))
        else:
          buff.write(_get_struct_1206s().pack(_x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.packets is None:
        self.packets = None
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.header.frame_id = str[start:end]
      start = end
      end += 8
      (self.basetime,) = _get_struct_Q().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.packets = []
      for i in range(0, length):
        val1 = lslidar_c16_msgs.msg.LslidarC16Packet()
        _v2 = val1.stamp
        _x = _v2
        start = end
        end += 8
        (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        # fixed-size payload: uint8[1206] per packet (see _full_text)
        end += 1206
        val1.data = str[start:end]
        self.packets.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.basetime
      buff.write(_get_struct_Q().pack(_x))
      length = len(self.packets)
      buff.write(_struct_I.pack(length))
      for val1 in self.packets:
        _v3 = val1.stamp
        _x = _v3
        buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
        _x = val1.data
        # - if encoded as a list instead, serialize as bytes instead of string
        if type(_x) in [list, tuple]:
          buff.write(_get_struct_1206B().pack(*_x))
        else:
          buff.write(_get_struct_1206s().pack(_x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.packets is None:
        self.packets = None
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.header.frame_id = str[start:end]
      start = end
      end += 8
      (self.basetime,) = _get_struct_Q().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.packets = []
      for i in range(0, length):
        val1 = lslidar_c16_msgs.msg.LslidarC16Packet()
        _v4 = val1.stamp
        _x = _v4
        start = end
        end += 8
        (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        # fixed-size payload: uint8[1206] per packet (see _full_text)
        end += 1206
        val1.data = str[start:end]
        self.packets.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
# Reuse genpy's shared Struct for the 4-byte length prefixes.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
_struct_1206B = None
def _get_struct_1206B():
global _struct_1206B
if _struct_1206B is None:
_struct_1206B = struct.Struct("<1206B")
return _struct_1206B
_struct_1206s = None
def _get_struct_1206s():
global _struct_1206s
if _struct_1206s is None:
_struct_1206s = struct.Struct("<1206s")
return _struct_1206s
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_Q = None
def _get_struct_Q():
global _struct_Q
if _struct_Q is None:
_struct_Q = struct.Struct("<Q")
return _struct_Q
| 34.007168 | 145 | 0.629743 |
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False  # True when running under Python 3
import genpy
import struct
import genpy
import lslidar_c16_msgs.msg
import std_msgs.msg
class LslidarC16ScanUnified(genpy.Message):
  """Autogenerated genpy message: a header, a base time and a list of raw packets."""
  # Generator metadata; _md5sum/_full_text must stay verbatim.
  _md5sum = "a02f26cda99b9e0189aac08ed1065a71"
  _type = "lslidar_c16_msgs/LslidarC16ScanUnified"
  _has_header = True  # flag to mark the presence of a Header object
  _full_text = """#lslidar C16 scan packets
Header header #standard ros message header
uint64 basetime #time to the top of hour
LslidarC16Packet[] packets #vector of raw packets
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: lslidar_c16_msgs/LslidarC16Packet
# Raw Leishen LIDAR packet.
time stamp # packet timestamp
uint8[1206] data # packet contents
"""
  __slots__ = ['header','basetime','packets']
  _slot_types = ['std_msgs/Header','uint64','lslidar_c16_msgs/LslidarC16Packet[]']
  def __init__(self, *args, **kwds):
    """Construct the message; unset fields receive default values."""
    if args or kwds:
      super(LslidarC16ScanUnified, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.basetime is None:
        self.basetime = 0
      if self.packets is None:
        self.packets = []
    else:
      self.header = std_msgs.msg.Header()
      self.basetime = 0
      self.packets = []
  def _get_types(self):
    """Internal API method."""
    return self._slot_types
  def serialize(self, buff):
    """Serialize this message into ``buff``."""
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.basetime
      buff.write(_get_struct_Q().pack(_x))
      length = len(self.packets)
      buff.write(_struct_I.pack(length))
      for val1 in self.packets:
        _v1 = val1.stamp
        _x = _v1
        buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
        _x = val1.data
        # if encoded as a list, serialize as bytes instead of string
        if type(_x) in [list, tuple]:
          buff.write(_get_struct_1206B().pack(*_x))
        else:
          buff.write(_get_struct_1206s().pack(_x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """Unpack the serialized bytes in ``str`` into this instance."""
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.packets is None:
        self.packets = None
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.header.frame_id = str[start:end]
      start = end
      end += 8
      (self.basetime,) = _get_struct_Q().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.packets = []
      for i in range(0, length):
        val1 = lslidar_c16_msgs.msg.LslidarC16Packet()
        _v2 = val1.stamp
        _x = _v2
        start = end
        end += 8
        (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        # fixed-size payload: uint8[1206] per packet (see _full_text)
        end += 1206
        val1.data = str[start:end]
        self.packets.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """Serialize this message into ``buff`` using numpy for array types."""
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.basetime
      buff.write(_get_struct_Q().pack(_x))
      length = len(self.packets)
      buff.write(_struct_I.pack(length))
      for val1 in self.packets:
        _v3 = val1.stamp
        _x = _v3
        buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
        _x = val1.data
        # if encoded as a list, serialize as bytes instead of string
        if type(_x) in [list, tuple]:
          buff.write(_get_struct_1206B().pack(*_x))
        else:
          buff.write(_get_struct_1206s().pack(_x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """Unpack the serialized bytes in ``str`` into this instance using numpy for array types."""
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.packets is None:
        self.packets = None
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.header.frame_id = str[start:end]
      start = end
      end += 8
      (self.basetime,) = _get_struct_Q().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.packets = []
      for i in range(0, length):
        val1 = lslidar_c16_msgs.msg.LslidarC16Packet()
        _v4 = val1.stamp
        _x = _v4
        start = end
        end += 8
        (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        # fixed-size payload: uint8[1206] per packet (see _full_text)
        end += 1206
        val1.data = str[start:end]
        self.packets.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill
# Reuse genpy's shared Struct for the 4-byte length prefixes.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
# Lazily created Struct for 1206 unsigned bytes (one packet payload as a list).
_struct_1206B = None
def _get_struct_1206B():
    global _struct_1206B
    if _struct_1206B is None:
        _struct_1206B = struct.Struct("<1206B")
    return _struct_1206B
# Lazily created Struct for a 1206-byte string payload (one packet as bytes).
_struct_1206s = None
def _get_struct_1206s():
    global _struct_1206s
    if _struct_1206s is None:
        _struct_1206s = struct.Struct("<1206s")
    return _struct_1206s
# Lazily created Struct for two little-endian uint32s (a secs/nsecs timestamp).
_struct_2I = None
def _get_struct_2I():
    global _struct_2I
    if _struct_2I is None:
        _struct_2I = struct.Struct("<2I")
    return _struct_2I
# Lazily created Struct for three little-endian uint32s (header seq + stamp).
_struct_3I = None
def _get_struct_3I():
    global _struct_3I
    if _struct_3I is None:
        _struct_3I = struct.Struct("<3I")
    return _struct_3I
# Lazily created Struct for one little-endian uint64 (the basetime field).
_struct_Q = None
def _get_struct_Q():
    global _struct_Q
    if _struct_Q is None:
        _struct_Q = struct.Struct("<Q")
    return _struct_Q
| true | true |
1c3181f5924e91bcd8113ea946b0f4722b4a7ea0 | 155 | py | Python | image_loader/image/apps.py | PiochU19/image-loader | 7abec95f5d41e859fe65607ab7bd442a855bc2a0 | [
"MIT"
] | null | null | null | image_loader/image/apps.py | PiochU19/image-loader | 7abec95f5d41e859fe65607ab7bd442a855bc2a0 | [
"MIT"
] | null | null | null | image_loader/image/apps.py | PiochU19/image-loader | 7abec95f5d41e859fe65607ab7bd442a855bc2a0 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class ImageConfig(AppConfig):
    """Django application configuration for the image_loader.image app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'image_loader.image'
| 22.142857 | 56 | 0.767742 | from django.apps import AppConfig
class ImageConfig(AppConfig):
    """Django application configuration for the image_loader.image app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'image_loader.image'
| true | true |
1c31827d24f797c5008773831971619ab8d237c6 | 385 | py | Python | src/tram/wsgi.py | f0r3ns1cat0r/tram | e7ddfe083aa2fab9657f012303043409462964f8 | [
"Apache-2.0"
] | 109 | 2021-09-29T00:08:14.000Z | 2022-03-31T03:00:29.000Z | src/tram/wsgi.py | f0r3ns1cat0r/tram | e7ddfe083aa2fab9657f012303043409462964f8 | [
"Apache-2.0"
] | 86 | 2021-09-28T11:50:38.000Z | 2022-03-31T17:42:47.000Z | src/tram/wsgi.py | f0r3ns1cat0r/tram | e7ddfe083aa2fab9657f012303043409462964f8 | [
"Apache-2.0"
] | 35 | 2021-09-29T11:09:00.000Z | 2022-03-24T06:32:33.000Z | """
WSGI config for tram project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module unless the environment already overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tram.settings")
# Module-level WSGI callable that WSGI servers import.
application = get_wsgi_application()
| 22.647059 | 78 | 0.781818 |
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module unless the environment already overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tram.settings")
# Module-level WSGI callable that WSGI servers import.
application = get_wsgi_application()
| true | true |
1c3183ae548981102c364d1214f07478edbf2cc8 | 14,762 | py | Python | test/functional/rpc_addressindex.py | cryptomiles/cryptomiles | d3815eaf7716fbca9459f4162ae7ba4714298d27 | [
"MIT"
] | null | null | null | test/functional/rpc_addressindex.py | cryptomiles/cryptomiles | d3815eaf7716fbca9459f4162ae7ba4714298d27 | [
"MIT"
] | null | null | null | test/functional/rpc_addressindex.py | cryptomiles/cryptomiles | d3815eaf7716fbca9459f4162ae7ba4714298d27 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Cryptomiles Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test addressindex generation and fetching
#
import time
from test_framework.test_framework import CryptomilesTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class AddressIndexTest(CryptomilesTestFramework):
    """Exercise the -addressindex RPCs (getaddresstxids, getaddressbalance,
    getaddressdeltas, getaddressutxos, getaddressmempool) end to end."""
    def set_test_params(self):
        """Use a fresh 4-node regtest chain."""
        self.setup_clean_chain = True
        self.num_nodes = 4
    def setup_network(self):
        """Start the four nodes and connect nodes 1-3 to node 0."""
        self.add_nodes(4, [
            # Nodes 0/1 are "wallet" nodes
            ["-debug", "-relaypriority=0"],
            ["-debug", "-addressindex"],
            # Nodes 2/3 are used for testing
            ["-debug", "-addressindex", "-relaypriority=0"],
            ["-debug", "-addressindex"]])
        self.start_nodes()
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        connect_nodes_bi(self.nodes, 0, 3)
        self.sync_all()
    def run_test(self):
        """Drive the index through sends, spends, a reorg and mempool activity,
        checking each address-index RPC along the way. Steps are order-dependent."""
        print("Mining blocks...")
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Check that balances are correct
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 0)
        # Check p2pkh and p2sh address indexes
        print("Testing p2pkh and p2sh address index...")
        txid0 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 10)
        self.nodes[0].generate(1)
        txidb0 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 10)
        self.nodes[0].generate(1)
        txid1 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 15)
        self.nodes[0].generate(1)
        txidb1 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 15)
        self.nodes[0].generate(1)
        txid2 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 20)
        self.nodes[0].generate(1)
        txidb2 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 20)
        self.nodes[0].generate(1)
        self.sync_all()
        txids = self.nodes[1].getaddresstxids("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs")
        assert_equal(len(txids), 3)
        assert_equal(txids[0], txid0)
        assert_equal(txids[1], txid1)
        assert_equal(txids[2], txid2)
        txidsb = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(len(txidsb), 3)
        assert_equal(txidsb[0], txidb0)
        assert_equal(txidsb[1], txidb1)
        assert_equal(txidsb[2], txidb2)
        # Check that limiting by height works
        print("Testing querying txids by range of block heights..")
        height_txids = self.nodes[1].getaddresstxids({
            "addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br"],
            "start": 105,
            "end": 110
        })
        assert_equal(len(height_txids), 2)
        assert_equal(height_txids[0], txidb0)
        assert_equal(height_txids[1], txidb1)
        # Check that multiple addresses works
        multitxids = self.nodes[1].getaddresstxids({"addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", "mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs"]})
        assert_equal(len(multitxids), 6)
        assert_equal(multitxids[0], txid0)
        assert_equal(multitxids[1], txidb0)
        assert_equal(multitxids[2], txid1)
        assert_equal(multitxids[3], txidb1)
        assert_equal(multitxids[4], txid2)
        assert_equal(multitxids[5], txidb2)
        # Check that balances are correct
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 45 * 100000000)
        # Check that outputs with the same address will only return one txid
        print("Testing for txid uniqueness...")
        addressHash = bytes([99,73,164,24,252,69,120,209,10,55,43,84,180,92,40,12,200,196,56,47])
        scriptPubKey = CScript([OP_HASH160, addressHash, OP_EQUAL])
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        tx.vout = [CTxOut(10, scriptPubKey), CTxOut(11, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        txidsmany = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(len(txidsmany), 4)
        assert_equal(txidsmany[3], sent_txid)
        # Check that balances are correct
        print("Testing balances...")
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 45 * 100000000 + 21)
        # Check that balances are correct after spending
        print("Testing balances after spending...")
        privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
        address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
        addressHash2 = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
        scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
        self.nodes[0].importprivkey(privkey2)
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        amount = int(unspent[0]["amount"] * 100000000 - 100000)
        tx.vout = [CTxOut(amount, scriptPubKey2)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        spending_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        balance1 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance1["balance"], amount)
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(spending_txid, 16), 0))]
        send_amount = 1 * 100000000 + 12840
        change_amount = amount - send_amount - 10000
        tx.vout = [CTxOut(change_amount, scriptPubKey2), CTxOut(send_amount, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        balance2 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance2["balance"], change_amount)
        # Check that deltas are returned correctly
        deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 1, "end": 200})
        balance3 = 0
        for delta in deltas:
            balance3 += delta["satoshis"]
        assert_equal(balance3, change_amount)
        assert_equal(deltas[0]["address"], address2)
        assert_equal(deltas[0]["blockindex"], 1)
        # Check that entire range will be queried
        deltasAll = self.nodes[1].getaddressdeltas({"addresses": [address2]})
        assert_equal(len(deltasAll), len(deltas))
        # Check that deltas can be returned from range of block heights
        deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 113, "end": 113})
        assert_equal(len(deltas), 1)
        # Check that unspent outputs can be queried
        print("Testing utxos...")
        utxos = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos), 1)
        assert_equal(utxos[0]["satoshis"], change_amount)
        # Check that indexes will be updated with a reorg
        print("Testing reorg...")
        best_hash = self.nodes[0].getbestblockhash()
        self.nodes[0].invalidateblock(best_hash)
        self.nodes[1].invalidateblock(best_hash)
        self.nodes[2].invalidateblock(best_hash)
        self.nodes[3].invalidateblock(best_hash)
        self.sync_all()
        balance4 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance4, balance1)
        utxos2 = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos2), 1)
        assert_equal(utxos2[0]["satoshis"], amount)
        # Check sorting of utxos
        self.nodes[2].generate(150)
        txidsort1 = self.nodes[2].sendtoaddress(address2, 50)
        self.nodes[2].generate(1)
        txidsort2 = self.nodes[2].sendtoaddress(address2, 50)
        self.nodes[2].generate(1)
        self.sync_all()
        utxos3 = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos3), 3)
        assert_equal(utxos3[0]["height"], 114)
        assert_equal(utxos3[1]["height"], 264)
        assert_equal(utxos3[2]["height"], 265)
        # Check mempool indexing
        print("Testing mempool indexing...")
        privKey3 = "cVfUn53hAbRrDEuMexyfgDpZPhF7KqXpS8UZevsyTDaugB7HZ3CD"
        address3 = "mw4ynwhS7MmrQ27hr82kgqu7zryNDK26JB"
        addressHash3 = bytes([170,152,114,181,187,205,181,17,216,158,14,17,170,39,218,115,253,44,63,80])
        scriptPubKey3 = CScript([OP_DUP, OP_HASH160, addressHash3, OP_EQUALVERIFY, OP_CHECKSIG])
        address4 = "2N8oFVB2vThAKury4vnLquW2zVjsYjjAkYQ"
        scriptPubKey4 = CScript([OP_HASH160, addressHash3, OP_EQUAL])
        unspent = self.nodes[2].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        amount = int(unspent[0]["amount"] * 100000000 - 100000)
        tx.vout = [CTxOut(amount, scriptPubKey3)]
        tx.rehash()
        signed_tx = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        memtxid1 = self.nodes[2].sendrawtransaction(signed_tx["hex"], True)
        time.sleep(2)
        tx2 = CTransaction()
        tx2.vin = [CTxIn(COutPoint(int(unspent[1]["txid"], 16), unspent[1]["vout"]))]
        amount = int(unspent[1]["amount"] * 100000000 - 100000)
        tx2.vout = [
            CTxOut(int(amount / 4), scriptPubKey3),
            CTxOut(int(amount / 4), scriptPubKey3),
            CTxOut(int(amount / 4), scriptPubKey4),
            CTxOut(int(amount / 4), scriptPubKey4)
        ]
        tx2.rehash()
        signed_tx2 = self.nodes[2].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
        memtxid2 = self.nodes[2].sendrawtransaction(signed_tx2["hex"], True)
        time.sleep(2)
        mempool = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool), 3)
        assert_equal(mempool[0]["txid"], memtxid1)
        assert_equal(mempool[0]["address"], address3)
        assert_equal(mempool[0]["index"], 0)
        assert_equal(mempool[1]["txid"], memtxid2)
        assert_equal(mempool[1]["index"], 0)
        assert_equal(mempool[2]["txid"], memtxid2)
        assert_equal(mempool[2]["index"], 1)
        blk_hashes = self.nodes[2].generate(1);
        self.sync_all();
        mempool2 = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool2), 0)
        tx = CTransaction()
        tx.vin = [
            CTxIn(COutPoint(int(memtxid2, 16), 0)),
            CTxIn(COutPoint(int(memtxid2, 16), 1))
        ]
        tx.vout = [CTxOut(int(amount / 2 - 10000), scriptPubKey2)]
        tx.rehash()
        self.nodes[2].importprivkey(privKey3)
        signed_tx3 = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        memtxid3 = self.nodes[2].sendrawtransaction(signed_tx3["hex"], True)
        time.sleep(2)
        mempool3 = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool3), 2)
        assert_equal(mempool3[0]["prevtxid"], memtxid2)
        assert_equal(mempool3[0]["prevout"], 0)
        assert_equal(mempool3[1]["prevtxid"], memtxid2)
        assert_equal(mempool3[1]["prevout"], 1)
        # sending and receiving to the same address
        privkey1 = "cQY2s58LhzUCmEXN8jtAp1Etnijx78YRZ466w4ikX1V4UpTpbsf8"
        address1 = "myAUWSHnwsQrhuMWv4Br6QsCnpB41vFwHn"
        address1hash = bytes([193,146,191,247,81,175,142,254,193,81,53,212,43,254,237,249,26,111,62,52])
        address1script = CScript([OP_DUP, OP_HASH160, address1hash, OP_EQUALVERIFY, OP_CHECKSIG])
        self.nodes[0].sendtoaddress(address1, 10)
        self.nodes[0].generate(1)
        self.sync_all()
        utxos = self.nodes[1].getaddressutxos({"addresses": [address1]})
        assert_equal(len(utxos), 1)
        tx = CTransaction()
        tx.vin = [
            CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["outputIndex"]))
        ]
        amount = int(utxos[0]["satoshis"] - 1000)
        tx.vout = [CTxOut(amount, address1script)]
        tx.rehash()
        self.nodes[0].importprivkey(privkey1)
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        mem_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.sync_all()
        mempool_deltas = self.nodes[2].getaddressmempool({"addresses": [address1]})
        assert_equal(len(mempool_deltas), 2)
        # Include chaininfo in results
        print("Testing results with chain info...")
        deltas_with_info = self.nodes[1].getaddressdeltas({
            "addresses": [address2],
            "start": 1,
            "end": 200,
            "chainInfo": True
        })
        start_block_hash = self.nodes[1].getblockhash(1);
        end_block_hash = self.nodes[1].getblockhash(200);
        assert_equal(deltas_with_info["start"]["height"], 1)
        assert_equal(deltas_with_info["start"]["hash"], start_block_hash)
        assert_equal(deltas_with_info["end"]["height"], 200)
        assert_equal(deltas_with_info["end"]["hash"], end_block_hash)
        utxos_with_info = self.nodes[1].getaddressutxos({"addresses": [address2], "chainInfo": True})
        expected_tip_block_hash = self.nodes[1].getblockhash(267);
        assert_equal(utxos_with_info["height"], 267)
        assert_equal(utxos_with_info["hash"], expected_tip_block_hash)
        print("Passed\n")
# Allow running this functional test directly from the command line.
if __name__ == '__main__':
    AddressIndexTest().main()
| 41.818697 | 144 | 0.64517 |
import time
from test_framework.test_framework import CryptomilesTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class AddressIndexTest(CryptomilesTestFramework):
    """Functional test for the -addressindex RPC surface.

    Exercises getaddresstxids, getaddressbalance, getaddressdeltas,
    getaddressutxos and getaddressmempool across p2pkh and p2sh scripts,
    including reorg and mempool behaviour.
    """
    def set_test_params(self):
        # Start from a fresh chain so block heights asserted below are exact.
        self.setup_clean_chain = True
        self.num_nodes = 4
    def setup_network(self):
        # Nodes 1-3 run with -addressindex; node 0 acts as the miner/spender.
        self.add_nodes(4, [
            ["-debug", "-relaypriority=0"],
            ["-debug", "-addressindex"],
            ["-debug", "-addressindex", "-relaypriority=0"],
            ["-debug", "-addressindex"]])
        self.start_nodes()
        # Star topology around node 0.
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        connect_nodes_bi(self.nodes, 0, 3)
        self.sync_all()
    def run_test(self):
        print("Mining blocks...")
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Check that a never-used address starts with a zero indexed balance.
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 0)
        # Check p2pkh and p2sh address indexes: send three payments to each,
        # mining a block after every send so ordering by height is fixed.
        print("Testing p2pkh and p2sh address index...")
        txid0 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 10)
        self.nodes[0].generate(1)
        txidb0 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 10)
        self.nodes[0].generate(1)
        txid1 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 15)
        self.nodes[0].generate(1)
        txidb1 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 15)
        self.nodes[0].generate(1)
        txid2 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 20)
        self.nodes[0].generate(1)
        txidb2 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 20)
        self.nodes[0].generate(1)
        self.sync_all()
        # The p2pkh address must report its three txids in confirmation order.
        txids = self.nodes[1].getaddresstxids("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs")
        assert_equal(len(txids), 3)
        assert_equal(txids[0], txid0)
        assert_equal(txids[1], txid1)
        assert_equal(txids[2], txid2)
        # Same for the p2sh address.
        txidsb = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(len(txidsb), 3)
        assert_equal(txidsb[0], txidb0)
        assert_equal(txidsb[1], txidb1)
        assert_equal(txidsb[2], txidb2)
        # Query txids restricted to a block-height window.
        print("Testing querying txids by range of block heights..")
        height_txids = self.nodes[1].getaddresstxids({
            "addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br"],
            "start": 105,
            "end": 110
        })
        assert_equal(len(height_txids), 2)
        assert_equal(height_txids[0], txidb0)
        assert_equal(height_txids[1], txidb1)
        # Multi-address queries interleave results by block height.
        multitxids = self.nodes[1].getaddresstxids({"addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", "mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs"]})
        assert_equal(len(multitxids), 6)
        assert_equal(multitxids[0], txid0)
        assert_equal(multitxids[1], txidb0)
        assert_equal(multitxids[2], txid1)
        assert_equal(multitxids[3], txidb1)
        assert_equal(multitxids[4], txid2)
        assert_equal(multitxids[5], txidb2)
        # 10 + 15 + 20 coins received, expressed in satoshis.
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 45 * 100000000)
        # A transaction paying the same script twice must still count as one txid.
        print("Testing for txid uniqueness...")
        addressHash = bytes([99,73,164,24,252,69,120,209,10,55,43,84,180,92,40,12,200,196,56,47])
        scriptPubKey = CScript([OP_HASH160, addressHash, OP_EQUAL])
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        tx.vout = [CTxOut(10, scriptPubKey), CTxOut(11, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        txidsmany = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(len(txidsmany), 4)
        assert_equal(txidsmany[3], sent_txid)
        # Balance now also includes the 10 + 11 satoshi outputs above.
        print("Testing balances...")
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 45 * 100000000 + 21)
        # Fund a fresh p2pkh address, then spend from it and verify balances
        # and deltas track the change output.
        print("Testing balances after spending...")
        privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
        address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
        addressHash2 = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
        scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
        self.nodes[0].importprivkey(privkey2)
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        amount = int(unspent[0]["amount"] * 100000000 - 100000)
        tx.vout = [CTxOut(amount, scriptPubKey2)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        spending_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        balance1 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance1["balance"], amount)
        # Spend from address2: one change output back to it, one payment away.
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(spending_txid, 16), 0))]
        send_amount = 1 * 100000000 + 12840
        change_amount = amount - send_amount - 10000
        tx.vout = [CTxOut(change_amount, scriptPubKey2), CTxOut(send_amount, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        balance2 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance2["balance"], change_amount)
        # Summing all deltas must reproduce the final balance.
        deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 1, "end": 200})
        balance3 = 0
        for delta in deltas:
            balance3 += delta["satoshis"]
        assert_equal(balance3, change_amount)
        assert_equal(deltas[0]["address"], address2)
        assert_equal(deltas[0]["blockindex"], 1)
        # Omitting start/end must return the same deltas as the full range.
        deltasAll = self.nodes[1].getaddressdeltas({"addresses": [address2]})
        assert_equal(len(deltasAll), len(deltas))
        # A single-height window returns only that block's delta.
        deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 113, "end": 113})
        assert_equal(len(deltas), 1)
        print("Testing utxos...")
        utxos = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos), 1)
        assert_equal(utxos[0]["satoshis"], change_amount)
        # Invalidate the tip on all nodes: the spend above is rolled back and
        # the utxo set for address2 reverts to the pre-spend state.
        print("Testing reorg...")
        best_hash = self.nodes[0].getbestblockhash()
        self.nodes[0].invalidateblock(best_hash)
        self.nodes[1].invalidateblock(best_hash)
        self.nodes[2].invalidateblock(best_hash)
        self.nodes[3].invalidateblock(best_hash)
        self.sync_all()
        balance4 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance4, balance1)
        utxos2 = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos2), 1)
        assert_equal(utxos2[0]["satoshis"], amount)
        # Extend the chain and add two more payments; utxos must come back
        # sorted by confirmation height.
        self.nodes[2].generate(150)
        txidsort1 = self.nodes[2].sendtoaddress(address2, 50)
        self.nodes[2].generate(1)
        txidsort2 = self.nodes[2].sendtoaddress(address2, 50)
        self.nodes[2].generate(1)
        self.sync_all()
        utxos3 = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos3), 3)
        assert_equal(utxos3[0]["height"], 114)
        assert_equal(utxos3[1]["height"], 264)
        assert_equal(utxos3[2]["height"], 265)
        # Unconfirmed transactions must appear in the address mempool index.
        print("Testing mempool indexing...")
        privKey3 = "cVfUn53hAbRrDEuMexyfgDpZPhF7KqXpS8UZevsyTDaugB7HZ3CD"
        address3 = "mw4ynwhS7MmrQ27hr82kgqu7zryNDK26JB"
        addressHash3 = bytes([170,152,114,181,187,205,181,17,216,158,14,17,170,39,218,115,253,44,63,80])
        scriptPubKey3 = CScript([OP_DUP, OP_HASH160, addressHash3, OP_EQUALVERIFY, OP_CHECKSIG])
        address4 = "2N8oFVB2vThAKury4vnLquW2zVjsYjjAkYQ"
        scriptPubKey4 = CScript([OP_HASH160, addressHash3, OP_EQUAL])
        unspent = self.nodes[2].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        amount = int(unspent[0]["amount"] * 100000000 - 100000)
        tx.vout = [CTxOut(amount, scriptPubKey3)]
        tx.rehash()
        signed_tx = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        memtxid1 = self.nodes[2].sendrawtransaction(signed_tx["hex"], True)
        # Give mempool notifications time to propagate between the sends.
        time.sleep(2)
        tx2 = CTransaction()
        tx2.vin = [CTxIn(COutPoint(int(unspent[1]["txid"], 16), unspent[1]["vout"]))]
        amount = int(unspent[1]["amount"] * 100000000 - 100000)
        tx2.vout = [
            CTxOut(int(amount / 4), scriptPubKey3),
            CTxOut(int(amount / 4), scriptPubKey3),
            CTxOut(int(amount / 4), scriptPubKey4),
            CTxOut(int(amount / 4), scriptPubKey4)
        ]
        tx2.rehash()
        signed_tx2 = self.nodes[2].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
        memtxid2 = self.nodes[2].sendrawtransaction(signed_tx2["hex"], True)
        time.sleep(2)
        # address3 receives one output from tx and two from tx2.
        mempool = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool), 3)
        assert_equal(mempool[0]["txid"], memtxid1)
        assert_equal(mempool[0]["address"], address3)
        assert_equal(mempool[0]["index"], 0)
        assert_equal(mempool[1]["txid"], memtxid2)
        assert_equal(mempool[1]["index"], 0)
        assert_equal(mempool[2]["txid"], memtxid2)
        assert_equal(mempool[2]["index"], 1)
        # After confirmation the mempool index for the address empties.
        blk_hashes = self.nodes[2].generate(1);
        self.sync_all();
        mempool2 = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool2), 0)
        # Spending both confirmed outputs shows up as two spend deltas with
        # prevtxid/prevout populated.
        tx = CTransaction()
        tx.vin = [
            CTxIn(COutPoint(int(memtxid2, 16), 0)),
            CTxIn(COutPoint(int(memtxid2, 16), 1))
        ]
        tx.vout = [CTxOut(int(amount / 2 - 10000), scriptPubKey2)]
        tx.rehash()
        self.nodes[2].importprivkey(privKey3)
        signed_tx3 = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        memtxid3 = self.nodes[2].sendrawtransaction(signed_tx3["hex"], True)
        time.sleep(2)
        mempool3 = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool3), 2)
        assert_equal(mempool3[0]["prevtxid"], memtxid2)
        assert_equal(mempool3[0]["prevout"], 0)
        assert_equal(mempool3[1]["prevtxid"], memtxid2)
        assert_equal(mempool3[1]["prevout"], 1)
        # Sending and receiving to the same address: the mempool must report
        # both the spend and the receive delta.
        privkey1 = "cQY2s58LhzUCmEXN8jtAp1Etnijx78YRZ466w4ikX1V4UpTpbsf8"
        address1 = "myAUWSHnwsQrhuMWv4Br6QsCnpB41vFwHn"
        address1hash = bytes([193,146,191,247,81,175,142,254,193,81,53,212,43,254,237,249,26,111,62,52])
        address1script = CScript([OP_DUP, OP_HASH160, address1hash, OP_EQUALVERIFY, OP_CHECKSIG])
        self.nodes[0].sendtoaddress(address1, 10)
        self.nodes[0].generate(1)
        self.sync_all()
        utxos = self.nodes[1].getaddressutxos({"addresses": [address1]})
        assert_equal(len(utxos), 1)
        tx = CTransaction()
        tx.vin = [
            CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["outputIndex"]))
        ]
        amount = int(utxos[0]["satoshis"] - 1000)
        tx.vout = [CTxOut(amount, address1script)]
        tx.rehash()
        self.nodes[0].importprivkey(privkey1)
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        mem_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.sync_all()
        mempool_deltas = self.nodes[2].getaddressmempool({"addresses": [address1]})
        assert_equal(len(mempool_deltas), 2)
        # Include chain info (start/end block hash and height) in the results.
        print("Testing results with chain info...")
        deltas_with_info = self.nodes[1].getaddressdeltas({
            "addresses": [address2],
            "start": 1,
            "end": 200,
            "chainInfo": True
        })
        start_block_hash = self.nodes[1].getblockhash(1);
        end_block_hash = self.nodes[1].getblockhash(200);
        assert_equal(deltas_with_info["start"]["height"], 1)
        assert_equal(deltas_with_info["start"]["hash"], start_block_hash)
        assert_equal(deltas_with_info["end"]["height"], 200)
        assert_equal(deltas_with_info["end"]["hash"], end_block_hash)
        utxos_with_info = self.nodes[1].getaddressutxos({"addresses": [address2], "chainInfo": True})
        expected_tip_block_hash = self.nodes[1].getblockhash(267);
        assert_equal(utxos_with_info["height"], 267)
        assert_equal(utxos_with_info["hash"], expected_tip_block_hash)
        print("Passed\n")
# Script entry point: run the address-index functional test through the
# test-framework harness (sets up nodes, runs run_test, tears down).
if __name__ == '__main__':
    AddressIndexTest().main()
| true | true |
1c3185b532d5551d010b0dacf8bc60176e707d0c | 701 | py | Python | python3/tests/test_remove_duplicates_from_sorted_list.py | qianbinbin/leetcode | 915cecab0c940cd13847683ec55b17b77eb0f39b | [
"MIT"
] | 4 | 2018-03-05T02:27:16.000Z | 2021-03-15T14:19:44.000Z | python3/tests/test_remove_duplicates_from_sorted_list.py | qianbinbin/leetcode | 915cecab0c940cd13847683ec55b17b77eb0f39b | [
"MIT"
] | null | null | null | python3/tests/test_remove_duplicates_from_sorted_list.py | qianbinbin/leetcode | 915cecab0c940cd13847683ec55b17b77eb0f39b | [
"MIT"
] | 2 | 2018-07-22T10:32:10.000Z | 2018-10-20T03:14:28.000Z | from unittest import TestCase
from leetcodepy.remove_duplicates_from_sorted_list import *
from leetcodepy.utils import linked_lists
# Shared solver under test (LeetCode 83: Remove Duplicates from Sorted List).
solution1 = Solution1()
# Fixture 1: duplicate head values collapse to a two-node list.
values1 = (1, 1, 2)
expected1 = linked_lists.from_values(1, 2)
# Fixture 2: duplicates at both the head and the tail.
values2 = (1, 1, 2, 3, 3)
expected2 = linked_lists.from_values(1, 2, 3)
class TestRemoveDuplicatesFromSortedList(TestCase):
    """Unit tests for ``Solution1.deleteDuplicates`` on sorted linked lists."""

    def test1(self):
        """Both fixtures deduplicate to their expected linked lists."""
        for raw_values, expected in ((values1, expected1), (values2, expected2)):
            deduped = solution1.deleteDuplicates(linked_lists.from_values(*raw_values))
            self.assertTrue(linked_lists.equals(expected, deduped))
| 26.961538 | 62 | 0.737518 | from unittest import TestCase
from leetcodepy.remove_duplicates_from_sorted_list import *
from leetcodepy.utils import linked_lists
# Shared solver under test (LeetCode 83: Remove Duplicates from Sorted List).
solution1 = Solution1()
# Fixture 1: duplicate head values collapse to a two-node list.
values1 = (1, 1, 2)
expected1 = linked_lists.from_values(1, 2)
# Fixture 2: duplicates at both the head and the tail.
values2 = (1, 1, 2, 3, 3)
expected2 = linked_lists.from_values(1, 2, 3)
class TestRemoveDuplicatesFromSortedList(TestCase):
    """Unit tests for ``Solution1.deleteDuplicates`` on sorted linked lists."""
    def test1(self):
        """Both fixtures deduplicate to their expected linked lists."""
        # Fixture 1: (1, 1, 2) -> 1 -> 2
        head1 = linked_lists.from_values(*values1)
        head1 = solution1.deleteDuplicates(head1)
        self.assertTrue(linked_lists.equals(expected1, head1))
        # Fixture 2: (1, 1, 2, 3, 3) -> 1 -> 2 -> 3
        head2 = linked_lists.from_values(*values2)
        head2 = solution1.deleteDuplicates(head2)
        self.assertTrue(linked_lists.equals(expected2, head2))
| true | true |
1c3185d883f9f4a37acb3aba2aa7ffd0a3a76f3c | 80,900 | py | Python | cartography/classification/run_glue.py | dumpmemory/Uneven_training_data | 63350037744b761619d4d8bc7d2122d2bffa2c95 | [
"Apache-2.0"
] | 7 | 2021-09-13T08:53:28.000Z | 2022-03-15T16:18:22.000Z | cartography/classification/run_glue.py | dumpmemory/Uneven_training_data | 63350037744b761619d4d8bc7d2122d2bffa2c95 | [
"Apache-2.0"
] | null | null | null | cartography/classification/run_glue.py | dumpmemory/Uneven_training_data | 63350037744b761619d4d8bc7d2122d2bffa2c95 | [
"Apache-2.0"
] | 2 | 2021-11-09T00:51:11.000Z | 2021-11-09T04:57:20.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Finetuning the library models for sequence classification on GLUE-style tasks
(BERT, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa); modified for Dataset Cartography.
"""
import _jsonnet
import argparse
import glob
import json
import logging
import numpy as np
import os
import random
import shutil
import torch
from scipy.spatial import distance
from scipy.stats import entropy
from itertools import cycle
import torch.nn as nn
#label propagation
import word_level_augment
import torch.nn.functional as F
import mmd_loss
from torch.utils.data import Dataset
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertTokenizer,
RobertaConfig,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from cartography.classification.glue_utils import adapted_glue_compute_metrics as compute_metrics
from cartography.classification.glue_utils import adapted_glue_convert_examples_to_features as convert_examples_to_features
from cartography.classification.glue_utils import glue_output_modes as output_modes
from cartography.classification.glue_utils import glue_processors as processors
from cartography.classification.diagnostics_evaluation import evaluate_by_category
from cartography.classification.models import (
AdaptedBertForMultipleChoice,
AdaptedBertForSequenceClassification,
AdaptedRobertaForMultipleChoice,
AdaptedRobertaForSequenceClassification
)
from cartography.classification.multiple_choice_utils import convert_mc_examples_to_features
from cartography.classification.params import Params, save_args_to_file
from cartography.selection.selection_utils import log_training_dynamics
from cartography.data_utils_glue import convert_string_to_unique_number
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# Flat tuple of every pretrained shortcut name known to the supported configs
# (BERT and RoBERTa), used for CLI help/validation.
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            RobertaConfig,
        )
    ),
    (),
)
# Maps a --model_type string to its (config, model, tokenizer) classes.
# "*_mc" variants use the multiple-choice heads instead of sequence
# classification.
MODEL_CLASSES = {
    "bert": (BertConfig, AdaptedBertForSequenceClassification, BertTokenizer),
    "bert_mc": (BertConfig, AdaptedBertForMultipleChoice, BertTokenizer),
    "roberta": (RobertaConfig, AdaptedRobertaForSequenceClassification, RobertaTokenizer),
    "roberta_mc": (RobertaConfig, AdaptedRobertaForMultipleChoice, RobertaTokenizer),
}
def set_seed(args):
    """Seed every random number generator in use for reproducibility.

    Args:
        args: namespace providing ``seed`` (int) and ``n_gpu`` (int).
    """
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Only touch the CUDA generators when GPUs are actually in use.
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
class TripleDataset(torch.utils.data.Dataset):
    """Zip-style wrapper joining several datasets index-wise.

    ``ds[i]`` returns a tuple with the i-th item of every wrapped dataset;
    the length is that of the shortest constituent.
    """

    def __init__(self, *datasets):
        self.datasets = datasets

    def __getitem__(self, i):
        items = []
        for dataset in self.datasets:
            items.append(dataset[i])
        return tuple(items)

    def __len__(self):
        lengths = [len(dataset) for dataset in self.datasets]
        return min(lengths)
def train(args, train_dataset, model, tokenizer, flag_in_training):
    """Train (or fine-tune) the model, logging per-epoch training dynamics.

    Args:
        args: parsed experiment configuration (batch sizes, LR, steps, ...).
        train_dataset: TensorDataset whose 5th tensor holds example guids.
        model: a transformers classification model.
        tokenizer: tokenizer matching the model (saved with checkpoints).
        flag_in_training: 'finetune' switches to args.finetune_learning_rate
            and triples the epoch budget; anything else trains normally.

    Returns:
        (global_step, average training loss per optimization step).
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    # Effective batch = per-GPU batch x number of GPUs (DataParallel).
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    # train_sampler = RandomSampler(
    #     train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(
        train_dataset, batch_size=args.train_batch_size, shuffle=True)
    # Either a fixed step budget (max_steps) or epochs decides t_total.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (
            len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay); biases and
    # LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         "weight_decay": args.weight_decay,
         },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0
         },
    ]
    if flag_in_training =='finetune':
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.finetune_learning_rate, eps=args.adam_epsilon)
    else:
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist (resuming).
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
    # Train!
    # args.local_rank = -1
    # get_world_size = 1
    # args.train_batch_size = 128
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" local_rank = %d", args.local_rank)
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_this_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        # set global_step to gobal_step of last saved checkpoint from model path
        global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
        steps_trained_in_this_epoch = global_step % (
            len(train_dataloader) // args.gradient_accumulation_steps)
        logger.info(f" Continuing training from checkpoint, will skip to saved global_step")
        logger.info(f" Continuing training from epoch {epochs_trained}")
        logger.info(f" Continuing training from global step {global_step}")
        logger.info(f" Will skip the first {steps_trained_in_this_epoch} steps in the first epoch")
    tr_loss, logging_loss, epoch_loss = 0.0, 0.0, 0.0
    model.zero_grad()
    # Fine-tuning runs for 3x the configured number of epochs.
    if flag_in_training =='finetune':
        train_iterator = trange(epochs_trained,
                            (int(args.num_train_epochs)*3),
                            desc="Epoch",
                            disable=args.local_rank not in [-1, 0],
                            mininterval=10,
                            ncols=100)
    else:
        train_iterator = trange(epochs_trained,
                                int(args.num_train_epochs),
                                desc="Epoch",
                                disable=args.local_rank not in [-1, 0],
                                mininterval=10,
                                ncols=100)
    set_seed(args)  # Added here for reproducibility
    best_dev_performance = 0
    best_epoch = epochs_trained
    train_acc = 0.0
    total_entropy = 0.
    total_sample_size = 0
    for epoch, _ in enumerate(train_iterator):
        epoch_iterator = tqdm(train_dataloader,
                              desc="Iteration",
                              disable=args.local_rank not in [-1, 0],
                              mininterval=10,
                              ncols=100)
        train_iterator.set_description(f"train_epoch: {epoch} train_acc: {train_acc:.4f}")
        # Per-epoch accumulators for dataset-cartography training dynamics.
        train_ids = None
        train_golds = None
        train_logits = None
        train_losses = None
        #label propagation
        # lingyige_loader = None
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_this_epoch > 0:
                steps_trained_in_this_epoch -= 1
                continue
            # TODO: add the dataloader branching inside or outside this loop,
            # i.e. feed two dataloaders in (see lp_train).
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
            if args.model_type != "distilbert":
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
            # NOTE(review): the adapted model returns a per-example entropy as
            # its last output — confirm against models.py.
            entropy=outputs[-1]
            if train_logits is None:  # Keep track of training dynamics.
                # batch[4] carries the per-example guids used for cartography.
                train_ids = batch[4].detach().cpu().numpy()
                train_logits = outputs[1].detach().cpu().numpy()
                train_golds = inputs["labels"].detach().cpu().numpy()
                # train_golds = [l.tolist() for l in train_golds]
                # print('initial_train_gold', train_golds)
                train_losses = loss.detach().cpu().numpy()
                train_entropy = entropy.detach().cpu().numpy()
                print(entropy.size(), "check entropy size")
            else:
                train_ids = np.append(train_ids, batch[4].detach().cpu().numpy())
                train_logits = np.append(train_logits, outputs[1].detach().cpu().numpy(), axis=0)
                train_golds = np.concatenate((train_golds, inputs["labels"].detach().cpu().numpy()), 0)
                train_losses = np.append(train_losses, loss.detach().cpu().numpy())
                train_entropy = np.append(train_entropy, entropy.detach().cpu().numpy())
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Optimizer step only every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if (
                    args.local_rank in [-1, 0] and
                    args.logging_steps > 0 and
                    global_step % args.logging_steps == 0
                ):
                    epoch_log = {}
                    # Only evaluate when single GPU otherwise metrics may not average well
                    if args.local_rank == -1 and args.evaluate_during_training_epoch:
                        logger.info(f"From within the epoch at step {step}")
                        results, _ = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            eval_key = "eval_{}".format(key)
                            epoch_log[eval_key] = value
                    epoch_log["learning_rate"] = scheduler.get_lr()[0]
                    epoch_log["loss"] = (tr_loss - logging_loss) / args.logging_steps
                    logging_loss = tr_loss
                    for key, value in epoch_log.items():
                        tb_writer.add_scalar(key, value, global_step)
                    logger.info(json.dumps({**epoch_log, **{"step": global_step}}))
                if (
                    args.local_rank in [-1, 0] and
                    args.save_steps > 0 and
                    global_step % args.save_steps == 0
                ):
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            epoch_iterator.set_description(f"lr = {scheduler.get_lr()[0]:.8f}, "
                                           f"loss = {(tr_loss-epoch_loss)/(step+1):.4f}")
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        # mean_entropy = total_entropy / total_sample_size
        # NOTE(review): the denominator counts entries with entropy < -1e-10,
        # i.e. strictly negative values; if the model never emits negative
        # entropies this divides by zero — confirm the intended sentinel
        # convention in the model's entropy output.
        mean_entropy= np.sum(train_entropy) / np.where(train_entropy < -(np.ones_like(train_entropy) * 1e-10),
                                                 np.ones_like(train_entropy),
                                                 np.zeros_like(train_entropy)).sum()
        logger.info(f"*********************************selected_questions*********************************: {mean_entropy:.4f}***")
        #### Post epoch eval ####
        # Only evaluate when single GPU otherwise metrics may not average well
        if args.local_rank == -1 and args.evaluate_during_training:
            best_dev_performance, best_epoch = save_model(
                args, model, tokenizer, epoch, best_epoch, best_dev_performance)
        # Persist this epoch's (guid, logits, gold) triples for cartography.
        log_training_dynamics(output_dir=args.output_dir,
                              epoch=epoch,
                              train_ids=list(train_ids),
                              train_logits=list(train_logits),
                              train_golds=list(train_golds))
        train_result = compute_metrics(args.task_name, np.argmax(train_logits, axis=1), train_golds)
        train_acc = train_result["acc"]
        epoch_log = {"epoch": epoch,
                     "train_acc": train_acc,
                     "best_dev_performance": best_dev_performance,
                     "avg_batch_loss": (tr_loss - epoch_loss) / args.per_gpu_train_batch_size,
                     "learning_rate": scheduler.get_lr()[0],}
        epoch_loss = tr_loss
        logger.info(f" End of epoch : {epoch}")
        with open(os.path.join(args.output_dir, f"eval_metrics_train.json"), "a") as toutfile:
            toutfile.write(json.dumps(epoch_log) + "\n")
        for key, value in epoch_log.items():
            tb_writer.add_scalar(key, value, global_step)
            logger.info(f" {key}: {value:.6f}")
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
        # Early stopping: quit when dev performance has not improved for
        # args.patience consecutive epochs.
        elif args.evaluate_during_training and epoch - best_epoch >= args.patience:
            logger.info(f"Ran out of patience. Best epoch was {best_epoch}. "
                        f"Stopping training at epoch {epoch} out of {args.num_train_epochs} epochs.")
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def interleave(x, size):
    """Interleave groups of ``size`` consecutive leading rows of ``x``.

    Reshapes the leading dimension into (groups, size), swaps those two
    axes, and flattens back, so element order becomes
    group0[0], group1[0], ..., group0[1], group1[1], ...
    Trailing dimensions are preserved.
    """
    shape = list(x.shape)
    grouped = x.reshape([-1, size] + shape[1:])
    return grouped.transpose(0, 1).reshape([-1] + shape[1:])
def lp_train(args, train_dataset, single_dataset, single_aug_dataset, model, tokenizer, flag_in_training):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
# train_sampler = RandomSampler(
# train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, batch_size=args.train_batch_size, shuffle=True)
if args.label_propagation and args.do_finetune:
single_aug= TripleDataset(single_dataset, single_aug_dataset)
single_train_dataloader = DataLoader(
single_aug, batch_size=args.train_batch_size, shuffle=True)
if args.max_steps > 0:
t_total = args.max_steps
args.ft_num_train_epochs = args.max_steps // (
len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.ft_num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0
},
]
if flag_in_training =='finetune':
optimizer = AdamW(optimizer_grouped_parameters, lr=args.finetune_learning_rate, eps=args.adam_epsilon)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
# args.local_rank = -1
# get_world_size = 1
# args.train_batch_size = 128
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.ft_num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" local_rank = %d", args.local_rank)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_this_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to gobal_step of last saved checkpoint from model path
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_this_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(f" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {global_step}")
logger.info(f" Will skip the first {steps_trained_in_this_epoch} steps in the first epoch")
tr_loss, logging_loss, epoch_loss = 0.0, 0.0, 0.0
model.zero_grad()
if flag_in_training =='finetune':
train_iterator = trange(epochs_trained,
int(args.ft_num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
else:
train_iterator = trange(epochs_trained,
int(args.num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
set_seed(args) # Added here for reproductibility
best_dev_performance = 0
best_epoch = epochs_trained
train_acc = 0.0
total_entropy = 0.
total_sample_size = 0
for epoch, _ in enumerate(train_iterator):
epoch_iterator = tqdm(train_dataloader,
desc="Iteration",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
train_iterator.set_description(f"train_epoch: {epoch} train_acc: {train_acc:.4f}")
train_ids = None
train_golds = None
train_logits = None
train_losses = None
#label propagation
# lingyige_loader = None
single_iter = iter(single_train_dataloader)
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_this_epoch > 0:
steps_trained_in_this_epoch -= 1
continue
#在这个for loop 里面或外面 加data loader 的判断,就是两个data loader load进去
model.train()
try:
inputs_u_w, inputs_u_s = single_iter.next()
except StopIteration:
single_iter = iter(single_train_dataloader)
inputs_u_w, inputs_u_s = single_iter.next()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.label_propagation and args.do_finetune:
# import pdb
# pdb.set_trace()
batch_single = tuple(t.to(args.device) for t in inputs_u_w)
inputs_single = {"input_ids": batch_single[0], "attention_mask": batch_single[1], "labels": batch_single[3]}
if args.model_type != "distilbert":
inputs_single["token_type_ids"] = (
batch_single[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
batch_single_aug = tuple(t.to(args.device) for t in inputs_u_s)
inputs_single_aug = {"input_ids": batch_single_aug[0], "attention_mask": batch_single_aug[1], "labels": batch_single_aug[3]}
if args.model_type != "distilbert":
inputs_single_aug["token_type_ids"] = (
batch_single_aug[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
reg_loss=nn.KLDivLoss()(F.log_softmax(model(**inputs_single_aug)[1]), torch.softmax(model(**inputs_single)[1], dim=-1).detach())
loss= loss + reg_loss *1.0
entropy=outputs[-1]
if train_logits is None: # Keep track of training dynamics.
train_ids = batch[4].detach().cpu().numpy()
train_logits = outputs[1].detach().cpu().numpy()
train_golds = inputs["labels"].detach().cpu().numpy()
# train_golds = [l.tolist() for l in train_golds]
# print('initial_train_gold', train_golds)
train_losses = loss.detach().cpu().numpy()
train_entropy = entropy.detach().cpu().numpy()
print(entropy.size(), "check entropy size")
else:
train_ids = np.append(train_ids, batch[4].detach().cpu().numpy())
train_logits = np.append(train_logits, outputs[1].detach().cpu().numpy(), axis=0)
train_golds = np.concatenate((train_golds, inputs["labels"].detach().cpu().numpy()), 0)
train_losses = np.append(train_losses, loss.detach().cpu().numpy())
train_entropy = np.append(train_entropy, entropy.detach().cpu().numpy())
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0] and
args.logging_steps > 0 and
global_step % args.logging_steps == 0
):
epoch_log = {}
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank == -1 and args.evaluate_during_training_epoch:
logger.info(f"From within the epoch at step {step}")
results, _ = evaluate(args, model, tokenizer)
for key, value in results.items():
eval_key = "eval_{}".format(key)
epoch_log[eval_key] = value
epoch_log["learning_rate"] = scheduler.get_lr()[0]
epoch_log["loss"] = (tr_loss - logging_loss) / args.logging_steps
logging_loss = tr_loss
for key, value in epoch_log.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**epoch_log, **{"step": global_step}}))
if (
args.local_rank in [-1, 0] and
args.save_steps > 0 and
global_step % args.save_steps == 0
):
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
epoch_iterator.set_description(f"lr = {scheduler.get_lr()[0]:.8f}, "
f"loss = {(tr_loss-epoch_loss)/(step+1):.4f}")
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
# mean_entropy = total_entropy / total_sample_size
mean_entropy= np.sum(train_entropy) / np.where(train_entropy < -(np.ones_like(train_entropy) * 1e-10),
np.ones_like(train_entropy),
np.zeros_like(train_entropy)).sum()
logger.info(f"*********************************selected_questions*********************************: {mean_entropy:.4f}***")
# logger.info(f"*** Found BEST model, and saved checkpoint. "
# f"BEST dev performance : {dev_performance:.4f} ***")
#### Post epoch eval ####
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank == -1 and args.evaluate_during_training:
best_dev_performance, best_epoch = save_model(
args, model, tokenizer, epoch, best_epoch, best_dev_performance)
log_training_dynamics(output_dir=args.output_dir,
epoch=epoch,
train_ids=list(train_ids),
train_logits=list(train_logits),
train_golds=list(train_golds))
train_result = compute_metrics(args.task_name, np.argmax(train_logits, axis=1), train_golds)
train_acc = train_result["acc"]
epoch_log = {"epoch": epoch,
"train_acc": train_acc,
"best_dev_performance": best_dev_performance,
"avg_batch_loss": (tr_loss - epoch_loss) / args.per_gpu_train_batch_size,
"learning_rate": scheduler.get_lr()[0],}
epoch_loss = tr_loss
logger.info(f" End of epoch : {epoch}")
with open(os.path.join(args.output_dir, f"eval_metrics_train.json"), "a") as toutfile:
toutfile.write(json.dumps(epoch_log) + "\n")
for key, value in epoch_log.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(f" {key}: {value:.6f}")
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
elif args.evaluate_during_training and epoch - best_epoch >= args.patience:
logger.info(f"Ran out of patience. Best epoch was {best_epoch}. "
f"Stopping training at epoch {epoch} out of {args.ft_num_train_epochs} epochs.")
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def save_model(args, model, tokenizer, epoch, best_epoch, best_dev_performance):
    """Evaluate on dev during training and write a model checkpoint.

    Runs `evaluate` with prefix "in_training", reads the `acc` metric, and
    saves model + tokenizer + args into args.output_dir.

    NOTE(review): the best-model comparison is commented out and replaced by
    ``if True``, so the checkpoint is overwritten on every call and the
    "Found BEST model" log line fires unconditionally — confirm intentional.

    Returns:
        (best_dev_performance, best_epoch) — both updated to the current
        epoch's values because of the ``if True`` above.
    """
    results, _ = evaluate(args, model, tokenizer, prefix="in_training")
    # TODO(SS): change hard coding `acc` as the desired metric, might not work for all tasks.
    desired_metric = "acc"
    dev_performance = results.get(desired_metric)
    # if dev_performance > best_dev_performance:
    if True:
        best_epoch = epoch
        best_dev_performance = dev_performance
        # Save model checkpoint
        # Take care of distributed/parallel training
        model_to_save = (model.module if hasattr(model, "module") else model)
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
        logger.info(f"*** Found BEST model, and saved checkpoint. "
                    f"BEST dev performance : {dev_performance:.4f} ***")
    return best_dev_performance, best_epoch
#Entropy
def JSD(P, Q):
    """Jensen-Shannon divergence between distributions P and Q.

    Computed as the average of the two KL divergences against the mixture
    M = (P + Q) / 2, using scipy's `entropy` (which normalizes its inputs).
    """
    mixture = (P + Q) * 0.5
    kl_p_m = entropy(P, mixture)
    kl_q_m = entropy(Q, mixture)
    return 0.5 * (kl_p_m + kl_q_m)
#torch Kl_div
def JSD_2(P, Q):
    """Jensen-Shannon divergence via ``torch.nn.functional.kl_div``.

    Both inputs are coerced to float numpy arrays. ``kl_div`` is called with
    its default reduction ('mean'), so the pointwise KL terms are averaged
    over elements rather than summed — matching the original implementation.
    """
    p = np.array(P, dtype=float)
    q = np.array(Q, dtype=float)
    mixture = 0.5 * (p + q)
    log_mix = torch.log(torch.from_numpy(mixture))
    kl_p_m = torch.nn.functional.kl_div(log_mix, torch.from_numpy(p)).numpy()
    kl_q_m = torch.nn.functional.kl_div(log_mix, torch.from_numpy(q)).numpy()
    return 0.5 * (kl_p_m + kl_q_m)
def evaluate(args, model, tokenizer, prefix="", eval_split="dev"):
    """Evaluate `model` on `eval_split`, log metrics, and dump predictions.

    Per task, writes three artifacts into the output dir:
      * an appended JSON line of metrics,
      * a per-example prediction list (guid / label / gold / confidence),
      * an "ours" JSON file of predicted probability distributions, plus
        JS/KL divergence statistics against the (soft) gold distributions.

    Returns:
        (results, all_predictions) — metrics dict and {task: [records]}.
    """
    # We do not really need a loop to handle MNLI double evaluation (matched, mis-matched).
    eval_task_names = (args.task_name,)
    eval_outputs_dirs = (args.output_dir,)
    results = {}
    all_predictions = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        # pair_id holds the original string example ids for the split.
        eval_dataset, pair_id = eval_load_and_cache_examples(
            args, eval_task, tokenizer, evaluate=True, data_split=f"{eval_split}_{prefix}")
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(
            eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info(f"***** Running {eval_task} {prefix} evaluation on {eval_split} *****")
        logger.info(f" Num examples = {len(eval_dataset)}")
        logger.info(f" Batch size = {args.eval_batch_size}")
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        example_ids = []
        gold_labels = []
        for batch in tqdm(eval_dataloader, desc="Evaluating", mininterval=10, ncols=100):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                if args.model_type != "distilbert":
                    inputs["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            # batch[4] carries numeric example ids, batch[3] the gold labels.
            example_ids += batch[4].tolist()
            gold_labels += batch[3].tolist()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(
                    out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        # NOTE(review): `probs` and `max_confidences` are only defined in the
        # classification branch; the prediction-writing loops below would
        # raise NameError for a regression task — confirm regression is never
        # routed through this function.
        if args.output_mode == "classification":
            probs = torch.nn.functional.softmax(torch.Tensor(preds), dim=-1)
            if args.do_temperature:
                # Temperature-scaled confidence calibration (T = 1.75).
                probs = torch.nn.functional.softmax(torch.Tensor(preds)/1.75, dim=-1)
            max_confidences = (torch.max(probs, dim=-1)[0]).tolist()
            preds = np.argmax(preds, axis=1) # Max of logit is the same as max of probability.
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        # order: [E, N, C]
        results.update(result)
        output_eval_file = os.path.join(
            eval_output_dir, f"eval_metrics_{eval_task}_{eval_split}_{prefix}.json")
        logger.info(f"***** {eval_task} {eval_split} results {prefix} *****")
        for key in sorted(result.keys()):
            logger.info(f"{eval_task} {eval_split} {prefix} {key} = {result[key]:.4f}")
        with open(output_eval_file, "a") as writer:
            writer.write(json.dumps(results) + "\n")
        # predictions
        all_predictions[eval_task] = []
        output_pred_file = os.path.join(
            eval_output_dir, f"predictions_{eval_task}_{eval_split}_{prefix}.lst")
        with open(output_pred_file, "w") as writer:
            logger.info(f"***** Write {eval_task} {eval_split} predictions {prefix} *****")
            for ex_id, pred, gold, max_conf, prob in zip(
                    example_ids, preds, gold_labels, max_confidences, probs.tolist()):
                # print(pred, prob, gold);input()
                # print('gold_label', processors[args.task_name]().get_labels()[int(max(gold))])
                # `gold` is a distribution; argmax recovers the hard label.
                record = {"guid": ex_id,
                          "label": processors[args.task_name]().get_labels()[pred],
                          "gold": processors[args.task_name]().get_labels()[int(np.argmax(gold))],
                          "confidence": max_conf,
                          "probabilities": prob}
                all_predictions[eval_task].append(record)
                writer.write(json.dumps(record) + "\n")
        # order: [E, N, C]
        # Map hashed numeric ids back to the original string uids.
        combined_id = dict()
        for id in pair_id[0]:
            each_id= convert_string_to_unique_number(id)
            combined_id[each_id] = id
        ours_file = os.path.join(
            eval_output_dir, f"ours_{eval_task}_{eval_split}_{prefix}.json")
        result_dict=dict()
        result_dict['ours']=dict()
        js_divergence_list = []
        prediction_entropy_list=[]
        kl_divergence_list = []
        new_js_divergence_list=[]
        new_js_divergence_list_2 = []
        with open(ours_file, "w") as writer:
            logger.info(f"***** Write ours {eval_task} {eval_split} predictions {prefix} *****")
            for ex_id, pred, gold, max_conf, prob in zip(
                    example_ids, preds, gold_labels, max_confidences, probs.tolist()):
                # print(pred, prob, gold);input()
                if ex_id in list(combined_id.keys()):
                    ex_idvalue = combined_id[ex_id]
                else:
                    # Fallback uid for examples whose hash has no mapping.
                    ex_idvalue ='000000'
                # ex_idvalue =combined_id[ex_id]
                result_dict['ours'][ex_idvalue]= {"uid": ex_idvalue,
                                                  "predicted_probabilities": prob,
                                                  "predicted_label": processors[args.task_name]().get_labels()[pred]}
                gold_dist = gold
                # Three JS variants are tracked side by side:
                # scipy.spatial.distance (sqrt of JSD), scipy entropy-based
                # JSD, and the torch kl_div-based JSD_2.
                cur_js_divergence = distance.jensenshannon(gold_dist, prob)
                if np.isnan(cur_js_divergence):
                    print("JS for this example is `nan', we will set JS to 0 for the current example. "
                          "This can be a potential error.",
                          "Gold distribution:", gold_dist,
                          "Model distribution:", prob,)
                    # "UID:", ex_idvalue)
                    cur_js_divergence = 0 # set error to 0.
                else:
                    pass
                new_cur_js_divergence=JSD(np.array(prob, dtype=float), np.array(gold_dist, dtype=float))
                new_cur_js_divergence_2 = JSD_2(prob, gold_dist)
                js_divergence_list.append(cur_js_divergence)
                new_js_divergence_list.append(new_cur_js_divergence)
                new_js_divergence_list_2.append(new_cur_js_divergence_2)
                # cur_kl_divergence = entropy(gold_dist, prob)
                prediction_entropy = entropy(prob)
                prediction_entropy_list.append(prediction_entropy)
                # print(prob, gold_dist);input()
                cur_kl_divergence = torch.nn.functional.kl_div(torch.log(torch.from_numpy(np.array(prob, dtype=float))), torch.from_numpy(np.array(gold_dist, dtype=float))).numpy() - 0
                kl_divergence_list.append(cur_kl_divergence)
            writer.write(json.dumps(result_dict) + "\n")
        avg_js_div = np.mean(js_divergence_list)
        new_avg_js_div= np.mean(new_js_divergence_list)
        new_avg_js_div_2 = np.mean(new_js_divergence_list_2)
        avg_kl_div = np.mean(kl_divergence_list)
        avg_entropy=np.mean(prediction_entropy_list)
        logger.info(f"***** JS {eval_task} {eval_split} {prefix} = {avg_js_div:.4f}")
        logger.info(f"***** entropy JS {eval_task} {eval_split} {prefix} = {new_avg_js_div:.4f}")
        logger.info(f"***** kl JS {eval_task} {eval_split} {prefix} = {new_avg_js_div_2:.4f}")
        logger.info(f"***** KL {eval_task} {eval_split} {prefix} = {avg_kl_div:.4f}")
        logger.info(f"***** Prediction Entropy {eval_task} {eval_split} {prefix} = {avg_entropy:.4f}")
    return results, all_predictions
def load_dataset(args, task, eval_split="train"):
    """Fetch raw examples for the requested split through the task processor.

    For each split, an explicit file path on `args` (train/finetune/dev/test)
    takes precedence over the default data_dir lookup.
    """
    processor = processors[task]()
    if eval_split == "train":
        if args.train is None:
            return processor.get_train_examples(args.data_dir)
        return processor.get_examples(args.train, "train")
    if "finetune" in eval_split:
        if args.finetune is None:
            return processor.get_finetune_examples(args.data_dir)
        return processor.get_examples(args.finetune, "finetune")
    if "dev" in eval_split:
        if args.dev is None:
            return processor.get_dev_examples(args.data_dir)
        return processor.get_examples(args.dev, "dev")
    if "test" in eval_split:
        if args.test is None:
            return processor.get_test_examples(args.data_dir)
        return processor.get_examples(args.test, "test")
    raise ValueError(f"eval_split should be train / dev / test, but was given {eval_split}")
def get_winogrande_tensors(features):
    """Pack multiple-choice WinoGrande features into a TensorDataset.

    Each feature exposes `choices_features` (a list of per-choice dicts with
    input_ids / input_mask / segment_ids), plus `label` and `example_id`.
    """
    def gather(field):
        # One row per feature; one inner list per answer choice.
        return [[choice[field] for choice in feat.choices_features] for feat in features]
    # Convert to Tensors and build dataset
    ids_tensor = torch.tensor(gather("input_ids"), dtype=torch.long)
    mask_tensor = torch.tensor(gather("input_mask"), dtype=torch.long)
    segment_tensor = torch.tensor(gather("segment_ids"), dtype=torch.long)
    label_tensor = torch.tensor([feat.label for feat in features], dtype=torch.long)
    example_id_tensor = torch.tensor([feat.example_id for feat in features], dtype=torch.long)
    return TensorDataset(ids_tensor, mask_tensor, segment_tensor, label_tensor, example_id_tensor)
def load_and_cache_examples(args, task, tokenizer, evaluate=False, data_split="train"):
    """Build (or load from disk cache) the tensorized training dataset.

    Tokenizes raw examples into features, caches them under
    args.features_cache_dir keyed by (data_split, model, seq length, task),
    optionally subsamples per args.setting, and returns a TensorDataset of
    (input_ids, attention_mask, token_type_ids, labels, example_ids).
    Labels are soft distributions over 3 classes for classification tasks.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training process the dataset,
        # and the others will use the cache
        torch.distributed.barrier()
    processor = processors[task]()
    output_mode = output_modes[task]
    if not os.path.exists(args.features_cache_dir):
        os.makedirs(args.features_cache_dir)
    cached_features_file = os.path.join(
        args.features_cache_dir,
        "cached_{}_{}_{}_{}".format(
            data_split,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    # Load data features from cache or dataset file
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        # original_id = torch.load(cached_id_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        print('label_list', label_list)
        if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        examples = load_dataset(args, task, data_split)
        original_id = []
        if task == "winogrande":
            # Multiple-choice path: different feature converter, early return below.
            features = convert_mc_examples_to_features(
                examples,
                label_list,
                args.max_seq_length,
                tokenizer,
                pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,)
        else:
            features, or_id = convert_examples_to_features(
                examples,
                tokenizer,
                label_list=label_list,
                max_length=args.max_seq_length,
                output_mode=output_mode,
                pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,)
            original_id.append(or_id)
            print('len_1', len(features))
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            # logger.info("Saving original_id into cached file %s", cached_id_file)
            torch.save(features, cached_features_file)
            # torch.save(original_id, cached_id_file)
    if args.local_rank == 0 and not evaluate:
        # Make sure only the first process in distributed training
        # process the dataset, and the others will use the cache
        torch.distributed.barrier()
    if task == "winogrande":
        return get_winogrande_tensors(features)
    # Convert to Tensors and build dataset
    if args.do_train:
        # "Setting" ablations: '1'/'2'/'3' keep the full feature set (the
        # assignments are no-ops); the '549k_*' settings randomly subsample
        # the features down to a fixed size.
        if args.setting =='1':
            features = features
            print('setting_1')
        if args.setting == '2':
            features=features
            print('setting_2')
        if args.setting == '3':
            features=features
            print('setting_3')
        if args.setting =='549k_2_1':
            sub_features = features
            new_features= random.sample(sub_features, 544368)
            features = new_features
        if args.setting =='549k_2_2':
            sub_features = features
            new_features= random.sample(sub_features, 544368)
            features = new_features
        if args.setting =='549k_2_3':
            sub_features = features
            new_features= random.sample(sub_features, 544368)
            features = new_features
        if args.setting =='549k_3_1':
            sub_features = features
            new_features= random.sample(sub_features, 539368)
            features = new_features
    print('len_2', len(features))
    # print('label', [item.label for item in features])
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)
    # print('example_id', all_example_ids)
    if output_mode == "classification":
        final_label=[]
        for f in features:
            if type(f.label)==list:
                # f.label is a list of per-annotator votes (0/1/2); count
                # the votes to build a soft label distribution.
                n_0 = 0
                n_1 = 0
                n_2 = 0
                for i in f.label:
                    if i==0:
                        n_0=n_0+1
                    if i==1:
                        n_1=n_1+1
                    if i==2:
                        n_2=n_2+1
                # NOTE(review): divides by 10, i.e. assumes exactly 10
                # annotations per example — confirm against the dataset.
                final_label.append([n_0/10, n_1/10, n_2/10])
            else:
                # Single hard label -> one-hot distribution.
                if f.label == 0:
                    label = [1, 0, 0]
                if f.label == 1:
                    label = [0, 1, 0]
                if f.label == 2:
                    label = [0, 0, 1]
                final_label.append(label)
        all_labels = torch.tensor([item for item in final_label], dtype=torch.float)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)
    return dataset #, original_id
def finetune_load_and_cache_examples(args, task, tokenizer, evaluate=False, data_split="finetune"):
    """Build (or load from disk cache) the tensorized finetuning dataset.

    Same pipeline as `load_and_cache_examples` but without the
    args.setting subsampling branches. Returns a TensorDataset of
    (input_ids, attention_mask, token_type_ids, labels, example_ids), with
    soft 3-class label distributions for classification tasks.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training process the dataset,
        # and the others will use the cache
        torch.distributed.barrier()
    processor = processors[task]()
    output_mode = output_modes[task]
    if not os.path.exists(args.features_cache_dir):
        os.makedirs(args.features_cache_dir)
    cached_features_file = os.path.join(
        args.features_cache_dir,
        "cached_{}_{}_{}_{}".format(
            data_split,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    # Load data features from cache or dataset file
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        # original_id = torch.load(cached_id_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        print('label_list', label_list)
        if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        examples = load_dataset(args, task, data_split)
        original_id = []
        if task == "winogrande":
            # Multiple-choice path: different feature converter, early return below.
            features = convert_mc_examples_to_features(
                examples,
                label_list,
                args.max_seq_length,
                tokenizer,
                pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,)
        else:
            features, or_id = convert_examples_to_features(
                examples,
                tokenizer,
                label_list=label_list,
                max_length=args.max_seq_length,
                output_mode=output_mode,
                pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,)
            original_id.append(or_id)
            print('len_1', len(features))
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            # logger.info("Saving original_id into cached file %s", cached_id_file)
            torch.save(features, cached_features_file)
            # torch.save(original_id, cached_id_file)
    if args.local_rank == 0 and not evaluate:
        # Make sure only the first process in distributed training
        # process the dataset, and the others will use the cache
        torch.distributed.barrier()
    if task == "winogrande":
        return get_winogrande_tensors(features)
    # Convert to Tensors and build dataset
    print('finetune_features', len(features))
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)
    if output_mode == "classification":
        final_label=[]
        for f in features:
            if type(f.label)==list:
                # f.label is a list of per-annotator votes (0/1/2); count
                # the votes to build a soft label distribution.
                n_0 = 0
                n_1 = 0
                n_2 = 0
                for i in f.label:
                    if i==0:
                        n_0=n_0+1
                    if i==1:
                        n_1=n_1+1
                    if i==2:
                        n_2=n_2+1
                # NOTE(review): divides by 10, i.e. assumes exactly 10
                # annotations per example — confirm against the dataset.
                final_label.append([n_0/10, n_1/10, n_2/10])
            else:
                # Single hard label -> one-hot distribution.
                if f.label==0:
                    label=[1, 0, 0]
                if f.label==1:
                    label=[0, 1, 0]
                if f.label==2:
                    label=[0, 0, 1]
                final_label.append(label)
        all_labels = torch.tensor([item for item in final_label], dtype=torch.float)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)
    return dataset #, original_id
def Convert(string):
    """Split *string* on single spaces and return the resulting token list."""
    # str.split already returns a list; no extra wrapping needed.
    return string.split(" ")
def build_vocab(examples):
    """Map every space-separated token in the examples to a unique integer id.

    Ids are assigned in first-seen order, scanning text_a then text_b of each
    example in sequence; text_b is skipped when falsy (e.g. None or empty).
    """
    vocab = {}

    def register(tokens):
        for token in tokens:
            # setdefault assigns the next id only for unseen tokens.
            vocab.setdefault(token, len(vocab))

    for example in examples:
        register(Convert(example.text_a))
        if example.text_b:
            register(Convert(example.text_b))
    return vocab
def lp_finetune_load_and_cache_examples(args, task, tokenizer, label_flag, evaluate=False, data_split="train"):
    """Build (or load from cache) a dataset for label-propagation finetuning.

    `label_flag` selects the variant:
      * 'single_label'      — the original examples for the split;
      * 'single_aug_label'  — a tf-idf word-level augmented copy
        (aug_ops "tf_idf-0.18") used as the consistency-training pair.

    NOTE(review): the cache file name depends only on data_split (not on
    label_flag), so cached features for the original and augmented variants
    collide if both are created with the same data_split — verify callers
    pass distinct splits or always overwrite the cache.

    Returns a TensorDataset of (input_ids, attention_mask, token_type_ids,
    labels, example_ids) with soft 3-class labels for classification.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training process the dataset,
        # and the others will use the cache
        torch.distributed.barrier()
    processor = processors[task]()
    output_mode = output_modes[task]
    if not os.path.exists(args.features_cache_dir):
        os.makedirs(args.features_cache_dir)
    cached_features_file = os.path.join(
        args.features_cache_dir,
        "cached_{}_{}_{}_{}".format(
            data_split,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    # Load data features from cache or dataset file
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        # original_id = torch.load(cached_id_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        print('label_list', label_list)
        if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        #label propagation
        if label_flag=='single_aug_label':
            # Augmented variant: tf-idf based word replacement over the raw
            # examples, then re-join the token lists back into strings.
            examples_beg = load_dataset(args, task, data_split)
            data_stats = word_level_augment.get_data_stats(examples_beg)
            aug_ops = "tf_idf-0.18"
            word_vocab = build_vocab(examples_beg)
            examples_aug = word_level_augment.word_level_augment(
                examples_beg, aug_ops, word_vocab, data_stats)
            for i in examples_aug:
                listToStr_a = ' '.join([str(elem) for elem in i.text_a])
                listToStr_b = ' '.join([str(elem) for elem in i.text_b])
                i.text_a = listToStr_a
                i.text_b =listToStr_b
        if label_flag =='single_label':
            original_examples = load_dataset(args, task, data_split)
        # import pdb
        # pdb.set_trace()
        original_id = []
        if task == "winogrande":
            # NOTE(review): this branch assumes label_flag == 'single_label';
            # with 'single_aug_label', original_examples is undefined here.
            examples =original_examples
            features = convert_mc_examples_to_features(
                examples,
                label_list,
                args.max_seq_length,
                tokenizer,
                pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,)
        else:
            if label_flag =='single_label':
                examples = original_examples
                # print('single', examples[0])
                features, or_id = convert_examples_to_features(
                    examples,
                    tokenizer,
                    label_list=label_list,
                    max_length=args.max_seq_length,
                    output_mode=output_mode,
                    pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
                    pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                    pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,)
                original_id.append(or_id)
                print('len_1', len(features))
            if label_flag =='single_aug_label':
                examples = examples_aug
                # print('aug', examples[0])
                features, or_id = convert_examples_to_features(
                    examples,
                    tokenizer,
                    label_list=label_list,
                    max_length=args.max_seq_length,
                    output_mode=output_mode,
                    pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
                    pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                    pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0, )
                original_id.append(or_id)
                print('len_1', len(features))
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            # logger.info("Saving original_id into cached file %s", cached_id_file)
            torch.save(features, cached_features_file)
            # torch.save(original_id, cached_id_file)
    if args.local_rank == 0 and not evaluate:
        # Make sure only the first process in distributed training
        # process the dataset, and the others will use the cache
        torch.distributed.barrier()
    if task == "winogrande":
        return get_winogrande_tensors(features)
    # Convert to Tensors and build dataset
    print('finetune_features', len(features))
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)
    # print('example_id', all_example_ids)
    if output_mode == "classification":
        final_label=[]
        for f in features:
            if type(f.label)==list:
                # f.label is a list of per-annotator votes (0/1/2); count
                # the votes to build a soft label distribution.
                n_0 = 0
                n_1 = 0
                n_2 = 0
                for i in f.label:
                    if i==0:
                        n_0=n_0+1
                    if i==1:
                        n_1=n_1+1
                    if i==2:
                        n_2=n_2+1
                # NOTE(review): divides by 10, i.e. assumes exactly 10
                # annotations per example — confirm against the dataset.
                final_label.append([n_0/10, n_1/10, n_2/10])
            else:
                # Single hard label -> one-hot distribution.
                if f.label==0:
                    label=[1, 0, 0]
                if f.label==1:
                    label=[0, 1, 0]
                if f.label==2:
                    label=[0, 0, 1]
                final_label.append(label)
        all_labels = torch.tensor([item for item in final_label], dtype=torch.float)
        # print('final_label', final_label)
        # print('train_all_labels', all_labels)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)
    return dataset #, original_id
def eval_load_and_cache_examples(args, task, tokenizer, evaluate=True, data_split=f"dev_"""):
if args.local_rank not in [-1, 0] and not evaluate:
# Make sure only the first process in distributed training process the dataset,
# and the others will use the cache
torch.distributed.barrier()
processor = processors[task]()
output_mode = output_modes[task]
if not os.path.exists(args.features_cache_dir):
os.makedirs(args.features_cache_dir)
cached_features_file = os.path.join(
args.features_cache_dir,
"cached_{}_{}_{}_{}".format(
data_split,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
cached_id_file = os.path.join(
args.features_cache_dir,
"id_cached_{}_{}_{}_{}".format(
data_split,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
cached_eval_data_file = os.path.join(
args.features_cache_dir,
"eval_data_cached_{}_{}_{}_{}".format(
data_split,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
# Load data features from cache or dataset file
if os.path.exists(cached_eval_data_file):
logger.info("Loading features from cached file %s", cached_eval_data_file)
features = torch.load(cached_eval_data_file)
original_id = torch.load(cached_id_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
examples = load_dataset(args, task, data_split)
original_id = []
if task == "winogrande":
features = convert_mc_examples_to_features(
examples,
label_list,
args.max_seq_length,
tokenizer,
pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
pad_token=tokenizer.pad_token_id,
pad_token_segment_id=tokenizer.pad_token_type_id,)
else:
features, or_id = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,)
original_id.append(or_id)
logger.info("***********Create New Feautrs****************************************")
print('creating_eval_len_new_features', len(features))
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
logger.info("Saving original_id into cached file %s", cached_id_file)
logger.info("Saving eval data into cached file %s", cached_eval_data_file)
torch.save(features, cached_features_file)
torch.save(original_id, cached_id_file)
torch.save(features, cached_eval_data_file)
if args.local_rank == 0 and not evaluate:
# Make sure only the first process in distributed training
# process the dataset, and the others will use the cache
torch.distributed.barrier()
if task == "winogrande":
return get_winogrande_tensors(features)
print('eval_features', len(features))
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)
if output_mode == "classification":
final_label = []
for f in features:
if type(f.label) == list:
n_0 = 0
n_1 = 0
n_2 = 0
for i in f.label:
if i == 0:
n_0 = n_0 + 1
if i == 1:
n_1 = n_1 + 1
if i == 2:
n_2 = n_2 + 1
final_label.append([n_0 / 10, n_1 / 10, n_2 / 10])
else:
if f.label == 0:
label = [1, 0, 0]
if f.label == 1:
label = [0, 1, 0]
if f.label == 2:
label = [0, 0, 1]
final_label.append(label)
all_labels = torch.tensor([item for item in final_label], dtype=torch.float)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)
return dataset, original_id
def run_transformer(args):
    """End-to-end driver: set up CUDA/distributed state, train, then evaluate.

    `args` carries the merged CLI + jsonnet configuration (see `main`).
    Trains when `args.do_train` (optionally finetuning on a small dataset,
    with or without label propagation), saves the final model, and evaluates
    saved checkpoints on the dev and/or OOD test splits.
    """
    if (os.path.exists(args.output_dir)
            and os.listdir(args.output_dir)
            and args.do_train
            and not args.overwrite_output_dir):
        raise ValueError(
            f"Output directory ({args.output_dir}) already exists and is not empty."
            f" Use --overwrite_output_dir to overcome.")

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see
        # https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        logger.info("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,)
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,)

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,)
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,)
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,)

    if args.local_rank == 0:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()

    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)

    # Training
    args.learning_rate = float(args.learning_rate)
    if args.setting != '1' and args.do_finetune:
        args.finetune_learning_rate = float(args.finetune_learning_rate)
    if args.do_train:
        # If training for the first time, remove cache. If training from a checkpoint, keep cache.
        if os.path.exists(args.features_cache_dir) and not args.overwrite_output_dir:
            logger.info(f"Found existing cache for the same seed {args.seed}: "
                        f"{args.features_cache_dir}...Deleting!")
            shutil.rmtree(args.features_cache_dir)

        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        save_args_to_file(args, mode="train")

        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        print('len_train_dataset', len(train_dataset))
        flag_in_training = 'train'
        global_step, tr_loss = train(args, train_dataset, model, tokenizer, flag_in_training)
        logger.info(f" global_step = {global_step}, average loss = {tr_loss:.4f}")

        # Finetune small dataset
        if args.setting != '1' and args.do_finetune:
            finetune_dataset = finetune_load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
            flag_in_training = 'finetune'
            if args.label_propagation:
                label_flag = 'single_label'
                single_dataset = lp_finetune_load_and_cache_examples(args, args.task_name, tokenizer, label_flag, evaluate=False)
                label_flag = 'single_aug_label'
                single_aug_dataset = lp_finetune_load_and_cache_examples(args, args.task_name, tokenizer, label_flag, evaluate=False)
                global_step, tr_loss = lp_train(args, finetune_dataset, single_dataset, single_aug_dataset,
                                                model, tokenizer, flag_in_training)
                logger.info(f" global_step = {global_step}, average loss = {tr_loss:.4f}")
            else:
                global_step, tr_loss = train(args, finetune_dataset,
                                             model, tokenizer, flag_in_training)
                logger.info(f" global_step = {global_step}, average loss = {tr_loss:.4f}")

    # Saving best-practices: if you use defaults names for the model,
    # you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        if not args.evaluate_during_training:
            logger.info("Saving model checkpoint to %s", args.output_dir)
            # Save a trained model, configuration and tokenizer using `save_pretrained()`.
            # They can then be reloaded using `from_pretrained()`
            # Take care of distributed/parallel training
            model_to_save = (model.module if hasattr(model, "module") else model)
            model_to_save.save_pretrained(args.output_dir)
            tokenizer.save_pretrained(args.output_dir)

            # Good practice: save your training arguments together with the trained model
            torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
        logger.info(" **** Done with training ****")

    # Evaluation
    eval_splits = []
    if args.do_eval:
        eval_splits.append("dev")
    if args.do_test:
        eval_splits.append("test")

    # BUGFIX: the condition was `args.do_test or args.do_eval and args.local_rank in [-1, 0]`,
    # which parses as `do_test or (do_eval and rank-check)` — so with --do_test every
    # distributed rank would evaluate. Parenthesized so the rank check guards both flags.
    if (args.do_test or args.do_eval) and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(
                    glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        results = {}
        prefix = args.test.split("/")[-1].split(".tsv")[0] if args.test else ""
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix += checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            for eval_split in eval_splits:
                save_args_to_file(args, mode=eval_split)
                result, predictions = evaluate(args, model, tokenizer, prefix=prefix, eval_split=eval_split)
                result = dict((k + f"_{global_step}", v) for k, v in result.items())
                results.update(result)
                if args.test and "diagnostic" in args.test:
                    # For running diagnostics with MNLI, run as SNLI and use hack.
                    evaluate_by_category(predictions[args.task_name],
                                        mnli_hack=True if args.task_name in ["SNLI", "snli"] and "mnli" in args.output_dir else False,
                                        eval_filename=os.path.join(args.output_dir, "eval_metrics_diagnostics.json"),
                                        diagnostics_file_carto=args.test)
    logger.info(" **** Done ****")
def main():
    """Parse CLI flags, merge them over the jsonnet config, and run the pipeline.

    CLI values take precedence over the config file: the jsonnet file is read
    first and then updated with the parsed argparse namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config",
                        "-c",
                        type=os.path.abspath,
                        required=True,
                        help="Main config file with basic arguments.")
    parser.add_argument("--output_dir",
                        "-o",
                        type=os.path.abspath,
                        required=True,
                        help="Output directory for model.")
    parser.add_argument("--do_train",
                        action="store_true",
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action="store_true",
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test",
                        action="store_true",
                        help="Whether to run eval on the (OOD) test set.")
    parser.add_argument("--do_finetune",
                        action="store_true",
                        help="Whether to finetune.")
    parser.add_argument("--label_propagation",
                        action="store_true",
                        help="Whether to label propagation.")
    parser.add_argument('--ft_num_train_epochs', type=float, help="finetuning epochs")
    parser.add_argument("--do_temperature",
                        action="store_true",
                        help="Whether to temperature scaling.")
    parser.add_argument("--do_train_label_smooth",
                        action="store_true",
                        help="Whether to do train label smoothing.")
    parser.add_argument("--overwrite_output_dir",
                        action="store_true",
                        help="Whether to overwrite the previous output.")
    parser.add_argument("--overwrite_cache",
                        action="store_true",
                        help="Whether to overwrite the previous data cache.")
    parser.add_argument("--use_existing_eval_data",
                        action="store_true",
                        help="Whether to use the existing eval data to eval.")
    parser.add_argument('--setting', type=str, help="Different setting")
    parser.add_argument("--test",
                        type=os.path.abspath,
                        help="OOD test set.")
    # TODO(SS): Automatically map tasks to OOD test sets.

    args_from_cli = parser.parse_args()
    other_args = json.loads(_jsonnet.evaluate_file(args_from_cli.config))
    other_args.update(**vars(args_from_cli))
    args = Params(MODEL_CLASSES, ALL_MODELS, processors, other_args)
    run_transformer(args)
if __name__ == "__main__":
main()
| 42.668776 | 207 | 0.601854 |
import _jsonnet
import argparse
import glob
import json
import logging
import numpy as np
import os
import random
import shutil
import torch
from scipy.spatial import distance
from scipy.stats import entropy
from itertools import cycle
import torch.nn as nn
import word_level_augment
import torch.nn.functional as F
import mmd_loss
from torch.utils.data import Dataset
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertTokenizer,
RobertaConfig,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from cartography.classification.glue_utils import adapted_glue_compute_metrics as compute_metrics
from cartography.classification.glue_utils import adapted_glue_convert_examples_to_features as convert_examples_to_features
from cartography.classification.glue_utils import glue_output_modes as output_modes
from cartography.classification.glue_utils import glue_processors as processors
from cartography.classification.diagnostics_evaluation import evaluate_by_category
from cartography.classification.models import (
AdaptedBertForMultipleChoice,
AdaptedBertForSequenceClassification,
AdaptedRobertaForMultipleChoice,
AdaptedRobertaForSequenceClassification
)
from cartography.classification.multiple_choice_utils import convert_mc_examples_to_features
from cartography.classification.params import Params, save_args_to_file
from cartography.selection.selection_utils import log_training_dynamics
from cartography.data_utils_glue import convert_string_to_unique_number
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
# Module-level logger for this training script.
logger = logging.getLogger(__name__)

# Flat tuple of every pretrained shortcut name across the supported config
# classes; handed to Params in main() together with MODEL_CLASSES.
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            RobertaConfig,
        )
    ),
    (),
)

# Maps a --model_type key to its (config, model, tokenizer) classes; the
# "*_mc" variants use the multiple-choice model heads.
MODEL_CLASSES = {
    "bert": (BertConfig, AdaptedBertForSequenceClassification, BertTokenizer),
    "bert_mc": (BertConfig, AdaptedBertForMultipleChoice, BertTokenizer),
    "roberta": (RobertaConfig, AdaptedRobertaForSequenceClassification, RobertaTokenizer),
    "roberta_mc": (RobertaConfig, AdaptedRobertaForMultipleChoice, RobertaTokenizer),
}
def set_seed(args):
    """Seed every RNG (python, numpy, torch; CUDA too when GPUs are in use)."""
    seed = args.seed
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
class TripleDataset(torch.utils.data.Dataset):
    """Zip several datasets: item i is the tuple of each member's item i.

    The reported length is capped by the shortest member so that every index
    is valid for all of them.
    """

    def __init__(self, *datasets):
        # Keep the member datasets in the order they were given.
        self.datasets = datasets

    def __getitem__(self, i):
        items = [ds[i] for ds in self.datasets]
        return tuple(items)

    def __len__(self):
        lengths = [len(ds) for ds in self.datasets]
        return min(lengths)
def train(args, train_dataset, model, tokenizer, flag_in_training):
    """Train `model` on `train_dataset`, recording per-epoch training dynamics.

    `flag_in_training` is 'train' or 'finetune'; finetuning uses
    `args.finetune_learning_rate` and triples the epoch budget. After each
    epoch the ids/logits/golds are written out via `log_training_dynamics`
    (dataset cartography), dev is evaluated when
    `args.evaluate_during_training`, and training stops early after
    `args.patience` epochs without a new best dev performance.

    Returns:
        (global_step, average training loss per optimization step)
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_dataloader = DataLoader(
        train_dataset, batch_size=args.train_batch_size, shuffle=True)
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (
            len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer: no weight decay on biases and LayerNorm weights.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         "weight_decay": args.weight_decay,
         },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0
         },
    ]
    # Finetuning uses its own learning rate.
    if flag_in_training == 'finetune':
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.finetune_learning_rate, eps=args.adam_epsilon)
    else:
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )

    # Restore optimizer/scheduler state when resuming from a checkpoint dir.
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )

    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" local_rank = %d", args.local_rank)
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)

    global_step = 0
    epochs_trained = 0
    steps_trained_in_this_epoch = 0
    # Check if continuing training from a checkpoint (path encodes the step).
    if os.path.exists(args.model_name_or_path):
        global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
        steps_trained_in_this_epoch = global_step % (
            len(train_dataloader) // args.gradient_accumulation_steps)
        logger.info(f" Continuing training from checkpoint, will skip to saved global_step")
        logger.info(f" Continuing training from epoch {epochs_trained}")
        logger.info(f" Continuing training from global step {global_step}")
        logger.info(f" Will skip the first {steps_trained_in_this_epoch} steps in the first epoch")

    tr_loss, logging_loss, epoch_loss = 0.0, 0.0, 0.0
    model.zero_grad()
    # Finetuning runs for 3x the configured number of epochs.
    if flag_in_training == 'finetune':
        train_iterator = trange(epochs_trained,
                                (int(args.num_train_epochs)*3),
                                desc="Epoch",
                                disable=args.local_rank not in [-1, 0],
                                mininterval=10,
                                ncols=100)
    else:
        train_iterator = trange(epochs_trained,
                                int(args.num_train_epochs),
                                desc="Epoch",
                                disable=args.local_rank not in [-1, 0],
                                mininterval=10,
                                ncols=100)
    set_seed(args)  # re-seed here for reproducibility across resumes
    best_dev_performance = 0
    best_epoch = epochs_trained
    train_acc = 0.0
    total_entropy = 0.
    total_sample_size = 0

    for epoch, _ in enumerate(train_iterator):
        epoch_iterator = tqdm(train_dataloader,
                              desc="Iteration",
                              disable=args.local_rank not in [-1, 0],
                              mininterval=10,
                              ncols=100)
        train_iterator.set_description(f"train_epoch: {epoch} train_acc: {train_acc:.4f}")
        # Per-epoch accumulators for training dynamics (dataset cartography).
        train_ids = None
        train_golds = None
        train_logits = None
        train_losses = None
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training.
            if steps_trained_in_this_epoch > 0:
                steps_trained_in_this_epoch -= 1
                continue

            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
            if args.model_type != "distilbert":
                # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids.
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                )
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
            # NOTE(review): assumes the model's last output is a per-example
            # entropy tensor — confirm against the Adapted* model classes.
            entropy = outputs[-1]

            if train_logits is None:  # Keep track of training dynamics.
                train_ids = batch[4].detach().cpu().numpy()
                train_logits = outputs[1].detach().cpu().numpy()
                train_golds = inputs["labels"].detach().cpu().numpy()
                train_losses = loss.detach().cpu().numpy()
                train_entropy = entropy.detach().cpu().numpy()
                print(entropy.size(), "check entropy size")
            else:
                train_ids = np.append(train_ids, batch[4].detach().cpu().numpy())
                train_logits = np.append(train_logits, outputs[1].detach().cpu().numpy(), axis=0)
                train_golds = np.concatenate((train_golds, inputs["labels"].detach().cpu().numpy()), 0)
                train_losses = np.append(train_losses, loss.detach().cpu().numpy())
                train_entropy = np.append(train_entropy, entropy.detach().cpu().numpy())

            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1

                if (
                    args.local_rank in [-1, 0] and
                    args.logging_steps > 0 and
                    global_step % args.logging_steps == 0
                ):
                    epoch_log = {}
                    # Only evaluate when single GPU otherwise metrics may not average well
                    if args.local_rank == -1 and args.evaluate_during_training_epoch:
                        logger.info(f"From within the epoch at step {step}")
                        results, _ = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            eval_key = "eval_{}".format(key)
                            epoch_log[eval_key] = value

                    epoch_log["learning_rate"] = scheduler.get_lr()[0]
                    epoch_log["loss"] = (tr_loss - logging_loss) / args.logging_steps
                    logging_loss = tr_loss

                    for key, value in epoch_log.items():
                        tb_writer.add_scalar(key, value, global_step)
                    logger.info(json.dumps({**epoch_log, **{"step": global_step}}))

                if (
                    args.local_rank in [-1, 0] and
                    args.save_steps > 0 and
                    global_step % args.save_steps == 0
                ):
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)

                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)

                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)

            epoch_iterator.set_description(f"lr = {scheduler.get_lr()[0]:.8f}, "
                                           f"loss = {(tr_loss-epoch_loss)/(step+1):.4f}")
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break

        # NOTE(review): the denominator counts entries with entropy < -1e-10
        # (strictly negative values) — it looks like it is meant to count the
        # "selected" examples; confirm the intended comparison sign.
        mean_entropy = np.sum(train_entropy) / np.where(train_entropy < -(np.ones_like(train_entropy) * 1e-10),
                                                        np.ones_like(train_entropy),
                                                        np.zeros_like(train_entropy)).sum()
        logger.info(f"*********************************selected_questions*********************************: {mean_entropy:.4f}***")

        #### Post epoch eval ####
        # Only evaluate when single GPU otherwise metrics may not average well
        if args.local_rank == -1 and args.evaluate_during_training:
            best_dev_performance, best_epoch = save_model(
                args, model, tokenizer, epoch, best_epoch, best_dev_performance)

        # Persist this epoch's training dynamics for cartography analysis.
        log_training_dynamics(output_dir=args.output_dir,
                              epoch=epoch,
                              train_ids=list(train_ids),
                              train_logits=list(train_logits),
                              train_golds=list(train_golds))
        train_result = compute_metrics(args.task_name, np.argmax(train_logits, axis=1), train_golds)
        train_acc = train_result["acc"]

        epoch_log = {"epoch": epoch,
                     "train_acc": train_acc,
                     "best_dev_performance": best_dev_performance,
                     "avg_batch_loss": (tr_loss - epoch_loss) / args.per_gpu_train_batch_size,
                     "learning_rate": scheduler.get_lr()[0],}
        epoch_loss = tr_loss

        logger.info(f" End of epoch : {epoch}")
        with open(os.path.join(args.output_dir, f"eval_metrics_train.json"), "a") as toutfile:
            toutfile.write(json.dumps(epoch_log) + "\n")
        for key, value in epoch_log.items():
            tb_writer.add_scalar(key, value, global_step)
            logger.info(f" {key}: {value:.6f}")

        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
        elif args.evaluate_during_training and epoch - best_epoch >= args.patience:
            # Early stopping: no dev improvement for `patience` epochs.
            logger.info(f"Ran out of patience. Best epoch was {best_epoch}. "
                        f"Stopping training at epoch {epoch} out of {args.num_train_epochs} epochs.")
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def interleave(x, size):
    """Interleave consecutive chunks of `size` rows along dim 0.

    Splits the leading dimension into groups of `size`, swaps the group and
    within-group axes, and flattens back — turning [a1..aS, b1..bS, ...]
    into [a1, b1, ..., a2, b2, ...].
    """
    trailing_dims = list(x.shape)[1:]
    grouped = x.reshape([-1, size] + trailing_dims)
    swapped = grouped.transpose(0, 1)
    return swapped.reshape([-1] + trailing_dims)
def lp_train(args, train_dataset, single_dataset, single_aug_dataset, model, tokenizer, flag_in_training):
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
# train_sampler = RandomSampler(
# train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, batch_size=args.train_batch_size, shuffle=True)
if args.label_propagation and args.do_finetune:
single_aug= TripleDataset(single_dataset, single_aug_dataset)
single_train_dataloader = DataLoader(
single_aug, batch_size=args.train_batch_size, shuffle=True)
if args.max_steps > 0:
t_total = args.max_steps
args.ft_num_train_epochs = args.max_steps // (
len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.ft_num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0
},
]
if flag_in_training =='finetune':
optimizer = AdamW(optimizer_grouped_parameters, lr=args.finetune_learning_rate, eps=args.adam_epsilon)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
# args.local_rank = -1
# get_world_size = 1
# args.train_batch_size = 128
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.ft_num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" local_rank = %d", args.local_rank)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_this_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to gobal_step of last saved checkpoint from model path
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_this_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(f" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {global_step}")
logger.info(f" Will skip the first {steps_trained_in_this_epoch} steps in the first epoch")
tr_loss, logging_loss, epoch_loss = 0.0, 0.0, 0.0
model.zero_grad()
if flag_in_training =='finetune':
train_iterator = trange(epochs_trained,
int(args.ft_num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
else:
train_iterator = trange(epochs_trained,
int(args.num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
set_seed(args) # Added here for reproductibility
best_dev_performance = 0
best_epoch = epochs_trained
train_acc = 0.0
total_entropy = 0.
total_sample_size = 0
for epoch, _ in enumerate(train_iterator):
epoch_iterator = tqdm(train_dataloader,
desc="Iteration",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
train_iterator.set_description(f"train_epoch: {epoch} train_acc: {train_acc:.4f}")
train_ids = None
train_golds = None
train_logits = None
train_losses = None
#label propagation
# lingyige_loader = None
single_iter = iter(single_train_dataloader)
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_this_epoch > 0:
steps_trained_in_this_epoch -= 1
continue
#在这个for loop 里面或外面 加data loader 的判断,就是两个data loader load进去
model.train()
try:
inputs_u_w, inputs_u_s = single_iter.next()
except StopIteration:
single_iter = iter(single_train_dataloader)
inputs_u_w, inputs_u_s = single_iter.next()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0]
if args.label_propagation and args.do_finetune:
batch_single = tuple(t.to(args.device) for t in inputs_u_w)
inputs_single = {"input_ids": batch_single[0], "attention_mask": batch_single[1], "labels": batch_single[3]}
if args.model_type != "distilbert":
inputs_single["token_type_ids"] = (
batch_single[2] if args.model_type in ["bert", "xlnet", "albert"] else None
)
batch_single_aug = tuple(t.to(args.device) for t in inputs_u_s)
inputs_single_aug = {"input_ids": batch_single_aug[0], "attention_mask": batch_single_aug[1], "labels": batch_single_aug[3]}
if args.model_type != "distilbert":
inputs_single_aug["token_type_ids"] = (
batch_single_aug[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
reg_loss=nn.KLDivLoss()(F.log_softmax(model(**inputs_single_aug)[1]), torch.softmax(model(**inputs_single)[1], dim=-1).detach())
loss= loss + reg_loss *1.0
entropy=outputs[-1]
if train_logits is None:
train_ids = batch[4].detach().cpu().numpy()
train_logits = outputs[1].detach().cpu().numpy()
train_golds = inputs["labels"].detach().cpu().numpy()
train_losses = loss.detach().cpu().numpy()
train_entropy = entropy.detach().cpu().numpy()
print(entropy.size(), "check entropy size")
else:
train_ids = np.append(train_ids, batch[4].detach().cpu().numpy())
train_logits = np.append(train_logits, outputs[1].detach().cpu().numpy(), axis=0)
train_golds = np.concatenate((train_golds, inputs["labels"].detach().cpu().numpy()), 0)
train_losses = np.append(train_losses, loss.detach().cpu().numpy())
train_entropy = np.append(train_entropy, entropy.detach().cpu().numpy())
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0] and
args.logging_steps > 0 and
global_step % args.logging_steps == 0
):
epoch_log = {}
if args.local_rank == -1 and args.evaluate_during_training_epoch:
logger.info(f"From within the epoch at step {step}")
results, _ = evaluate(args, model, tokenizer)
for key, value in results.items():
eval_key = "eval_{}".format(key)
epoch_log[eval_key] = value
epoch_log["learning_rate"] = scheduler.get_lr()[0]
epoch_log["loss"] = (tr_loss - logging_loss) / args.logging_steps
logging_loss = tr_loss
for key, value in epoch_log.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**epoch_log, **{"step": global_step}}))
if (
args.local_rank in [-1, 0] and
args.save_steps > 0 and
global_step % args.save_steps == 0
):
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
)
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
epoch_iterator.set_description(f"lr = {scheduler.get_lr()[0]:.8f}, "
f"loss = {(tr_loss-epoch_loss)/(step+1):.4f}")
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
mean_entropy= np.sum(train_entropy) / np.where(train_entropy < -(np.ones_like(train_entropy) * 1e-10),
np.ones_like(train_entropy),
np.zeros_like(train_entropy)).sum()
logger.info(f"*********************************selected_questions*********************************: {mean_entropy:.4f}***")
best_dev_performance, best_epoch = save_model(
args, model, tokenizer, epoch, best_epoch, best_dev_performance)
log_training_dynamics(output_dir=args.output_dir,
epoch=epoch,
train_ids=list(train_ids),
train_logits=list(train_logits),
train_golds=list(train_golds))
train_result = compute_metrics(args.task_name, np.argmax(train_logits, axis=1), train_golds)
train_acc = train_result["acc"]
epoch_log = {"epoch": epoch,
"train_acc": train_acc,
"best_dev_performance": best_dev_performance,
"avg_batch_loss": (tr_loss - epoch_loss) / args.per_gpu_train_batch_size,
"learning_rate": scheduler.get_lr()[0],}
epoch_loss = tr_loss
logger.info(f" End of epoch : {epoch}")
with open(os.path.join(args.output_dir, f"eval_metrics_train.json"), "a") as toutfile:
toutfile.write(json.dumps(epoch_log) + "\n")
for key, value in epoch_log.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(f" {key}: {value:.6f}")
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
elif args.evaluate_during_training and epoch - best_epoch >= args.patience:
logger.info(f"Ran out of patience. Best epoch was {best_epoch}. "
f"Stopping training at epoch {epoch} out of {args.ft_num_train_epochs} epochs.")
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def save_model(args, model, tokenizer, epoch, best_epoch, best_dev_performance):
    """Evaluate on the dev split and save a checkpoint to ``args.output_dir``.

    NOTE(review): the guard below is ``if True``, so the model is saved after
    EVERY call regardless of dev performance, and ``best_epoch`` /
    ``best_dev_performance`` always track the latest epoch. The log message
    suggests the intended condition was
    ``dev_performance > best_dev_performance`` — confirm before changing.

    Returns:
        (best_dev_performance, best_epoch) after this epoch.
    """
    results, _ = evaluate(args, model, tokenizer, prefix="in_training")
    desired_metric = "acc"
    dev_performance = results.get(desired_metric)
    if True:
        best_epoch = epoch
        best_dev_performance = dev_performance
        # Unwrap DataParallel/DistributedDataParallel before saving.
        model_to_save = (model.module if hasattr(model, "module") else model)
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
        logger.info(f"*** Found BEST model, and saved checkpoint. "
                    f"BEST dev performance : {dev_performance:.4f} ***")
    return best_dev_performance, best_epoch
def JSD(P, Q):
    """Jensen-Shannon divergence between distributions ``P`` and ``Q``.

    Computed as the mean KL divergence of each distribution from their
    50/50 mixture.
    """
    mixture = 0.5 * (P + Q)
    kl_p = entropy(P, mixture)
    kl_q = entropy(Q, mixture)
    return (kl_p + kl_q) * 0.5
def JSD_2(P, Q):
    """Jensen-Shannon-style divergence computed via torch's KL divergence.

    NOTE: ``torch.nn.functional.kl_div`` defaults to 'mean' reduction (an
    element-wise mean, not a sum), so this is a scaled variant of the true
    JSD rather than the textbook value.
    """
    p_arr = np.array(P, dtype=float)
    q_arr = np.array(Q, dtype=float)
    mixture = 0.5 * (p_arr + q_arr)
    log_mixture = torch.log(torch.from_numpy(mixture))
    kl_pm = torch.nn.functional.kl_div(log_mixture, torch.from_numpy(p_arr)).numpy() - 0
    kl_qm = torch.nn.functional.kl_div(log_mixture, torch.from_numpy(q_arr)).numpy() - 0
    return 0.5 * (kl_pm + kl_qm)
def evaluate(args, model, tokenizer, prefix="", eval_split="dev"):
    """Run evaluation on ``eval_split`` and write metrics/prediction files.

    Writes three artifacts per task into the output dir: a metrics JSON,
    a per-example prediction ``.lst`` file, and an "ours_*.json" file with
    divergence statistics between gold label distributions and model probs.

    Returns:
        (results, all_predictions): metric dict and per-task list of
        prediction records.

    NOTE(review): ``probs`` and ``max_confidences`` are only assigned when
    ``args.output_mode == "classification"``; the prediction-writing loops
    below use them unconditionally, so a regression task would raise
    NameError here — confirm this path is classification-only.
    """
    eval_task_names = (args.task_name,)
    eval_outputs_dirs = (args.output_dir,)
    results = {}
    all_predictions = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset, pair_id = eval_load_and_cache_examples(
            args, eval_task, tokenizer, evaluate=True, data_split=f"{eval_split}_{prefix}")
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Sequential sampling keeps predictions aligned with example order.
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(
            eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # Multi-GPU eval (wrap only once).
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        logger.info(f"***** Running {eval_task} {prefix} evaluation on {eval_split} *****")
        logger.info(f" Num examples = {len(eval_dataset)}")
        logger.info(f" Batch size = {args.eval_batch_size}")
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        example_ids = []
        gold_labels = []
        for batch in tqdm(eval_dataloader, desc="Evaluating", mininterval=10, ncols=100):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                if args.model_type != "distilbert":
                    # XLM, DistilBERT, RoBERTa and XLM-RoBERTa don't use segment ids.
                    inputs["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
                example_ids += batch[4].tolist()
                gold_labels += batch[3].tolist()
            nb_eval_steps += 1
            # Accumulate logits and gold label distributions on CPU.
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(
                    out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            probs = torch.nn.functional.softmax(torch.Tensor(preds), dim=-1)
            if args.do_temperature:
                # Temperature scaling with fixed T=1.75 — presumably tuned
                # offline; confirm before changing.
                probs = torch.nn.functional.softmax(torch.Tensor(preds)/1.75, dim=-1)
            max_confidences = (torch.max(probs, dim=-1)[0]).tolist()
            preds = np.argmax(preds, axis=1)  # Max of logit is the same as max of probability.
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        # order: [E, N, C]
        results.update(result)
        output_eval_file = os.path.join(
            eval_output_dir, f"eval_metrics_{eval_task}_{eval_split}_{prefix}.json")
        logger.info(f"***** {eval_task} {eval_split} results {prefix} *****")
        for key in sorted(result.keys()):
            logger.info(f"{eval_task} {eval_split} {prefix} {key} = {result[key]:.4f}")
        with open(output_eval_file, "a") as writer:
            writer.write(json.dumps(results) + "\n")
        # Write one JSON record per example: predicted/gold label + confidence.
        all_predictions[eval_task] = []
        output_pred_file = os.path.join(
            eval_output_dir, f"predictions_{eval_task}_{eval_split}_{prefix}.lst")
        with open(output_pred_file, "w") as writer:
            logger.info(f"***** Write {eval_task} {eval_split} predictions {prefix} *****")
            for ex_id, pred, gold, max_conf, prob in zip(
                    example_ids, preds, gold_labels, max_confidences, probs.tolist()):
                record = {"guid": ex_id,
                          "label": processors[args.task_name]().get_labels()[pred],
                          "gold": processors[args.task_name]().get_labels()[int(np.argmax(gold))],
                          "confidence": max_conf,
                          "probabilities": prob}
                all_predictions[eval_task].append(record)
                writer.write(json.dumps(record) + "\n")
        # order: [E, N, C]
        # Map hashed example ids back to the original string uids.
        combined_id = dict()
        for id in pair_id[0]:
            each_id = convert_string_to_unique_number(id)
            combined_id[each_id] = id
        ours_file = os.path.join(
            eval_output_dir, f"ours_{eval_task}_{eval_split}_{prefix}.json")
        result_dict = dict()
        result_dict['ours'] = dict()
        js_divergence_list = []
        prediction_entropy_list = []
        kl_divergence_list = []
        new_js_divergence_list = []
        new_js_divergence_list_2 = []
        with open(ours_file, "w") as writer:
            logger.info(f"***** Write ours {eval_task} {eval_split} predictions {prefix} *****")
            for ex_id, pred, gold, max_conf, prob in zip(
                    example_ids, preds, gold_labels, max_confidences, probs.tolist()):
                if ex_id in list(combined_id.keys()):
                    ex_idvalue = combined_id[ex_id]
                else:
                    # Fallback uid for examples whose hash is not in pair_id.
                    ex_idvalue = '000000'
                result_dict['ours'][ex_idvalue] = {"uid": ex_idvalue,
                                                   "predicted_probabilities": prob,
                                                   "predicted_label": processors[args.task_name]().get_labels()[pred]}
                gold_dist = gold
                # Jensen-Shannon distance between gold distribution and model probs.
                cur_js_divergence = distance.jensenshannon(gold_dist, prob)
                if np.isnan(cur_js_divergence):
                    print("JS for this example is `nan', we will set JS to 0 for the current example. "
                          "This can be a potential error.",
                          "Gold distribution:", gold_dist,
                          "Model distribution:", prob,)
                    cur_js_divergence = 0
                else:
                    pass
                new_cur_js_divergence = JSD(np.array(prob, dtype=float), np.array(gold_dist, dtype=float))
                new_cur_js_divergence_2 = JSD_2(prob, gold_dist)
                js_divergence_list.append(cur_js_divergence)
                new_js_divergence_list.append(new_cur_js_divergence)
                new_js_divergence_list_2.append(new_cur_js_divergence_2)
                prediction_entropy = entropy(prob)
                prediction_entropy_list.append(prediction_entropy)
                # KL(gold || prob) via torch kl_div ('mean' reduction by default).
                cur_kl_divergence = torch.nn.functional.kl_div(torch.log(torch.from_numpy(np.array(prob, dtype=float))), torch.from_numpy(np.array(gold_dist, dtype=float))).numpy() - 0
                kl_divergence_list.append(cur_kl_divergence)
            writer.write(json.dumps(result_dict) + "\n")
        avg_js_div = np.mean(js_divergence_list)
        new_avg_js_div = np.mean(new_js_divergence_list)
        new_avg_js_div_2 = np.mean(new_js_divergence_list_2)
        avg_kl_div = np.mean(kl_divergence_list)
        avg_entropy = np.mean(prediction_entropy_list)
        logger.info(f"***** JS {eval_task} {eval_split} {prefix} = {avg_js_div:.4f}")
        logger.info(f"***** entropy JS {eval_task} {eval_split} {prefix} = {new_avg_js_div:.4f}")
        logger.info(f"***** kl JS {eval_task} {eval_split} {prefix} = {new_avg_js_div_2:.4f}")
        logger.info(f"***** KL {eval_task} {eval_split} {prefix} = {avg_kl_div:.4f}")
        logger.info(f"***** Prediction Entropy {eval_task} {eval_split} {prefix} = {avg_entropy:.4f}")
    return results, all_predictions
def load_dataset(args, task, eval_split="train"):
    """Load raw examples for ``task`` for the requested split.

    For each split, an explicit path on ``args`` (``args.train`` /
    ``args.finetune`` / ``args.dev`` / ``args.test``) takes precedence over
    the processor's default ``args.data_dir`` lookup. Splits other than
    "train" are matched by substring, so names like "dev_in_training" work.

    Raises:
        ValueError: if ``eval_split`` matches none of the known splits.
        (Fix: the old message omitted "finetune", which is accepted above.)
    """
    processor = processors[task]()
    if eval_split == "train":
        if args.train is None:
            examples = processor.get_train_examples(args.data_dir)
        else:
            examples = processor.get_examples(args.train, "train")
    elif "finetune" in eval_split:
        if args.finetune is None:
            examples = processor.get_finetune_examples(args.data_dir)
        else:
            examples = processor.get_examples(args.finetune, "finetune")
    elif "dev" in eval_split:
        if args.dev is None:
            examples = processor.get_dev_examples(args.data_dir)
        else:
            examples = processor.get_examples(args.dev, "dev")
    elif "test" in eval_split:
        if args.test is None:
            examples = processor.get_test_examples(args.data_dir)
        else:
            examples = processor.get_examples(args.test, "test")
    else:
        raise ValueError(
            f"eval_split should be train / finetune / dev / test, but was given {eval_split}")
    return examples
def get_winogrande_tensors(features):
    """Pack multiple-choice WinoGrande features into a TensorDataset.

    Each feature holds one dict per answer choice in ``choices_features``;
    the per-choice fields are stacked into (num_examples, num_choices, ...)
    long tensors alongside the labels and example ids.
    """
    def gather(field_name):
        # Collect one field across every choice of every feature.
        return [[choice[field_name] for choice in feat.choices_features]
                for feat in features]

    input_ids = torch.tensor(gather("input_ids"), dtype=torch.long)
    input_mask = torch.tensor(gather("input_mask"), dtype=torch.long)
    segment_ids = torch.tensor(gather("segment_ids"), dtype=torch.long)
    label_ids = torch.tensor([feat.label for feat in features], dtype=torch.long)
    example_ids = torch.tensor([feat.example_id for feat in features], dtype=torch.long)
    return TensorDataset(input_ids, input_mask, segment_ids, label_ids, example_ids)
def load_and_cache_examples(args, task, tokenizer, evaluate=False, data_split="train"):
    """Build (or load from cache) a TensorDataset for ``task`` on ``data_split``.

    Classification labels become 3-way probability distributions: a list
    label is treated as annotator votes whose counts are divided by 10
    (assumes exactly 10 annotations per example — TODO confirm); a scalar
    label becomes a one-hot vector.
    """
    # In distributed training, only the first process builds features; the
    # others wait on the barrier and then read the cache.
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()
    processor = processors[task]()
    output_mode = output_modes[task]
    if not os.path.exists(args.features_cache_dir):
        os.makedirs(args.features_cache_dir)
    # Cache key: split, model name, max sequence length, task.
    cached_features_file = os.path.join(
        args.features_cache_dir,
        "cached_{}_{}_{}_{}".format(
            data_split,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        print('label_list', label_list)
        if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
            # HACK: label indices are swapped in the RoBERTa pretrained model.
            label_list[1], label_list[2] = label_list[2], label_list[1]
        examples = load_dataset(args, task, data_split)
        original_id = []
        if task == "winogrande":
            features = convert_mc_examples_to_features(
                examples,
                label_list,
                args.max_seq_length,
                tokenizer,
                pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,)
        else:
            features, or_id = convert_examples_to_features(
                examples,
                tokenizer,
                label_list=label_list,
                max_length=args.max_seq_length,
                output_mode=output_mode,
                pad_on_left=bool(args.model_type in ["xlnet"]),
                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,)
            original_id.append(or_id)
            print('len_1', len(features))
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        # Release the waiting processes now that the cache exists.
        torch.distributed.barrier()
    if task == "winogrande":
        return get_winogrande_tensors(features)
    if args.do_train:
        # NOTE(review): settings '1'/'2'/'3' are no-ops; the '549k_*' settings
        # randomly subsample features to hard-coded sizes. The sizes look
        # experiment-specific — confirm before reuse.
        if args.setting == '1':
            features = features
            print('setting_1')
        if args.setting == '2':
            features = features
            print('setting_2')
        if args.setting == '3':
            features = features
            print('setting_3')
        if args.setting == '549k_2_1':
            sub_features = features
            new_features = random.sample(sub_features, 544368)
            features = new_features
        if args.setting == '549k_2_2':
            sub_features = features
            new_features = random.sample(sub_features, 544368)
            features = new_features
        if args.setting == '549k_2_3':
            sub_features = features
            new_features = random.sample(sub_features, 544368)
            features = new_features
        if args.setting == '549k_3_1':
            sub_features = features
            new_features = random.sample(sub_features, 539368)
            features = new_features
    print('len_2', len(features))
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)
    if output_mode == "classification":
        final_label = []
        for f in features:
            if type(f.label) == list:
                # Label is a list of annotator votes in {0, 1, 2}; convert
                # vote counts to a distribution (divides by 10 — assumes 10
                # votes per example; TODO confirm).
                n_0 = 0
                n_1 = 0
                n_2 = 0
                for i in f.label:
                    if i == 0:
                        n_0 = n_0 + 1
                    if i == 1:
                        n_1 = n_1 + 1
                    if i == 2:
                        n_2 = n_2 + 1
                final_label.append([n_0/10, n_1/10, n_2/10])
            else:
                # Scalar label -> one-hot distribution.
                if f.label == 0:
                    label = [1, 0, 0]
                if f.label == 1:
                    label = [0, 1, 0]
                if f.label == 2:
                    label = [0, 0, 1]
                final_label.append(label)
        all_labels = torch.tensor([item for item in final_label], dtype=torch.float)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)
    return dataset
def finetune_load_and_cache_examples(args, task, tokenizer, evaluate=False, data_split="finetune"):
    """Build (or load from cache) a TensorDataset for the fine-tuning split.

    NOTE(review): near-duplicate of ``load_and_cache_examples`` minus the
    ``args.setting`` subsampling — consider consolidating. Label handling is
    identical: list labels are treated as annotator votes divided by 10
    (assumes 10 annotations — TODO confirm); scalar labels become one-hot.
    """
    # In distributed training, only the first process builds features; the
    # others wait on the barrier and then read the cache.
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()
    processor = processors[task]()
    output_mode = output_modes[task]
    if not os.path.exists(args.features_cache_dir):
        os.makedirs(args.features_cache_dir)
    # Cache key: split, model name, max sequence length, task.
    cached_features_file = os.path.join(
        args.features_cache_dir,
        "cached_{}_{}_{}_{}".format(
            data_split,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        print('label_list', label_list)
        if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
            # HACK: label indices are swapped in the RoBERTa pretrained model.
            label_list[1], label_list[2] = label_list[2], label_list[1]
        examples = load_dataset(args, task, data_split)
        original_id = []
        if task == "winogrande":
            features = convert_mc_examples_to_features(
                examples,
                label_list,
                args.max_seq_length,
                tokenizer,
                pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,)
        else:
            features, or_id = convert_examples_to_features(
                examples,
                tokenizer,
                label_list=label_list,
                max_length=args.max_seq_length,
                output_mode=output_mode,
                pad_on_left=bool(args.model_type in ["xlnet"]),
                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,)
            original_id.append(or_id)
            print('len_1', len(features))
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        # Release the waiting processes now that the cache exists.
        torch.distributed.barrier()
    if task == "winogrande":
        return get_winogrande_tensors(features)
    print('finetune_features', len(features))
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)
    if output_mode == "classification":
        final_label = []
        for f in features:
            if type(f.label) == list:
                # Vote counts -> distribution (assumes 10 votes; TODO confirm).
                n_0 = 0
                n_1 = 0
                n_2 = 0
                for i in f.label:
                    if i == 0:
                        n_0 = n_0 + 1
                    if i == 1:
                        n_1 = n_1 + 1
                    if i == 2:
                        n_2 = n_2 + 1
                final_label.append([n_0/10, n_1/10, n_2/10])
            else:
                # Scalar label -> one-hot distribution.
                if f.label == 0:
                    label = [1, 0, 0]
                if f.label == 1:
                    label = [0, 1, 0]
                if f.label == 2:
                    label = [0, 0, 1]
                final_label.append(label)
        all_labels = torch.tensor([item for item in final_label], dtype=torch.float)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)
    return dataset
def Convert(string):
    """Split ``string`` on single spaces into a list of tokens.

    Name/signature kept for existing callers. ``str.split(" ")`` already
    returns a list, so the original ``list(...)`` wrapper was redundant.
    Note: splitting on a literal " " keeps empty strings for runs of
    spaces and returns ``[""]`` for the empty string, matching the
    original behavior.
    """
    return string.split(" ")
def build_vocab(examples):
    """Assign each distinct space-delimited word a stable integer id.

    Words are numbered in first-seen order, scanning ``text_a`` then
    ``text_b`` (when truthy) of each example.
    """
    vocab = {}

    def register(words):
        # First occurrence wins; the id is the vocab size at insertion time.
        for word in words:
            if word not in vocab:
                vocab[word] = len(vocab)

    for example in examples:
        register(example.text_a.split(" "))
        if example.text_b:
            register(example.text_b.split(" "))
    return vocab
def lp_finetune_load_and_cache_examples(args, task, tokenizer, label_flag, evaluate=False, data_split="train"):
    """Label-propagation variant of the feature loader.

    ``label_flag`` selects between the raw examples ('single_label') and
    TF-IDF word-level augmented copies ('single_aug_label').

    NOTE(review): on a cache miss, ``features`` (and ``examples``) are only
    bound when ``label_flag`` is one of those two values — any other value
    would raise UnboundLocalError below; likewise the winogrande branch uses
    ``original_examples``, which only exists for 'single_label'. Confirm
    callers only pass the two expected flags.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()
    processor = processors[task]()
    output_mode = output_modes[task]
    if not os.path.exists(args.features_cache_dir):
        os.makedirs(args.features_cache_dir)
    # Cache key: split, model name, max sequence length, task.
    # NOTE(review): label_flag is NOT part of the key, so raw and augmented
    # features share one cache file — whichever is built first wins. Confirm.
    cached_features_file = os.path.join(
        args.features_cache_dir,
        "cached_{}_{}_{}_{}".format(
            data_split,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        print('label_list', label_list)
        if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
            # HACK: label indices are swapped in the RoBERTa pretrained model.
            label_list[1], label_list[2] = label_list[2], label_list[1]
        if label_flag == 'single_aug_label':
            # Build a TF-IDF-based word-level augmentation of the split.
            examples_beg = load_dataset(args, task, data_split)
            data_stats = word_level_augment.get_data_stats(examples_beg)
            aug_ops = "tf_idf-0.18"
            word_vocab = build_vocab(examples_beg)
            examples_aug = word_level_augment.word_level_augment(
                examples_beg, aug_ops, word_vocab, data_stats)
            for i in examples_aug:
                # Augmentation yields token sequences; re-join into strings.
                listToStr_a = ' '.join([str(elem) for elem in i.text_a])
                listToStr_b = ' '.join([str(elem) for elem in i.text_b])
                i.text_a = listToStr_a
                i.text_b = listToStr_b
        if label_flag == 'single_label':
            original_examples = load_dataset(args, task, data_split)
        original_id = []
        if task == "winogrande":
            examples = original_examples
            features = convert_mc_examples_to_features(
                examples,
                label_list,
                args.max_seq_length,
                tokenizer,
                pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,)
        else:
            if label_flag == 'single_label':
                examples = original_examples
                features, or_id = convert_examples_to_features(
                    examples,
                    tokenizer,
                    label_list=label_list,
                    max_length=args.max_seq_length,
                    output_mode=output_mode,
                    pad_on_left=bool(args.model_type in ["xlnet"]),
                    pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                    pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,)
                original_id.append(or_id)
                print('len_1', len(features))
            if label_flag == 'single_aug_label':
                examples = examples_aug
                features, or_id = convert_examples_to_features(
                    examples,
                    tokenizer,
                    label_list=label_list,
                    max_length=args.max_seq_length,
                    output_mode=output_mode,
                    pad_on_left=bool(args.model_type in ["xlnet"]),
                    pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                    pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0, )
                original_id.append(or_id)
                print('len_1', len(features))
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        # Release the waiting processes now that the cache exists.
        torch.distributed.barrier()
    if task == "winogrande":
        return get_winogrande_tensors(features)
    print('finetune_features', len(features))
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)
    if output_mode == "classification":
        final_label = []
        for f in features:
            if type(f.label) == list:
                # Vote counts -> distribution (assumes 10 votes; TODO confirm).
                n_0 = 0
                n_1 = 0
                n_2 = 0
                for i in f.label:
                    if i == 0:
                        n_0 = n_0 + 1
                    if i == 1:
                        n_1 = n_1 + 1
                    if i == 2:
                        n_2 = n_2 + 1
                final_label.append([n_0/10, n_1/10, n_2/10])
            else:
                # Scalar label -> one-hot distribution.
                if f.label == 0:
                    label = [1, 0, 0]
                if f.label == 1:
                    label = [0, 1, 0]
                if f.label == 2:
                    label = [0, 0, 1]
                final_label.append(label)
        all_labels = torch.tensor([item for item in final_label], dtype=torch.float)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)
    return dataset
def eval_load_and_cache_examples(args, task, tokenizer, evaluate=True, data_split=f"dev_"""):
    """Load (or build) eval features plus the original example-id list.

    Returns:
        (dataset, original_id): a TensorDataset and a list whose first
        element holds the raw string ids of the split, aligned by index.

    NOTE(review): the default ``data_split`` is the literal "dev_"
    (f"dev_" followed by an implicitly concatenated empty string) — this
    looks accidental; confirm callers always pass data_split explicitly.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training process the dataset,
        # and the others will use the cache
        torch.distributed.barrier()
    processor = processors[task]()
    output_mode = output_modes[task]
    if not os.path.exists(args.features_cache_dir):
        os.makedirs(args.features_cache_dir)
    cached_features_file = os.path.join(
        args.features_cache_dir,
        "cached_{}_{}_{}_{}".format(
            data_split,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    # Companion caches: the raw string ids, and an eval copy of the features
    # whose existence acts as the "already built" marker below.
    cached_id_file = os.path.join(
        args.features_cache_dir,
        "id_cached_{}_{}_{}_{}".format(
            data_split,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    cached_eval_data_file = os.path.join(
        args.features_cache_dir,
        "eval_data_cached_{}_{}_{}_{}".format(
            data_split,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    # Load data features from cache or dataset file
    if os.path.exists(cached_eval_data_file):
        logger.info("Loading features from cached file %s", cached_eval_data_file)
        features = torch.load(cached_eval_data_file)
        original_id = torch.load(cached_id_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        examples = load_dataset(args, task, data_split)
        original_id = []
        if task == "winogrande":
            features = convert_mc_examples_to_features(
                examples,
                label_list,
                args.max_seq_length,
                tokenizer,
                pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,)
        else:
            features, or_id = convert_examples_to_features(
                examples,
                tokenizer,
                label_list=label_list,
                max_length=args.max_seq_length,
                output_mode=output_mode,
                pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,)
            original_id.append(or_id)
        logger.info("***********Create New Feautrs****************************************")
        print('creating_eval_len_new_features', len(features))
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            logger.info("Saving original_id into cached file %s", cached_id_file)
            logger.info("Saving eval data into cached file %s", cached_eval_data_file)
            # NOTE(review): the same `features` object is saved to both the
            # generic and the eval cache file — redundant but harmless.
            torch.save(features, cached_features_file)
            torch.save(original_id, cached_id_file)
            torch.save(features, cached_eval_data_file)
    if args.local_rank == 0 and not evaluate:
        # Make sure only the first process in distributed training
        # process the dataset, and the others will use the cache
        torch.distributed.barrier()
    if task == "winogrande":
        return get_winogrande_tensors(features)
    print('eval_features', len(features))
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)
    if output_mode == "classification":
        final_label = []
        for f in features:
            if type(f.label) == list:
                # Vote counts -> distribution (assumes 10 votes; TODO confirm).
                n_0 = 0
                n_1 = 0
                n_2 = 0
                for i in f.label:
                    if i == 0:
                        n_0 = n_0 + 1
                    if i == 1:
                        n_1 = n_1 + 1
                    if i == 2:
                        n_2 = n_2 + 1
                final_label.append([n_0 / 10, n_1 / 10, n_2 / 10])
            else:
                # Scalar label -> one-hot distribution.
                if f.label == 0:
                    label = [1, 0, 0]
                if f.label == 1:
                    label = [0, 1, 0]
                if f.label == 2:
                    label = [0, 0, 1]
                final_label.append(label)
        all_labels = torch.tensor([item for item in final_label], dtype=torch.float)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)
    return dataset, original_id
def run_transformer(args):
    """Train, optionally finetune, and evaluate a transformer sequence classifier.

    ``args`` carries the merged CLI/config parameters (see ``main``). Side
    effects: mutates ``args`` in place (``device``, ``n_gpu``, ``output_mode``,
    learning rates), writes checkpoints and metrics under ``args.output_dir``,
    and may delete ``args.features_cache_dir`` when starting a fresh run.
    """
    if (os.path.exists(args.output_dir)
            and os.listdir(args.output_dir)
            and args.do_train
            and not args.overwrite_output_dir):
        raise ValueError(
            f"Output directory ({args.output_dir}) already exists and is not empty."
            f" Use --overwrite_output_dir to overcome.")
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see
        # https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        logger.info("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,)
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,)
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,)
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,)
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,)
    if args.local_rank == 0:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training
    args.learning_rate = float(args.learning_rate)
    if args.setting != '1' and args.do_finetune:
        args.finetune_learning_rate = float(args.finetune_learning_rate)
    if args.do_train:
        # If training for the first time, remove cache. If training from a checkpoint, keep cache.
        if os.path.exists(args.features_cache_dir) and not args.overwrite_output_dir:
            logger.info(f"Found existing cache for the same seed {args.seed}: "
                        f"{args.features_cache_dir}...Deleting!")
            shutil.rmtree(args.features_cache_dir)
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        save_args_to_file(args, mode="train")
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        print('len_train_dataset', len(train_dataset))
        flag_in_training = 'train'
        global_step, tr_loss = train(args, train_dataset, model, tokenizer, flag_in_training)
        logger.info(f" global_step = {global_step}, average loss = {tr_loss:.4f}")
    # Finetune on the small (labeled) dataset, optionally with label propagation
    if args.setting != '1' and args.do_finetune:
        finetune_dataset = finetune_load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        flag_in_training = 'finetune'
        if args.label_propagation:
            label_flag = 'single_label'
            single_dataset = lp_finetune_load_and_cache_examples(args, args.task_name, tokenizer, label_flag, evaluate=False)
            label_flag = 'single_aug_label'
            single_aug_dataset = lp_finetune_load_and_cache_examples(args, args.task_name, tokenizer, label_flag, evaluate=False)
            global_step, tr_loss = lp_train(args, finetune_dataset, single_dataset, single_aug_dataset,
                                            model, tokenizer, flag_in_training)
            logger.info(f" global_step = {global_step}, average loss = {tr_loss:.4f}")
        else:
            global_step, tr_loss = train(args, finetune_dataset,
                                         model, tokenizer, flag_in_training)
            logger.info(f" global_step = {global_step}, average loss = {tr_loss:.4f}")
    # Saving best-practices: if you use defaults names for the model,
    # you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        if not args.evaluate_during_training:
            logger.info("Saving model checkpoint to %s", args.output_dir)
            # Save a trained model, configuration and tokenizer using `save_pretrained()`.
            # They can then be reloaded using `from_pretrained()`
            # Take care of distributed/parallel training
            model_to_save = (model.module if hasattr(model, "module") else model)
            model_to_save.save_pretrained(args.output_dir)
            tokenizer.save_pretrained(args.output_dir)
            # Good practice: save your training arguments together with the trained model
            torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
        logger.info(" **** Done with training ****")
    # Evaluation
    eval_splits = []
    if args.do_eval:
        eval_splits.append("dev")
    if args.do_test:
        eval_splits.append("test")
    # FIX: parenthesized the `or` — previously this parsed as
    # `do_test or (do_eval and local_rank in [-1, 0])`, so with --do_test every
    # distributed rank (not just -1/0) ran the evaluation loop.
    if (args.do_test or args.do_eval) and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(
                    glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        results = {}
        prefix = args.test.split("/")[-1].split(".tsv")[0] if args.test else ""
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix += checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            for eval_split in eval_splits:
                save_args_to_file(args, mode=eval_split)
                result, predictions = evaluate(args, model, tokenizer, prefix=prefix, eval_split=eval_split)
                result = dict((k + f"_{global_step}", v) for k, v in result.items())
                results.update(result)
        # NOTE(review): `predictions` is only bound if the loops above ran at
        # least once — confirm diagnostics are never requested without a split.
        if args.test and "diagnostic" in args.test:
            # For running diagnostics with MNLI, run as SNLI and use hack.
            evaluate_by_category(predictions[args.task_name],
                                 mnli_hack=True if args.task_name in ["SNLI", "snli"] and "mnli" in args.output_dir else False,
                                 eval_filename=os.path.join(args.output_dir, "eval_metrics_diagnostics.json"),
                                 diagnostics_file_carto=args.test)
    logger.info(" **** Done ****")
def main():
    """CLI entry point: merge jsonnet config with CLI flags, then run the pipeline.

    CLI flags take precedence over the config file (they are applied on top of
    the parsed jsonnet dict before building the ``Params`` object).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config",
                        "-c",
                        type=os.path.abspath,
                        required=True,
                        help="Main config file with basic arguments.")
    parser.add_argument("--output_dir",
                        "-o",
                        type=os.path.abspath,
                        required=True,
                        help="Output directory for model.")
    parser.add_argument("--do_train",
                        action="store_true",
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action="store_true",
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test",
                        action="store_true",
                        help="Whether to run eval on the (OOD) test set.")
    parser.add_argument("--do_finetune",
                        action="store_true",
                        help="Whether to finetune.")
    parser.add_argument("--label_propagation",
                        action="store_true",
                        help="Whether to run label propagation.")
    parser.add_argument('--ft_num_train_epochs', type=float, help="finetuning epochs")
    # parser.add_argument("--model_name_or_path",
    #                     type=os.path.abspath,
    #                     required=True,
    #                     help="Model Checkpoints")
    parser.add_argument("--do_temperature",
                        action="store_true",
                        help="Whether to temperature scaling.")
    parser.add_argument("--do_train_label_smooth",
                        action="store_true",
                        help="Whether to do train label smoothing.")
    parser.add_argument("--overwrite_output_dir",
                        action="store_true",
                        help="Whether to overwrite the previous output.")
    parser.add_argument("--overwrite_cache",
                        action="store_true",
                        help="Whether to overwrite the previous data cache.")
    parser.add_argument("--use_existing_eval_data",
                        action="store_true",
                        help="Whether to use the existing eval data to eval.")
    parser.add_argument('--setting', type=str, help="Different setting")
    parser.add_argument("--test",
                        type=os.path.abspath,
                        help="OOD test set.")
    # TODO(SS): Automatically map tasks to OOD test sets.
    args_from_cli = parser.parse_args()
    # config file provides the defaults; CLI flags override them
    other_args = json.loads(_jsonnet.evaluate_file(args_from_cli.config))
    other_args.update(**vars(args_from_cli))
    args = Params(MODEL_CLASSES, ALL_MODELS, processors, other_args)
    run_transformer(args)
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
| true | true |
1c31864e1fda5523fc1d8ea926d23ac00da9b6a5 | 41 | py | Python | main.py | ddlucad96/IconBasedMalwareClassifier | 608f7c26741c7d5dec63dc0dde7e4bc7f844b73f | [
"MIT"
] | 2 | 2021-04-13T13:38:49.000Z | 2021-04-16T19:26:26.000Z | main.py | ddlucad96/IconBasedMalwareClassifier | 608f7c26741c7d5dec63dc0dde7e4bc7f844b73f | [
"MIT"
] | null | null | null | main.py | ddlucad96/IconBasedMalwareClassifier | 608f7c26741c7d5dec63dc0dde7e4bc7f844b73f | [
"MIT"
] | null | null | null | # TODO: IMPLEMENT HERE THE NOTEBOOK LOGIC | 41 | 41 | 0.804878 | true | true | |
1c3186c7b159ededf6e79f43559b38aeacdec58e | 11,339 | py | Python | pioreactor/background_jobs/leader/mqtt_to_db_streaming.py | Pioreactor/pioreactor | 63288a2b7ef90b663ff19b85f19586ac0b42bc1b | [
"MIT"
] | 32 | 2021-01-01T01:56:04.000Z | 2022-03-31T21:23:22.000Z | pioreactor/background_jobs/leader/mqtt_to_db_streaming.py | Pioreactor/pioreactor | 63288a2b7ef90b663ff19b85f19586ac0b42bc1b | [
"MIT"
] | 222 | 2020-11-28T18:21:56.000Z | 2022-03-30T19:23:32.000Z | pioreactor/background_jobs/leader/mqtt_to_db_streaming.py | Pioreactor/pioreactor | 63288a2b7ef90b663ff19b85f19586ac0b42bc1b | [
"MIT"
] | 3 | 2021-02-12T17:50:22.000Z | 2022-02-18T18:53:21.000Z | # -*- coding: utf-8 -*-
"""
This job runs on the leader, and is a replacement for the NodeRed database streaming job.
"""
from __future__ import annotations
from json import dumps, loads
from typing import Callable, Any
from dataclasses import dataclass
import click
from pioreactor.pubsub import QOS
from pioreactor.background_jobs.base import BackgroundJob, NiceMixin
from pioreactor.whoami import get_unit_name, UNIVERSAL_EXPERIMENT
from pioreactor.config import config
@dataclass
class SetAttrSplitTopic:
    """Metadata pulled from an MQTT topic: which unit and experiment a message belongs to."""

    pioreactor_unit: str  # second topic segment (see produce_metadata)
    experiment: str  # third topic segment (see produce_metadata)
@dataclass
class TopicToParserToTable:
    """Wiring triple: an MQTT topic pattern, a parser for its payloads, and the target SQL table."""

    topic: str  # MQTT topic pattern (may contain `+` wildcards)
    parser: Callable[[str, str], dict]  # (topic, payload) -> row dict, or None to skip the message
    table: str  # name of the SQL table the parsed row is inserted into
class TopicToParserToTableContrib(TopicToParserToTable):
    """
    Plugin hook: plugins subclass this to register an extra topic/parser/table mapping.

    Subclassing alone is enough — `__init_subclass__` appends the subclass to
    `MqttToDBStreamer.topics_to_tables_from_plugins`, which the streamer merges
    into its mapping list at construction time.

    parser (callable) must accept (topic: str, payload: str)
    TODO: untested
    """

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # TODO: this can first check the db to make sure the requested table is defined.
        MqttToDBStreamer.topics_to_tables_from_plugins.append(cls)
class MqttToDBStreamer(NiceMixin, BackgroundJob):
    """Background job that mirrors selected MQTT topics into SQLite tables.

    Each topic is paired with a parser that turns a message payload into a row
    dict, and with the table that row is inserted into.
    """

    # TopicToParserToTableContrib subclasses provided by plugins register here.
    topics_to_tables_from_plugins: list[TopicToParserToTableContrib] = []

    def __init__(self, topics_to_tables, **kwargs):
        from sqlite3worker import Sqlite3Worker

        super(MqttToDBStreamer, self).__init__(job_name="mqtt_to_db_streaming", **kwargs)
        # A single worker serializes all inserts into the SQLite database.
        self.sqliteworker = Sqlite3Worker(
            config["storage"]["database"], max_queue_size=250, raise_on_error=False
        )
        topics_to_tables.extend(self.topics_to_tables_from_plugins)

        subscriptions = []
        for mapping in topics_to_tables:
            handler = self.create_on_message_callback(mapping.parser, mapping.table)
            subscriptions.append({"topic": mapping.topic, "callback": handler})
        self.initialize_callbacks(subscriptions)

    def on_disconnect(self):
        # Shut the database worker down cleanly when the job disconnects.
        self.sqliteworker.close()

    def create_on_message_callback(self, parser: Callable[[str, Any], dict], table: str):
        """Build an MQTT handler that parses each message and inserts the row into `table`."""

        def _callback(message):
            try:
                new_row = parser(message.topic, message.payload)
            except Exception as e:
                self.logger.error(e)
                self.logger.debug(
                    f"message.payload that caused error: `{message.payload}`"
                )
                return

            # A parser may decline a message by returning None.
            if new_row is None:
                return

            cols_placeholder = ", ".join(new_row.keys())
            values_placeholder = ", ".join(":" + c for c in new_row.keys())
            SQL = f"""INSERT INTO {table} ({cols_placeholder}) VALUES ({values_placeholder})"""

            try:
                self.sqliteworker.execute(SQL, new_row)
            except Exception as e:
                self.logger.error(e)
                self.logger.debug(f"SQL that caused error: `{SQL}`")
                return

        return _callback

    def initialize_callbacks(self, topics_and_callbacks: list[dict]):
        # Subscribe every (topic, handler) pair; exactly-once delivery, skip retained messages.
        for entry in topics_and_callbacks:
            self.subscribe_and_callback(
                entry["callback"],
                entry["topic"],
                qos=QOS.EXACTLY_ONCE,
                allow_retained=False,
            )
def produce_metadata(topic: str) -> tuple[SetAttrSplitTopic, list[str]]:
    """Helper for parsers: split an MQTT topic into (unit/experiment metadata, raw segments)."""
    segments = topic.split("/")
    metadata = SetAttrSplitTopic(segments[1], segments[2])
    return metadata, segments
def mqtt_to_db_streaming():
    """Build the MqttToDBStreamer job with the default topic -> parser -> table wiring."""
    ###################
    # parsers
    ###################
    # - must return a dictionary with the column names (order isn't important)
    # - `produce_metadata` is a helper function, see definition.
    # - parsers can return None as well, to skip adding the message to the database.
    #
    # raw OD voltage per photodiode channel (channel is the last topic segment)
    def parse_od(topic, payload):
        metadata, split_topic = produce_metadata(topic)
        payload = loads(payload)
        return {
            "experiment": metadata.experiment,
            "pioreactor_unit": metadata.pioreactor_unit,
            "timestamp": payload["timestamp"],
            "od_reading_v": payload["voltage"],
            "angle": payload["angle"],
            "channel": split_topic[-1],
        }

    # normalized/filtered OD from the growth-rate calculator
    def parse_od_filtered(topic, payload):
        metadata, split_topic = produce_metadata(topic)
        payload = loads(payload)
        return {
            "experiment": metadata.experiment,
            "pioreactor_unit": metadata.pioreactor_unit,
            "timestamp": payload["timestamp"],
            "normalized_od_reading": payload["od_filtered"],
        }

    # OD blank (baseline) voltage per channel
    def parse_od_blank(topic, payload):
        metadata, split_topic = produce_metadata(topic)
        payload = loads(payload)
        return {
            "experiment": metadata.experiment,
            "pioreactor_unit": metadata.pioreactor_unit,
            "timestamp": payload["timestamp"],
            "od_reading_v": payload["od_reading_v"],
            "channel": split_topic[-1],
        }

    # dosing (media/alt-media/waste) events
    def parse_dosing_events(topic, payload):
        payload = loads(payload)
        metadata, _ = produce_metadata(topic)
        return {
            "experiment": metadata.experiment,
            "pioreactor_unit": metadata.pioreactor_unit,
            "timestamp": payload["timestamp"],
            "volume_change_ml": payload["volume_change"],
            "event": payload["event"],
            "source_of_event": payload["source_of_event"],
        }

    # LED intensity change events
    def parse_led_events(topic, payload):
        payload = loads(payload)
        metadata, _ = produce_metadata(topic)
        return {
            "experiment": metadata.experiment,
            "pioreactor_unit": metadata.pioreactor_unit,
            "timestamp": payload["timestamp"],
            "channel": payload["channel"],
            "intensity": payload["intensity"],
            "source_of_event": payload["source_of_event"],
        }

    # implied growth rate from the growth-rate calculator
    def parse_growth_rate(topic, payload):
        metadata, _ = produce_metadata(topic)
        payload = loads(payload)
        return {
            "experiment": metadata.experiment,
            "pioreactor_unit": metadata.pioreactor_unit,
            "timestamp": payload["timestamp"],
            "rate": float(payload["growth_rate"]),
        }

    # temperature readings; empty payload means "cleared" and is skipped
    def parse_temperature(topic, payload):
        metadata, _ = produce_metadata(topic)
        if not payload:
            return None
        payload = loads(payload)
        return {
            "experiment": metadata.experiment,
            "pioreactor_unit": metadata.pioreactor_unit,
            "timestamp": payload["timestamp"],
            "temperature_c": float(payload["temperature"]),
        }

    # fraction of vial volume that is alternative media
    def parse_alt_media_fraction(topic, payload):
        metadata, _ = produce_metadata(topic)
        payload = loads(payload)
        return {
            "experiment": metadata.experiment,
            "pioreactor_unit": metadata.pioreactor_unit,
            "timestamp": payload["timestamp"],
            "alt_media_fraction": float(payload["alt_media_fraction"]),
        }

    # structured log messages; the log source is the last topic segment
    def parse_logs(topic, payload):
        metadata, split_topic = produce_metadata(topic)
        payload = loads(payload)
        return {
            "experiment": metadata.experiment,
            "pioreactor_unit": metadata.pioreactor_unit,
            "timestamp": payload["timestamp"],
            "message": payload["message"],
            "task": payload["task"],
            "level": payload["level"],
            "source": split_topic[-1],  # should be app, ui, etc.
        }

    # Kalman filter state/covariance, stored as JSON strings
    def parse_kalman_filter_outputs(topic, payload):
        metadata, _ = produce_metadata(topic)
        payload = loads(payload)
        return {
            "experiment": metadata.experiment,
            "pioreactor_unit": metadata.pioreactor_unit,
            "timestamp": payload["timestamp"],
            "state": dumps(payload["state"]),
            "covariance_matrix": dumps(payload["covariance_matrix"]),
        }

    # automation settings payloads already match the table schema 1:1
    def parse_automation_settings(topic, payload):
        payload = loads(payload.decode())
        return payload

    # measured stirring RPM; empty payload means "cleared" and is skipped
    def parse_stirring_rates(topic, payload):
        if not payload:
            return None
        metadata, _ = produce_metadata(topic)
        payload = loads(payload)
        return {
            "experiment": metadata.experiment,
            "pioreactor_unit": metadata.pioreactor_unit,
            "timestamp": payload["timestamp"],
            "measured_rpm": payload["rpm"],
        }

    # default wiring of MQTT topic pattern -> parser -> SQL table
    topics_to_tables = [
        TopicToParserToTable(
            "pioreactor/+/+/growth_rate_calculating/od_filtered",
            parse_od_filtered,
            "od_readings_filtered",
        ),
        TopicToParserToTable(
            "pioreactor/+/+/od_reading/od_raw/+", parse_od, "od_readings_raw"
        ),
        TopicToParserToTable(
            "pioreactor/+/+/dosing_events", parse_dosing_events, "dosing_events"
        ),
        TopicToParserToTable("pioreactor/+/+/led_events", parse_led_events, "led_events"),
        TopicToParserToTable(
            "pioreactor/+/+/growth_rate_calculating/growth_rate",
            parse_growth_rate,
            "growth_rates",
        ),
        TopicToParserToTable(
            "pioreactor/+/+/temperature_control/temperature",
            parse_temperature,
            "temperature_readings",
        ),
        TopicToParserToTable(
            "pioreactor/+/+/alt_media_calculating/alt_media_fraction",
            parse_alt_media_fraction,
            "alt_media_fraction",
        ),
        TopicToParserToTable("pioreactor/+/+/logs/+", parse_logs, "logs"),
        TopicToParserToTable(
            "pioreactor/+/+/dosing_automation/dosing_automation_settings",
            parse_automation_settings,
            "dosing_automation_settings",
        ),
        TopicToParserToTable(
            "pioreactor/+/+/led_automation/led_automation_settings",
            parse_automation_settings,
            "led_automation_settings",
        ),
        TopicToParserToTable(
            "pioreactor/+/+/temperature_automation/temperature_automation_settings",
            parse_automation_settings,
            "temperature_automation_settings",
        ),
        TopicToParserToTable(
            "pioreactor/+/+/growth_rate_calculating/kalman_filter_outputs",
            parse_kalman_filter_outputs,
            "kalman_filter_outputs",
        ),
        TopicToParserToTable(
            "pioreactor/+/+/stirring/measured_rpm", parse_stirring_rates, "stirring_rates"
        ),
        TopicToParserToTable("pioreactor/+/+/od_blank/+", parse_od_blank, "od_blanks"),
    ]
    # runs under the universal experiment so it keeps streaming across experiments
    return MqttToDBStreamer(
        topics_to_tables, experiment=UNIVERSAL_EXPERIMENT, unit=get_unit_name()
    )
@click.command(name="mqtt_to_db_streaming")
def click_mqtt_to_db_streaming():
    """
    (leader only) Send MQTT streams to the database. Parsers should return a dict of all the entries in the corresponding table.
    """
    # Construct the job (subscribes to all topics) and block servicing messages until disconnect.
    job = mqtt_to_db_streaming()
    job.block_until_disconnected()
| 33.547337 | 128 | 0.612752 |
from __future__ import annotations
from json import dumps, loads
from typing import Callable, Any
from dataclasses import dataclass
import click
from pioreactor.pubsub import QOS
from pioreactor.background_jobs.base import BackgroundJob, NiceMixin
from pioreactor.whoami import get_unit_name, UNIVERSAL_EXPERIMENT
from pioreactor.config import config
@dataclass
class SetAttrSplitTopic:
pioreactor_unit: str
experiment: str
@dataclass
class TopicToParserToTable:
topic: str
parser: Callable[[str, str], dict]
table: str
class TopicToParserToTableContrib(TopicToParserToTable):
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
MqttToDBStreamer.topics_to_tables_from_plugins.append(cls)
class MqttToDBStreamer(NiceMixin, BackgroundJob):
topics_to_tables_from_plugins: list[TopicToParserToTableContrib] = []
def __init__(self, topics_to_tables, **kwargs):
from sqlite3worker import Sqlite3Worker
super(MqttToDBStreamer, self).__init__(job_name="mqtt_to_db_streaming", **kwargs)
self.sqliteworker = Sqlite3Worker(
config["storage"]["database"], max_queue_size=250, raise_on_error=False
)
topics_to_tables.extend(self.topics_to_tables_from_plugins)
topics_and_callbacks = [
{
"topic": topic_to_table.topic,
"callback": self.create_on_message_callback(
topic_to_table.parser, topic_to_table.table
),
}
for topic_to_table in topics_to_tables
]
self.initialize_callbacks(topics_and_callbacks)
def on_disconnect(self):
self.sqliteworker.close()
def create_on_message_callback(self, parser: Callable[[str, Any], dict], table: str):
def _callback(message):
try:
new_row = parser(message.topic, message.payload)
except Exception as e:
self.logger.error(e)
self.logger.debug(
f"message.payload that caused error: `{message.payload}`"
)
return
if new_row is None:
return
cols_placeholder = ", ".join(new_row.keys())
values_placeholder = ", ".join([":" + c for c in new_row.keys()])
SQL = f"""INSERT INTO {table} ({cols_placeholder}) VALUES ({values_placeholder})"""
try:
self.sqliteworker.execute(SQL, new_row)
except Exception as e:
self.logger.error(e)
self.logger.debug(f"SQL that caused error: `{SQL}`")
return
return _callback
def initialize_callbacks(self, topics_and_callbacks: list[dict]):
for topic_and_callback in topics_and_callbacks:
self.subscribe_and_callback(
topic_and_callback["callback"],
topic_and_callback["topic"],
qos=QOS.EXACTLY_ONCE,
allow_retained=False,
)
def produce_metadata(topic: str) -> tuple[SetAttrSplitTopic, list[str]]:
split_topic = topic.split("/")
return (
SetAttrSplitTopic(split_topic[1], split_topic[2]),
split_topic,
)
def mqtt_to_db_streaming():
data.experiment,
"pioreactor_unit": metadata.pioreactor_unit,
"timestamp": payload["timestamp"],
"od_reading_v": payload["voltage"],
"angle": payload["angle"],
"channel": split_topic[-1],
}
def parse_od_filtered(topic, payload):
metadata, split_topic = produce_metadata(topic)
payload = loads(payload)
return {
"experiment": metadata.experiment,
"pioreactor_unit": metadata.pioreactor_unit,
"timestamp": payload["timestamp"],
"normalized_od_reading": payload["od_filtered"],
}
def parse_od_blank(topic, payload):
metadata, split_topic = produce_metadata(topic)
payload = loads(payload)
return {
"experiment": metadata.experiment,
"pioreactor_unit": metadata.pioreactor_unit,
"timestamp": payload["timestamp"],
"od_reading_v": payload["od_reading_v"],
"channel": split_topic[-1],
}
def parse_dosing_events(topic, payload):
payload = loads(payload)
metadata, _ = produce_metadata(topic)
return {
"experiment": metadata.experiment,
"pioreactor_unit": metadata.pioreactor_unit,
"timestamp": payload["timestamp"],
"volume_change_ml": payload["volume_change"],
"event": payload["event"],
"source_of_event": payload["source_of_event"],
}
def parse_led_events(topic, payload):
payload = loads(payload)
metadata, _ = produce_metadata(topic)
return {
"experiment": metadata.experiment,
"pioreactor_unit": metadata.pioreactor_unit,
"timestamp": payload["timestamp"],
"channel": payload["channel"],
"intensity": payload["intensity"],
"source_of_event": payload["source_of_event"],
}
def parse_growth_rate(topic, payload):
metadata, _ = produce_metadata(topic)
payload = loads(payload)
return {
"experiment": metadata.experiment,
"pioreactor_unit": metadata.pioreactor_unit,
"timestamp": payload["timestamp"],
"rate": float(payload["growth_rate"]),
}
def parse_temperature(topic, payload):
metadata, _ = produce_metadata(topic)
if not payload:
return None
payload = loads(payload)
return {
"experiment": metadata.experiment,
"pioreactor_unit": metadata.pioreactor_unit,
"timestamp": payload["timestamp"],
"temperature_c": float(payload["temperature"]),
}
def parse_alt_media_fraction(topic, payload):
metadata, _ = produce_metadata(topic)
payload = loads(payload)
return {
"experiment": metadata.experiment,
"pioreactor_unit": metadata.pioreactor_unit,
"timestamp": payload["timestamp"],
"alt_media_fraction": float(payload["alt_media_fraction"]),
}
def parse_logs(topic, payload):
metadata, split_topic = produce_metadata(topic)
payload = loads(payload)
return {
"experiment": metadata.experiment,
"pioreactor_unit": metadata.pioreactor_unit,
"timestamp": payload["timestamp"],
"message": payload["message"],
"task": payload["task"],
"level": payload["level"],
"source": split_topic[-1], # should be app, ui, etc.
}
def parse_kalman_filter_outputs(topic, payload):
metadata, _ = produce_metadata(topic)
payload = loads(payload)
return {
"experiment": metadata.experiment,
"pioreactor_unit": metadata.pioreactor_unit,
"timestamp": payload["timestamp"],
"state": dumps(payload["state"]),
"covariance_matrix": dumps(payload["covariance_matrix"]),
}
def parse_automation_settings(topic, payload):
payload = loads(payload.decode())
return payload
def parse_stirring_rates(topic, payload):
if not payload:
return None
metadata, _ = produce_metadata(topic)
payload = loads(payload)
return {
"experiment": metadata.experiment,
"pioreactor_unit": metadata.pioreactor_unit,
"timestamp": payload["timestamp"],
"measured_rpm": payload["rpm"],
}
topics_to_tables = [
TopicToParserToTable(
"pioreactor/+/+/growth_rate_calculating/od_filtered",
parse_od_filtered,
"od_readings_filtered",
),
TopicToParserToTable(
"pioreactor/+/+/od_reading/od_raw/+", parse_od, "od_readings_raw"
),
TopicToParserToTable(
"pioreactor/+/+/dosing_events", parse_dosing_events, "dosing_events"
),
TopicToParserToTable("pioreactor/+/+/led_events", parse_led_events, "led_events"),
TopicToParserToTable(
"pioreactor/+/+/growth_rate_calculating/growth_rate",
parse_growth_rate,
"growth_rates",
),
TopicToParserToTable(
"pioreactor/+/+/temperature_control/temperature",
parse_temperature,
"temperature_readings",
),
TopicToParserToTable(
"pioreactor/+/+/alt_media_calculating/alt_media_fraction",
parse_alt_media_fraction,
"alt_media_fraction",
),
TopicToParserToTable("pioreactor/+/+/logs/+", parse_logs, "logs"),
TopicToParserToTable(
"pioreactor/+/+/dosing_automation/dosing_automation_settings",
parse_automation_settings,
"dosing_automation_settings",
),
TopicToParserToTable(
"pioreactor/+/+/led_automation/led_automation_settings",
parse_automation_settings,
"led_automation_settings",
),
TopicToParserToTable(
"pioreactor/+/+/temperature_automation/temperature_automation_settings",
parse_automation_settings,
"temperature_automation_settings",
),
TopicToParserToTable(
"pioreactor/+/+/growth_rate_calculating/kalman_filter_outputs",
parse_kalman_filter_outputs,
"kalman_filter_outputs",
),
TopicToParserToTable(
"pioreactor/+/+/stirring/measured_rpm", parse_stirring_rates, "stirring_rates"
),
TopicToParserToTable("pioreactor/+/+/od_blank/+", parse_od_blank, "od_blanks"),
]
return MqttToDBStreamer(
topics_to_tables, experiment=UNIVERSAL_EXPERIMENT, unit=get_unit_name()
)
@click.command(name="mqtt_to_db_streaming")
def click_mqtt_to_db_streaming():
job = mqtt_to_db_streaming()
job.block_until_disconnected()
| true | true |
1c31881c2d3be33410dd8fb4917bf88aab4b437b | 23,500 | py | Python | shap/explainers/kernel.py | ajd98/shap | 922fa0fe9f198011edd875289fc68b906ff9f2b8 | [
"MIT"
] | 1 | 2019-05-02T02:53:34.000Z | 2019-05-02T02:53:34.000Z | shap/explainers/kernel.py | ajd98/shap | 922fa0fe9f198011edd875289fc68b906ff9f2b8 | [
"MIT"
] | null | null | null | shap/explainers/kernel.py | ajd98/shap | 922fa0fe9f198011edd875289fc68b906ff9f2b8 | [
"MIT"
] | null | null | null | from iml.common import convert_to_instance, convert_to_model, match_instance_to_data, match_model_to_data, convert_to_instance_with_index
from iml.explanations import AdditiveExplanation
from iml.links import convert_to_link, IdentityLink
from iml.datatypes import convert_to_data, DenseData
from scipy.special import binom
import numpy as np
import pandas as pd
import logging
import copy
import itertools
from sklearn.linear_model import LassoLarsIC, Lasso
from sklearn.cluster import KMeans
from tqdm import tqdm
log = logging.getLogger('shap')
def kmeans(X, k, round_values=True):
    """ Summarize a dataset with k mean samples weighted by the number of data points they
    each represent.

    Parameters
    ----------
    X : numpy.array or pandas.DataFrame
        Matrix of data samples to summarize (# samples x # features)

    k : int
        Number of means to use for approximation.

    round_values : bool
        For all i, round the ith dimension of each mean sample to match the nearest value
        from X[:,i]. This ensures discrete features always get a valid value.

    Returns
    -------
    DenseData object.
    """

    # use column names for pandas input, positional indices otherwise
    if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
        group_names = X.columns
        X = X.values
    else:
        group_names = [str(i) for i in range(X.shape[1])]

    fitted = KMeans(n_clusters=k, random_state=0).fit(X)
    centers = fitted.cluster_centers_

    if round_values:
        # snap each center coordinate to the nearest observed value in that column
        for row in range(k):
            for col in range(X.shape[1]):
                nearest = np.argmin(np.abs(X[:, col] - centers[row, col]))
                centers[row, col] = X[nearest, col]

    # weight each mean sample by its cluster's population
    return DenseData(centers, group_names, None, 1.0 * np.bincount(fitted.labels_))
class KernelExplainer(object):
"""Uses the Kernel SHAP method to explain the output of any function.
Kernel SHAP is a method that uses a special weighted linear regression
to compute the importance of each feature. The computed importance values
are Shapley values from game theory and also coefficents from a local linear
regression.
Parameters
----------
model : function or iml.Model
User supplied function that takes a matrix of samples (# samples x # features) and
computes a the output of the model for those samples. The output can be a vector
(# samples) or a matrix (# samples x # model outputs).
data : numpy.array or pandas.DataFrame or iml.DenseData
The background dataset to use for integrating out features. To determine the impact
of a feature, that feature is set to "missing" and the change in the model output
is observed. Since most models aren't designed to handle arbitrary missing data at test
time, we simulate "missing" by replacing the feature with the values it takes in the
background dataset. So if the background dataset is a simple sample of all zeros, then
we would approximate a feature being missing by setting it to zero. For small problems
this background datset can be the whole training set, but for larger problems consider
using a single reference value or using the kmeans function to summarize the dataset.
link : "identity" or "logit"
A generalized linear model link to connect the feature importance values to the model
output. Since the feature importance values, phi, sum up to the model output, it often makes
sense to connect them to the ouput with a link function where link(outout) = sum(phi).
If the model output is a probability then the LogitLink link function makes the feature
importance values have log-odds units.
"""
    # NOTE(review): the default `link=IdentityLink()` is evaluated once at class
    # definition time, so all callers share one IdentityLink instance — harmless
    # if IdentityLink is stateless, but confirm.
    def __init__(self, model, data, link=IdentityLink(), **kwargs):
        # convert incoming inputs to standardized iml objects
        self.link = convert_to_link(link)
        self.model = convert_to_model(model)
        self.keep_index = kwargs.get("keep_index", False)
        self.data = convert_to_data(data, keep_index=self.keep_index)
        match_instance_to_data  # (unused here; see shap_values)
        match_model_to_data(self.model, self.data)
        # enforce our current input type limitations
        assert isinstance(self.data, DenseData), "Shap explainer only supports the DenseData input currently."
        assert not self.data.transposed, "Shap explainer does not support transposed DenseData currently."
        # warn users about large background data sets
        if len(self.data.weights) > 100:
            log.warning("Using " + str(len(self.data.weights)) + " background data samples could cause " +
                        "slower run times. Consider using shap.kmeans(data, K) to summarize the background " +
                        "as K weighted samples.")
        # init our parameters
        self.N = self.data.data.shape[0]  # number of background samples
        self.P = self.data.data.shape[1]  # number of features
        self.linkfv = np.vectorize(self.link.f)  # vectorized link function
        self.nsamplesAdded = 0
        self.nsamplesRun = 0
        # find E_x[f(x)]: weighted average model output over the background set
        # (assumes self.data.weights sums to 1 — TODO confirm convert_to_data normalizes)
        if self.keep_index:
            model_null = self.model.f(self.data.convert_to_df())
        else:
            model_null = self.model.f(self.data.data)
        if isinstance(model_null, (pd.DataFrame, pd.Series)):
            model_null = model_null.values
        self.fnull = np.sum((model_null.T * self.data.weights).T, 0)
        # see if we have a vector output; scalar outputs are wrapped so fnull is always 1-D
        self.vector_out = True
        if len(self.fnull.shape) == 0:
            self.vector_out = False
            self.fnull = np.array([self.fnull])
            self.D = 1  # number of model outputs
        else:
            self.D = self.fnull.shape[0]
def shap_values(self, X, **kwargs):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame
A matrix of samples (# samples x # features) on which to explain the model's output.
nsamples : "auto" or int
Number of times to re-evaluate the model when explaining each prediction. More samples
lead to lower variance estimates of the SHAP values.
l1_reg : "auto" or float
The l1 regularization to use for feature selection (the estimation procedure is based on
a debiased lasso). Set this to zero to remove the feature selection step before estimation.
Returns
-------
For a models with a single output this returns a matrix of SHAP values
(# samples x # features + 1). The last column is the base value of the model, which is
the expected value of the model applied to the background dataset. This causes each row to
sum to the model output for that sample. For models with vector outputs this returns a list
of such matrices, one for each output.
"""
# convert dataframes
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
if self.keep_index:
index_value = X.index.values
index_name = X.index.name
column_name = list(X.columns)
X = X.values
assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"
# single instance
if len(X.shape) == 1:
data = X.reshape((1, X.shape[0]))
if self.keep_index:
data = convert_to_instance_with_index(data, column_name, index_name, index_value)
explanation = self.explain(data, **kwargs)
# vector-output
s = explanation.effects.shape
if len(s) == 2:
outs = [np.zeros(s[0] + 1) for j in range(s[1])]
for j in range(s[1]):
outs[j][:-1] = explanation.effects[:, j]
outs[j][-1] = explanation.base_value[j]
return outs
# single-output
else:
out = np.zeros(s[0] + 1)
out[:-1] = explanation.effects
out[-1] = explanation.base_value
return out
# explain the whole dataset
elif len(X.shape) == 2:
explanations = []
for i in tqdm(range(X.shape[0]), disable=kwargs.get("silent", False)):
data = X[i:i + 1, :]
if self.keep_index:
data = convert_to_instance_with_index(data, column_name, index_value[i:i + 1], index_name)
explanations.append(self.explain(data, **kwargs))
# vector-output
s = explanations[0].effects.shape
if len(s) == 2:
outs = [np.zeros((X.shape[0], s[0] + 1)) for j in range(s[1])]
for i in range(X.shape[0]):
for j in range(s[1]):
outs[j][i, :-1] = explanations[i].effects[:, j]
outs[j][i, -1] = explanations[i].base_value[j]
return outs
# single-output
else:
out = np.zeros((X.shape[0], s[0] + 1))
for i in range(X.shape[0]):
out[i, :-1] = explanations[i].effects
out[i, -1] = explanations[i].base_value
return out
def explain(self, incoming_instance, **kwargs):
# convert incoming input to a standardized iml object
instance = convert_to_instance(incoming_instance)
match_instance_to_data(instance, self.data)
# find the feature groups we will test. If a feature does not change from its
# current value then we know it doesn't impact the model
self.varyingInds = self.varying_groups(instance.x)
self.varyingFeatureGroups = [self.data.groups[i] for i in self.varyingInds]
self.M = len(self.varyingFeatureGroups)
# find f(x)
if self.keep_index:
model_out = self.model.f(instance.convert_to_df())
else:
model_out = self.model.f(instance.x)
if isinstance(model_out, (pd.DataFrame, pd.Series)):
model_out = model_out.values[0]
self.fx = model_out[0]
if not self.vector_out:
self.fx = np.array([self.fx])
# if no features vary then there no feature has an effect
if self.M == 0:
phi = np.zeros((len(self.data.groups), self.D))
phi_var = np.zeros((len(self.data.groups), self.D))
# if only one feature varies then it has all the effect
elif self.M == 1:
phi = np.zeros((len(self.data.groups), self.D))
phi_var = np.zeros((len(self.data.groups), self.D))
diff = self.link.f(self.fx) - self.link.f(self.fnull)
for d in range(self.D):
phi[self.varyingInds[0],d] = diff[d]
# if more than one feature varies then we have to do real work
else:
self.l1_reg = kwargs.get("l1_reg", "auto")
# pick a reasonable number of samples if the user didn't specify how many they wanted
self.nsamples = kwargs.get("nsamples", "auto")
if self.nsamples == "auto":
self.nsamples = 2 * self.M + 2**11
# if we have enough samples to enumerate all subsets then ignore the unneeded samples
self.max_samples = 2 ** 30
if self.M <= 30:
self.max_samples = 2 ** self.M - 2
if self.nsamples > self.max_samples:
self.nsamples = self.max_samples
# reserve space for some of our computations
self.allocate()
# weight the different subset sizes
num_subset_sizes = np.int(np.ceil((self.M - 1) / 2.0))
num_paired_subset_sizes = np.int(np.floor((self.M - 1) / 2.0))
weight_vector = np.array([(self.M - 1.0) / (i * (self.M - i)) for i in range(1, num_subset_sizes + 1)])
weight_vector[:num_paired_subset_sizes] *= 2
weight_vector /= np.sum(weight_vector)
log.debug("weight_vector = {0}".format(weight_vector))
log.debug("num_subset_sizes = {0}".format(num_subset_sizes))
log.debug("num_paired_subset_sizes = {0}".format(num_paired_subset_sizes))
log.debug("M = {0}".format(self.M))
# fill out all the subset sizes we can completely enumerate
# given nsamples*remaining_weight_vector[subset_size]
num_full_subsets = 0
num_samples_left = self.nsamples
group_inds = np.arange(self.M, dtype='int64')
mask = np.zeros(self.M)
remaining_weight_vector = copy.copy(weight_vector)
for subset_size in range(1, num_subset_sizes + 1):
# determine how many subsets (and their complements) are of the current size
nsubsets = binom(self.M, subset_size)
if subset_size <= num_paired_subset_sizes: nsubsets *= 2
log.debug("subset_size = {0}".format(subset_size))
log.debug("nsubsets = {0}".format(nsubsets))
log.debug("self.nsamples*weight_vector[subset_size-1] = {0}".format(
num_samples_left * remaining_weight_vector[subset_size - 1]))
log.debug("self.nsamples*weight_vector[subset_size-1/nsubsets = {0}".format(
num_samples_left * remaining_weight_vector[subset_size - 1] / nsubsets))
# see if we have enough samples to enumerate all subsets of this size
if num_samples_left * remaining_weight_vector[subset_size - 1] / nsubsets >= 1.0 - 1e-8:
num_full_subsets += 1
num_samples_left -= nsubsets
# rescale what's left of the remaining weight vector to sum to 1
if remaining_weight_vector[subset_size - 1] < 1.0:
remaining_weight_vector /= (1 - remaining_weight_vector[subset_size - 1])
# add all the samples of the current subset size
w = weight_vector[subset_size - 1] / binom(self.M, subset_size)
if subset_size <= num_paired_subset_sizes: w /= 2.0
for inds in itertools.combinations(group_inds, subset_size):
mask[:] = 0.0
mask[np.array(inds, dtype='int64')] = 1.0
self.addsample(instance.x, mask, w)
if subset_size <= num_paired_subset_sizes:
mask[:] = np.abs(mask - 1)
self.addsample(instance.x, mask, w)
else:
break
log.info("num_full_subsets = {0}".format(num_full_subsets))
# add random samples from what is left of the subset space
samples_left = self.nsamples - self.nsamplesAdded
log.debug("samples_left = {0}".format(samples_left))
if num_full_subsets != num_subset_sizes:
weight_left = np.sum(weight_vector[num_full_subsets:])
rand_sample_weight = weight_left / samples_left
log.info("weight_left = {0}".format(weight_left))
log.info("rand_sample_weight = {0}".format(rand_sample_weight))
remaining_weight_vector = weight_vector[num_full_subsets:]
remaining_weight_vector /= np.sum(remaining_weight_vector)
log.info("remaining_weight_vector = {0}".format(remaining_weight_vector))
log.info("num_paired_subset_sizes = {0}".format(num_paired_subset_sizes))
ind_set = np.arange(len(remaining_weight_vector))
while samples_left > 0:
mask[:] = 0.0
np.random.shuffle(group_inds)
ind = np.random.choice(ind_set, 1, p=remaining_weight_vector)[0]
mask[group_inds[:ind + num_full_subsets + 1]] = 1.0
samples_left -= 1
self.addsample(instance.x, mask, rand_sample_weight)
# add the compliment sample
if samples_left > 0:
mask -= 1.0
mask[:] = np.abs(mask)
self.addsample(instance.x, mask, rand_sample_weight)
samples_left -= 1
# execute the model on the synthetic samples we have created
self.run()
# solve then expand the feature importance (Shapley value) vector to contain the non-varying features
phi = np.zeros((len(self.data.groups), self.D))
phi_var = np.zeros((len(self.data.groups), self.D))
for d in range(self.D):
vphi, vphi_var = self.solve(self.nsamples / self.max_samples, d)
phi[self.varyingInds, d] = vphi
phi_var[self.varyingInds, d] = vphi_var
if not self.vector_out:
phi = np.squeeze(phi, axis=1)
phi_var = np.squeeze(phi_var, axis=1)
# return the Shapley values along with variances of the estimates
# note that if features were eliminated by l1 regression their
# variance will be 0, even though they are not perfectly known
return AdditiveExplanation(
self.link.f(self.fnull if self.vector_out else self.fnull[0]),
self.link.f(self.fx if self.vector_out else self.fx[0]),
phi, phi_var, instance, self.link, self.model, self.data
)
def varying_groups(self, x):
varying = np.zeros(len(self.data.groups))
for i in range(0, len(self.data.groups)):
inds = self.data.groups[i]
num_matches = sum(np.abs(x[0, inds] - self.data.data[:, inds]) < 1e-7, 0)
varying[i] = sum(num_matches != len(inds) * self.data.data.shape[0])
return np.nonzero(varying)[0]
def allocate(self):
self.synth_data = np.zeros((self.nsamples * self.N, self.P))
self.maskMatrix = np.zeros((self.nsamples, self.M))
self.kernelWeights = np.zeros(self.nsamples)
self.y = np.zeros((self.nsamples * self.N, self.D))
self.ey = np.zeros((self.nsamples, self.D))
self.lastMask = np.zeros(self.nsamples)
self.nsamplesAdded = 0
self.nsamplesRun = 0
if self.keep_index:
self.synth_data_index = [None] * (self.nsamples * self.N)
    def addsample(self, x, m, w):
        """Record one synthetic coalition sample.

        For each of the N background rows, features whose group mask entry is 1
        take their value from the instance *x* being explained, while masked-out
        features keep the background value.  The mask *m* (length M) and kernel
        weight *w* are stored for the later weighted regression.
        """
        offset = self.nsamplesAdded * self.N  # rows already filled in synth_data
        for i in range(self.N):
            if self.keep_index:
                self.synth_data_index[offset+i] = self.data.index_value[i]
            for j in range(self.M):
                # j indexes varying groups; k indexes the raw columns in that group
                for k in self.varyingFeatureGroups[j]:
                    if m[j] == 1.0:
                        self.synth_data[offset + i, k] = x[0, k]
                    else:
                        self.synth_data[offset + i, k] = self.data.data[i, k]
        self.maskMatrix[self.nsamplesAdded, :] = m
        self.kernelWeights[self.nsamplesAdded] = w
        self.nsamplesAdded += 1
    def run(self):
        """Evaluate the model on all not-yet-run synthetic samples and average
        the outputs over the background distribution for each coalition sample."""
        num_to_run = self.nsamplesAdded * self.N - self.nsamplesRun * self.N
        data = self.synth_data[self.nsamplesRun*self.N:self.nsamplesAdded*self.N,:]
        if self.keep_index:
            # rebuild a DataFrame carrying the stored index so the model sees it
            index = self.synth_data_index[self.nsamplesRun*self.N:self.nsamplesAdded*self.N]
            index = pd.DataFrame(index, columns=[self.data.index_name])
            data = pd.DataFrame(data, columns=self.data.group_names)
            data = pd.concat([index, data], axis=1).set_index(self.data.index_name)
        modelOut = self.model.f(data)
        if isinstance(modelOut, (pd.DataFrame, pd.Series)):
            modelOut = modelOut.values
        # if len(modelOut.shape) > 1:
        #     raise ValueError("The supplied model function should output a vector not a matrix!")
        self.y[self.nsamplesRun * self.N:self.nsamplesAdded * self.N, :] = np.reshape(modelOut, (num_to_run, self.D))

        # find the expected value of each output (weighted mean over background rows)
        for i in range(self.nsamplesRun, self.nsamplesAdded):
            eyVal = np.zeros(self.D)
            for j in range(0, self.N):
                eyVal += self.y[i * self.N + j, :] * self.data.weights[j]
            self.ey[i, :] = eyVal
            self.nsamplesRun += 1
    def solve(self, fraction_evaluated, dim):
        """Solve the weighted least squares problem for output dimension *dim*.

        Optionally performs lasso feature selection first (when l1_reg is set,
        or automatically when less than 20% of the subset space was evaluated),
        then eliminates one variable using the sum-to-output constraint and
        solves for the remaining Shapley values.  Returns (phi, phi_var); note
        phi_var is currently a vector of ones (no real variance estimate).
        """
        eyAdj = self.linkfv(self.ey[:, dim]) - self.link.f(self.fnull[dim])
        s = np.sum(self.maskMatrix, 1)

        # do feature selection if we have not well enumerated the space
        nonzero_inds = np.arange(self.M)
        log.debug("fraction_evaluated = {0}".format(fraction_evaluated))
        if (self.l1_reg not in ["auto", False, 0]) or (fraction_evaluated < 0.2 and self.l1_reg == "auto"):
            # augment the system with the complement masks so the regression
            # respects both a coalition and its complement
            w_aug = np.hstack((self.kernelWeights * (self.M - s), self.kernelWeights * s))
            log.info("np.sum(w_aug) = {0}".format(np.sum(w_aug)))
            log.info("np.sum(self.kernelWeights) = {0}".format(np.sum(self.kernelWeights)))
            w_sqrt_aug = np.sqrt(w_aug)
            eyAdj_aug = np.hstack((eyAdj, eyAdj - (self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim]))))
            eyAdj_aug *= w_sqrt_aug
            mask_aug = np.transpose(w_sqrt_aug * np.transpose(np.vstack((self.maskMatrix, self.maskMatrix - 1))))
            # NOTE(review): var_norms is computed but never used -- confirm intent
            var_norms = np.array([np.linalg.norm(mask_aug[:, i]) for i in range(mask_aug.shape[1])])
            if self.l1_reg == "auto":
                model = LassoLarsIC(criterion="aic")
            elif self.l1_reg == "bic" or self.l1_reg == "aic":
                model = LassoLarsIC(criterion=self.l1_reg)
            else:
                model = Lasso(alpha=self.l1_reg)
            model.fit(mask_aug, eyAdj_aug)
            nonzero_inds = np.nonzero(model.coef_)[0]

        if len(nonzero_inds) == 0:
            return np.zeros(self.M), np.ones(self.M)

        # eliminate one variable with the constraint that all features sum to the output
        eyAdj2 = eyAdj - self.maskMatrix[:, nonzero_inds[-1]] * (
            self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim]))
        etmp = np.transpose(np.transpose(self.maskMatrix[:, nonzero_inds[:-1]]) - self.maskMatrix[:, nonzero_inds[-1]])
        log.debug("etmp[:4,:] {0}".format(etmp[:4, :]))

        # solve a weighted least squares equation to estimate phi
        tmp = np.transpose(np.transpose(etmp) * np.transpose(self.kernelWeights))
        tmp2 = np.linalg.inv(np.dot(np.transpose(tmp), etmp))
        w = np.dot(tmp2, np.dot(np.transpose(tmp), eyAdj2))
        log.debug("np.sum(w) = {0}".format(np.sum(w)))
        log.debug("self.link(self.fx) - self.link(self.fnull) = {0}".format(
            self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim])))
        log.debug("self.fx = {0}".format(self.fx[dim]))
        log.debug("self.link(self.fx) = {0}".format(self.link.f(self.fx[dim])))
        log.debug("self.fnull = {0}".format(self.fnull[dim]))
        log.debug("self.link(self.fnull) = {0}".format(self.link.f(self.fnull[dim])))
        phi = np.zeros(self.M)
        phi[nonzero_inds[:-1]] = w
        # the eliminated variable absorbs whatever is left of the total effect
        phi[nonzero_inds[-1]] = (self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim])) - sum(w)
        log.info("phi = {0}".format(phi))

        # clean up any rounding errors
        for i in range(self.M):
            if np.abs(phi[i]) < 1e-10:
                phi[i] = 0
        return phi, np.ones(len(phi))
| 47.188755 | 137 | 0.595064 | from iml.common import convert_to_instance, convert_to_model, match_instance_to_data, match_model_to_data, convert_to_instance_with_index
from iml.explanations import AdditiveExplanation
from iml.links import convert_to_link, IdentityLink
from iml.datatypes import convert_to_data, DenseData
from scipy.special import binom
import numpy as np
import pandas as pd
import logging
import copy
import itertools
from sklearn.linear_model import LassoLarsIC, Lasso
from sklearn.cluster import KMeans
from tqdm import tqdm
log = logging.getLogger('shap')  # module-level logger shared by the kernel explainer code
def kmeans(X, k, round_values=True):
    """Summarize a dataset with *k* weighted means for use as SHAP background data.

    Runs k-means (fixed random_state for determinism) and, when *round_values*
    is True, snaps every cluster-center coordinate to the nearest value actually
    present in that column of X so the summary stays on observed data values.
    Returns a DenseData whose sample weights are the cluster sizes.
    """
    if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
        group_names = X.columns
        X = X.values
    else:
        group_names = [str(i) for i in range(X.shape[1])]
    fitted = KMeans(n_clusters=k, random_state=0).fit(X)
    if round_values:
        centers = fitted.cluster_centers_  # mutated in place, same array returned below
        for row in range(k):
            for col in range(X.shape[1]):
                nearest = np.argmin(np.abs(X[:, col] - centers[row, col]))
                centers[row, col] = X[nearest, col]
    return DenseData(fitted.cluster_centers_, group_names, None, 1.0*np.bincount(fitted.labels_))
class KernelExplainer(object):
    """Uses Kernel SHAP to explain the output of any function.

    Kernel SHAP estimates Shapley values by evaluating the model on synthetic
    samples in which subsets of features are replaced by background values,
    then solving a specially weighted local linear regression.

    Parameters
    ----------
    model : callable or supported model object
        Wrapped by ``convert_to_model``; ``self.model.f`` maps a sample matrix to outputs.
    data : numpy.array or pandas.DataFrame or DenseData
        Background dataset used to integrate out "missing" features.
    link : iml link object, default IdentityLink()
        Connects the feature importance values to the model output space.
    **kwargs
        keep_index : bool, default False -- pass the DataFrame index through to the model.
    """

    def __init__(self, model, data, link=IdentityLink(), **kwargs):
        # convert incoming inputs to standardized iml objects
        self.link = convert_to_link(link)
        self.model = convert_to_model(model)
        self.keep_index = kwargs.get("keep_index", False)
        self.data = convert_to_data(data, keep_index=self.keep_index)
        match_model_to_data(self.model, self.data)

        # enforce our current input type limitations
        assert isinstance(self.data, DenseData), "Shap explainer only supports the DenseData input currently."
        assert not self.data.transposed, "Shap explainer does not support transposed DenseData currently."

        # warn users about large background data sets
        if len(self.data.weights) > 100:
            log.warning("Using " + str(len(self.data.weights)) + " background data samples could cause " +
                        "slower run times. Consider using shap.kmeans(data, K) to summarize the background " +
                        "as K weighted samples.")

        # init our parameters
        self.N = self.data.data.shape[0]
        self.P = self.data.data.shape[1]
        self.linkfv = np.vectorize(self.link.f)
        self.nsamplesAdded = 0
        self.nsamplesRun = 0

        # find E_x[f(x)] over the background data
        if self.keep_index:
            model_null = self.model.f(self.data.convert_to_df())
        else:
            model_null = self.model.f(self.data.data)
        if isinstance(model_null, (pd.DataFrame, pd.Series)):
            model_null = model_null.values
        self.fnull = np.sum((model_null.T * self.data.weights).T, 0)

        # see if we have a vector output
        self.vector_out = True
        if len(self.fnull.shape) == 0:
            self.vector_out = False
            self.fnull = np.array([self.fnull])
            self.D = 1
        else:
            self.D = self.fnull.shape[0]

    def shap_values(self, X, **kwargs):
        """Estimate the SHAP values for a set of samples.

        Returns a matrix (# samples x # features + 1) whose last column is the
        model's base value, or a list of such matrices for vector outputs.
        Accepts ``nsamples`` and ``l1_reg`` keyword arguments (see explain).
        """
        # convert dataframes
        if str(type(X)).endswith("pandas.core.series.Series'>"):
            X = X.values
        elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
            if self.keep_index:
                index_value = X.index.values
                index_name = X.index.name
                column_name = list(X.columns)
            X = X.values

        assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
        assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"

        # single instance
        if len(X.shape) == 1:
            data = X.reshape((1, X.shape[0]))
            if self.keep_index:
                # fixed: index_value comes before index_name, matching the
                # batch branch below (arguments were previously swapped)
                data = convert_to_instance_with_index(data, column_name, index_value, index_name)
            explanation = self.explain(data, **kwargs)

            # vector-output
            s = explanation.effects.shape
            if len(s) == 2:
                outs = [np.zeros(s[0] + 1) for j in range(s[1])]
                for j in range(s[1]):
                    outs[j][:-1] = explanation.effects[:, j]
                    outs[j][-1] = explanation.base_value[j]
                return outs

            # single-output
            else:
                out = np.zeros(s[0] + 1)
                out[:-1] = explanation.effects
                out[-1] = explanation.base_value
                return out

        # explain the whole dataset
        elif len(X.shape) == 2:
            explanations = []
            for i in tqdm(range(X.shape[0]), disable=kwargs.get("silent", False)):
                data = X[i:i + 1, :]
                if self.keep_index:
                    data = convert_to_instance_with_index(data, column_name, index_value[i:i + 1], index_name)
                explanations.append(self.explain(data, **kwargs))

            # vector-output
            s = explanations[0].effects.shape
            if len(s) == 2:
                outs = [np.zeros((X.shape[0], s[0] + 1)) for j in range(s[1])]
                for i in range(X.shape[0]):
                    for j in range(s[1]):
                        outs[j][i, :-1] = explanations[i].effects[:, j]
                        outs[j][i, -1] = explanations[i].base_value[j]
                return outs

            # single-output
            else:
                out = np.zeros((X.shape[0], s[0] + 1))
                for i in range(X.shape[0]):
                    out[i, :-1] = explanations[i].effects
                    out[i, -1] = explanations[i].base_value
                return out

    def explain(self, incoming_instance, **kwargs):
        """Estimate SHAP values for one instance (1 x P row matrix)."""
        # convert incoming input to a standardized iml object
        instance = convert_to_instance(incoming_instance)
        match_instance_to_data(instance, self.data)

        # groups that do not vary from the background cannot affect the output
        self.varyingInds = self.varying_groups(instance.x)
        self.varyingFeatureGroups = [self.data.groups[i] for i in self.varyingInds]
        self.M = len(self.varyingFeatureGroups)

        # find f(x)
        if self.keep_index:
            model_out = self.model.f(instance.convert_to_df())
        else:
            model_out = self.model.f(instance.x)
        if isinstance(model_out, (pd.DataFrame, pd.Series)):
            model_out = model_out.values[0]
        self.fx = model_out[0]
        if not self.vector_out:
            self.fx = np.array([self.fx])

        # if no features vary then no feature has an effect
        if self.M == 0:
            phi = np.zeros((len(self.data.groups), self.D))
            phi_var = np.zeros((len(self.data.groups), self.D))

        # if only one feature varies then it has all the effect
        elif self.M == 1:
            phi = np.zeros((len(self.data.groups), self.D))
            phi_var = np.zeros((len(self.data.groups), self.D))
            diff = self.link.f(self.fx) - self.link.f(self.fnull)
            for d in range(self.D):
                phi[self.varyingInds[0],d] = diff[d]

        # if more than one feature varies then we have to do real work
        else:
            self.l1_reg = kwargs.get("l1_reg", "auto")

            # pick a reasonable number of samples if the user didn't specify
            self.nsamples = kwargs.get("nsamples", "auto")
            if self.nsamples == "auto":
                self.nsamples = 2 * self.M + 2**11

            # if we can enumerate all subsets then ignore the unneeded samples
            self.max_samples = 2 ** 30
            if self.M <= 30:
                self.max_samples = 2 ** self.M - 2
                if self.nsamples > self.max_samples:
                    self.nsamples = self.max_samples

            # reserve space for some of our computations
            self.allocate()

            # weight the different subset sizes
            # (int() replaces np.int, which was removed in NumPy 1.24)
            num_subset_sizes = int(np.ceil((self.M - 1) / 2.0))
            num_paired_subset_sizes = int(np.floor((self.M - 1) / 2.0))
            weight_vector = np.array([(self.M - 1.0) / (i * (self.M - i)) for i in range(1, num_subset_sizes + 1)])
            weight_vector[:num_paired_subset_sizes] *= 2
            weight_vector /= np.sum(weight_vector)
            log.debug("weight_vector = {0}".format(weight_vector))
            log.debug("num_subset_sizes = {0}".format(num_subset_sizes))
            log.debug("num_paired_subset_sizes = {0}".format(num_paired_subset_sizes))
            log.debug("M = {0}".format(self.M))

            # fill out all the subset sizes we can completely enumerate
            num_full_subsets = 0
            num_samples_left = self.nsamples
            group_inds = np.arange(self.M, dtype='int64')
            mask = np.zeros(self.M)
            remaining_weight_vector = copy.copy(weight_vector)
            for subset_size in range(1, num_subset_sizes + 1):

                # determine how many subsets (and their complements) are of the current size
                nsubsets = binom(self.M, subset_size)
                if subset_size <= num_paired_subset_sizes: nsubsets *= 2
                log.debug("subset_size = {0}".format(subset_size))
                log.debug("nsubsets = {0}".format(nsubsets))
                log.debug("self.nsamples*weight_vector[subset_size-1] = {0}".format(
                    num_samples_left * remaining_weight_vector[subset_size - 1]))
                log.debug("self.nsamples*weight_vector[subset_size-1/nsubsets = {0}".format(
                    num_samples_left * remaining_weight_vector[subset_size - 1] / nsubsets))

                # see if we have enough samples to enumerate all subsets of this size
                if num_samples_left * remaining_weight_vector[subset_size - 1] / nsubsets >= 1.0 - 1e-8:
                    num_full_subsets += 1
                    num_samples_left -= nsubsets

                    # rescale what's left of the remaining weight vector to sum to 1
                    if remaining_weight_vector[subset_size - 1] < 1.0:
                        remaining_weight_vector /= (1 - remaining_weight_vector[subset_size - 1])

                    # add all the samples of the current subset size
                    w = weight_vector[subset_size - 1] / binom(self.M, subset_size)
                    if subset_size <= num_paired_subset_sizes: w /= 2.0
                    for inds in itertools.combinations(group_inds, subset_size):
                        mask[:] = 0.0
                        mask[np.array(inds, dtype='int64')] = 1.0
                        self.addsample(instance.x, mask, w)
                        if subset_size <= num_paired_subset_sizes:
                            mask[:] = np.abs(mask - 1)
                            self.addsample(instance.x, mask, w)
                else:
                    break
            log.info("num_full_subsets = {0}".format(num_full_subsets))

            # add random samples from what is left of the subset space
            samples_left = self.nsamples - self.nsamplesAdded
            log.debug("samples_left = {0}".format(samples_left))
            if num_full_subsets != num_subset_sizes:
                weight_left = np.sum(weight_vector[num_full_subsets:])
                rand_sample_weight = weight_left / samples_left
                log.info("weight_left = {0}".format(weight_left))
                log.info("rand_sample_weight = {0}".format(rand_sample_weight))
                remaining_weight_vector = weight_vector[num_full_subsets:]
                remaining_weight_vector /= np.sum(remaining_weight_vector)
                log.info("remaining_weight_vector = {0}".format(remaining_weight_vector))
                log.info("num_paired_subset_sizes = {0}".format(num_paired_subset_sizes))
                ind_set = np.arange(len(remaining_weight_vector))
                while samples_left > 0:
                    mask[:] = 0.0
                    np.random.shuffle(group_inds)
                    ind = np.random.choice(ind_set, 1, p=remaining_weight_vector)[0]
                    mask[group_inds[:ind + num_full_subsets + 1]] = 1.0
                    samples_left -= 1
                    self.addsample(instance.x, mask, rand_sample_weight)

                    # add the complement sample
                    if samples_left > 0:
                        mask -= 1.0
                        mask[:] = np.abs(mask)
                        self.addsample(instance.x, mask, rand_sample_weight)
                        samples_left -= 1

            # execute the model on the synthetic samples we have created
            self.run()

            # solve then expand the Shapley value vector to the non-varying features
            phi = np.zeros((len(self.data.groups), self.D))
            phi_var = np.zeros((len(self.data.groups), self.D))
            for d in range(self.D):
                vphi, vphi_var = self.solve(self.nsamples / self.max_samples, d)
                phi[self.varyingInds, d] = vphi
                phi_var[self.varyingInds, d] = vphi_var

        if not self.vector_out:
            phi = np.squeeze(phi, axis=1)
            phi_var = np.squeeze(phi_var, axis=1)

        # note: features eliminated by l1 regression report variance 0
        return AdditiveExplanation(
            self.link.f(self.fnull if self.vector_out else self.fnull[0]),
            self.link.f(self.fx if self.vector_out else self.fx[0]),
            phi, phi_var, instance, self.link, self.model, self.data
        )

    def varying_groups(self, x):
        """Indices of feature groups in *x* that differ from the background data."""
        varying = np.zeros(len(self.data.groups))
        for i in range(0, len(self.data.groups)):
            inds = self.data.groups[i]
            num_matches = sum(np.abs(x[0, inds] - self.data.data[:, inds]) < 1e-7, 0)
            varying[i] = sum(num_matches != len(inds) * self.data.data.shape[0])
        return np.nonzero(varying)[0]

    def allocate(self):
        """Allocate the synthetic-sample and model-output buffers; reset counters."""
        self.synth_data = np.zeros((self.nsamples * self.N, self.P))
        self.maskMatrix = np.zeros((self.nsamples, self.M))
        self.kernelWeights = np.zeros(self.nsamples)
        self.y = np.zeros((self.nsamples * self.N, self.D))
        self.ey = np.zeros((self.nsamples, self.D))
        self.lastMask = np.zeros(self.nsamples)
        self.nsamplesAdded = 0
        self.nsamplesRun = 0
        if self.keep_index:
            self.synth_data_index = [None] * (self.nsamples * self.N)

    def addsample(self, x, m, w):
        """Record one coalition sample (mask m, kernel weight w) across all N background rows."""
        offset = self.nsamplesAdded * self.N
        for i in range(self.N):
            if self.keep_index:
                self.synth_data_index[offset+i] = self.data.index_value[i]
            for j in range(self.M):
                for k in self.varyingFeatureGroups[j]:
                    if m[j] == 1.0:
                        self.synth_data[offset + i, k] = x[0, k]
                    else:
                        self.synth_data[offset + i, k] = self.data.data[i, k]
        self.maskMatrix[self.nsamplesAdded, :] = m
        self.kernelWeights[self.nsamplesAdded] = w
        self.nsamplesAdded += 1

    def run(self):
        """Evaluate the model on pending synthetic samples; average over the background."""
        num_to_run = self.nsamplesAdded * self.N - self.nsamplesRun * self.N
        data = self.synth_data[self.nsamplesRun*self.N:self.nsamplesAdded*self.N,:]
        if self.keep_index:
            index = self.synth_data_index[self.nsamplesRun*self.N:self.nsamplesAdded*self.N]
            index = pd.DataFrame(index, columns=[self.data.index_name])
            data = pd.DataFrame(data, columns=self.data.group_names)
            data = pd.concat([index, data], axis=1).set_index(self.data.index_name)
        modelOut = self.model.f(data)
        if isinstance(modelOut, (pd.DataFrame, pd.Series)):
            modelOut = modelOut.values
        self.y[self.nsamplesRun * self.N:self.nsamplesAdded * self.N, :] = np.reshape(modelOut, (num_to_run, self.D))

        # find the expected value of each output
        for i in range(self.nsamplesRun, self.nsamplesAdded):
            eyVal = np.zeros(self.D)
            for j in range(0, self.N):
                eyVal += self.y[i * self.N + j, :] * self.data.weights[j]
            self.ey[i, :] = eyVal
            self.nsamplesRun += 1

    def solve(self, fraction_evaluated, dim):
        """Weighted least squares solve for output dim; returns (phi, phi_var)."""
        eyAdj = self.linkfv(self.ey[:, dim]) - self.link.f(self.fnull[dim])
        s = np.sum(self.maskMatrix, 1)

        # do feature selection if we have not well enumerated the space
        nonzero_inds = np.arange(self.M)
        log.debug("fraction_evaluated = {0}".format(fraction_evaluated))
        if (self.l1_reg not in ["auto", False, 0]) or (fraction_evaluated < 0.2 and self.l1_reg == "auto"):
            w_aug = np.hstack((self.kernelWeights * (self.M - s), self.kernelWeights * s))
            log.info("np.sum(w_aug) = {0}".format(np.sum(w_aug)))
            log.info("np.sum(self.kernelWeights) = {0}".format(np.sum(self.kernelWeights)))
            w_sqrt_aug = np.sqrt(w_aug)
            eyAdj_aug = np.hstack((eyAdj, eyAdj - (self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim]))))
            eyAdj_aug *= w_sqrt_aug
            mask_aug = np.transpose(w_sqrt_aug * np.transpose(np.vstack((self.maskMatrix, self.maskMatrix - 1))))
            var_norms = np.array([np.linalg.norm(mask_aug[:, i]) for i in range(mask_aug.shape[1])])
            if self.l1_reg == "auto":
                model = LassoLarsIC(criterion="aic")
            elif self.l1_reg == "bic" or self.l1_reg == "aic":
                model = LassoLarsIC(criterion=self.l1_reg)
            else:
                model = Lasso(alpha=self.l1_reg)
            model.fit(mask_aug, eyAdj_aug)
            nonzero_inds = np.nonzero(model.coef_)[0]

        if len(nonzero_inds) == 0:
            return np.zeros(self.M), np.ones(self.M)

        # eliminate one variable with the constraint that all features sum to the output
        eyAdj2 = eyAdj - self.maskMatrix[:, nonzero_inds[-1]] * (
            self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim]))
        etmp = np.transpose(np.transpose(self.maskMatrix[:, nonzero_inds[:-1]]) - self.maskMatrix[:, nonzero_inds[-1]])
        log.debug("etmp[:4,:] {0}".format(etmp[:4, :]))

        # solve a weighted least squares equation to estimate phi
        tmp = np.transpose(np.transpose(etmp) * np.transpose(self.kernelWeights))
        tmp2 = np.linalg.inv(np.dot(np.transpose(tmp), etmp))
        w = np.dot(tmp2, np.dot(np.transpose(tmp), eyAdj2))
        log.debug("np.sum(w) = {0}".format(np.sum(w)))
        log.debug("self.link(self.fx) - self.link(self.fnull) = {0}".format(
            self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim])))
        log.debug("self.fx = {0}".format(self.fx[dim]))
        log.debug("self.link(self.fx) = {0}".format(self.link.f(self.fx[dim])))
        log.debug("self.fnull = {0}".format(self.fnull[dim]))
        log.debug("self.link(self.fnull) = {0}".format(self.link.f(self.fnull[dim])))
        phi = np.zeros(self.M)
        phi[nonzero_inds[:-1]] = w
        phi[nonzero_inds[-1]] = (self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim])) - sum(w)
        log.info("phi = {0}".format(phi))

        # clean up any rounding errors
        for i in range(self.M):
            if np.abs(phi[i]) < 1e-10:
                phi[i] = 0
        return phi, np.ones(len(phi))
| true | true |
1c3189741bb21fc15cd9663b6fdad2475ea85267 | 4,214 | py | Python | UpdateStubs.py | HelpMeIAmLost/IFTestSuite37 | 9c854d6d2f4b6d5604ff2aa12e1fee21365da350 | [
"MIT"
] | null | null | null | UpdateStubs.py | HelpMeIAmLost/IFTestSuite37 | 9c854d6d2f4b6d5604ff2aa12e1fee21365da350 | [
"MIT"
] | null | null | null | UpdateStubs.py | HelpMeIAmLost/IFTestSuite37 | 9c854d6d2f4b6d5604ff2aa12e1fee21365da350 | [
"MIT"
] | null | null | null | # coding: utf-8
from common_util import *
from pathlib import Path
import os
import argparse
import sys
MIN_PYTHON = (3, 7)
data_handler = {'declarations': ['GlobalDeclarationsList.xlsx', 'Global Declarations', 'A:B', OFF],
'functions': ['RTEFunctionCalls.xlsx', 'RTE Function Calls', 'A:B', OFF]}
def filter_data(stubs_folder):
for root, dirs, files in os.walk(Path(stubs_folder)):
for file in files:
if file.endswith('.c'):
module_name = file[:-2].lower()
filename = os.path.join(root, file)
declarations_data_frame = read_excel_file(data_handler['declarations'][0],
data_handler['declarations'][1:4])
declarations_data_frame['TargetModule'] = declarations_data_frame.TargetModule.astype(str).str.lower()
declarations_filtered_data = declarations_data_frame[
declarations_data_frame['TargetModule'] == module_name
]
if len(declarations_filtered_data.head(1)) == 0:
print('No global declarations for {}'.format(file[:-2]))
else:
string = '<< Start of include and declaration area >>'
column_name = 'Declarations'
skip_count = 2
if module_name == 'acc_main' or module_name == 'acc_50ms':
skip_count += 3
spaces = ''
success = insert_lines_of_code('declarations', filename, declarations_filtered_data[column_name],
string, skip_count, spaces)
if success:
print('Finished inserting global declarations for {}'.format(file[:-2]))
else:
print('Failed to insert global declarations for {}'.format(file[:-2]))
functions_data_frame = read_excel_file(data_handler['functions'][0], data_handler['functions'][1:4])
functions_data_frame['TargetModule'] = functions_data_frame.TargetModule.astype(str).str.lower()
functions_filtered_data = functions_data_frame[functions_data_frame['TargetModule'] == module_name]
if len(functions_filtered_data.head(1)) == 0:
print('No RTE read/write calls for {}'.format(file[:-2]))
else:
string = '<< Start of runnable implementation >>'
column_name = 'FunctionCalls'
skip_count = 3
if module_name == 'acc_main' or module_name == 'acc_50ms':
skip_count += 3
spaces = ' '
success = insert_lines_of_code('functions', filename, functions_filtered_data[column_name],
string, skip_count, spaces)
if success:
print('Finished inserting RTE read and write function calls for {}'.format(file[:-2]))
else:
print('Failed to insert RTE read and write function calls for {}'.format(file[:-2]))
# Refuse to run on interpreters older than the tested minimum.
if sys.version_info < MIN_PYTHON:
    sys.exit("Python %s.%s or later is required. Please check your Python version.\n" % MIN_PYTHON)

# Read arguments
parser = argparse.ArgumentParser()
parser.add_argument('-s', dest='stubs_folder', help='stubs folder', default='Stubs/')
args = parser.parse_args()

# Verify the stubs folder and both generated workbooks exist before processing.
if not os.path.exists(args.stubs_folder):
    print('{} not found!'.format(args.stubs_folder))
    print('Please make sure {} is in the script folder!'.format(args.stubs_folder))
elif not os.path.exists(data_handler['declarations'][0]):
    print('{} not found!'.format(data_handler['declarations'][0]))
    print('Please run the PrepareData script first!')
elif not os.path.exists(data_handler['functions'][0]):
    print('{} not found!'.format(data_handler['functions'][0]))
    print('Please run the PrepareData script first!')
else:
    filter_data(args.stubs_folder)
from common_util import *
from pathlib import Path
import os
import argparse
import sys
MIN_PYTHON = (3, 7)  # minimum interpreter version this script was written for
# Per-section config: [workbook filename, sheet name, Excel column range, flag].
# NOTE(review): the 4th element OFF is imported from common_util -- confirm its meaning there.
data_handler = {'declarations': ['GlobalDeclarationsList.xlsx', 'Global Declarations', 'A:B', OFF],
                'functions': ['RTEFunctionCalls.xlsx', 'RTE Function Calls', 'A:B', OFF]}
def filter_data(stubs_folder):
    """Patch every stub ``.c`` file under *stubs_folder* with generated code.

    For each C stub, two sections are filled in from the prepared Excel
    workbooks: global declarations and RTE read/write function calls.
    Workbook rows are matched case-insensitively on the stub's module name
    (the file name without its ``.c`` suffix).
    """

    def _load_frame(key):
        # Read one workbook (path / sheet / columns come from data_handler)
        # and lower-case TargetModule for case-insensitive matching.
        frame = read_excel_file(data_handler[key][0], data_handler[key][1:4])
        frame['TargetModule'] = frame.TargetModule.astype(str).str.lower()
        return frame

    def _insert_section(kind, filename, rows, column, marker, base_skip,
                        spaces, module_name, stub_name, no_rows_msg, label):
        # Insert one section into the stub; acc_main/acc_50ms stubs carry
        # three extra header lines before the marker, hence the larger skip.
        if len(rows.head(1)) == 0:
            print(no_rows_msg.format(stub_name))
            return
        skip_count = base_skip
        if module_name == 'acc_main' or module_name == 'acc_50ms':
            skip_count += 3
        success = insert_lines_of_code(kind, filename, rows[column],
                                       marker, skip_count, spaces)
        if success:
            print('Finished inserting {} for {}'.format(label, stub_name))
        else:
            print('Failed to insert {} for {}'.format(label, stub_name))

    # The workbooks are identical for every stub, so read them once here
    # instead of re-parsing Excel for each .c file as the loop used to do.
    declarations_frame = _load_frame('declarations')
    functions_frame = _load_frame('functions')

    for root, dirs, files in os.walk(Path(stubs_folder)):
        for file in files:
            if not file.endswith('.c'):
                continue
            stub_name = file[:-2]
            module_name = stub_name.lower()
            filename = os.path.join(root, file)
            _insert_section(
                'declarations', filename,
                declarations_frame[declarations_frame['TargetModule'] == module_name],
                'Declarations', '<< Start of include and declaration area >>',
                2, '', module_name, stub_name,
                'No global declarations for {}', 'global declarations')
            _insert_section(
                'functions', filename,
                functions_frame[functions_frame['TargetModule'] == module_name],
                'FunctionCalls', '<< Start of runnable implementation >>',
                3, '    ', module_name, stub_name,
                'No RTE read/write calls for {}',
                'RTE read and write function calls')
# Refuse to run on interpreters older than the supported minimum.
if sys.version_info < MIN_PYTHON:
    sys.exit("Python %s.%s or later is required. Please check your Python version.\n" % MIN_PYTHON)
# Command-line arguments: only the stubs folder location is configurable.
parser = argparse.ArgumentParser()
parser.add_argument('-s', dest='stubs_folder', help='stubs folder', default='Stubs/')
args = parser.parse_args()
# Validate the stubs folder and both prepared Excel workbooks before
# patching anything; each failure prints a hint for the user.
if not os.path.exists(args.stubs_folder):
    print('{} not found!'.format(args.stubs_folder))
    print('Please make sure {} is in the script folder!'.format(args.stubs_folder))
elif not os.path.exists(data_handler['declarations'][0]):
    print('{} not found!'.format(data_handler['declarations'][0]))
    print('Please run the PrepareData script first!')
elif not os.path.exists(data_handler['functions'][0]):
    print('{} not found!'.format(data_handler['functions'][0]))
    print('Please run the PrepareData script first!')
else:
    # All inputs present - patch the stub files.
    filter_data(args.stubs_folder)
| true | true |
1c318a018feb3440fc3041c83da4f95cb4284397 | 2,208 | py | Python | setup.py | trhoden/ceph-deploy | 29224648961bdcb3a240a0e5f748a675940d9931 | [
"MIT"
] | null | null | null | setup.py | trhoden/ceph-deploy | 29224648961bdcb3a240a0e5f748a675940d9931 | [
"MIT"
] | null | null | null | setup.py | trhoden/ceph-deploy | 29224648961bdcb3a240a0e5f748a675940d9931 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
import os
import sys
import ceph_deploy
from vendor import vendorize, clean_vendor
def read(fname):
    """Return the text content of *fname*, resolved relative to this file.

    Used to load README.rst for the ``long_description`` metadata.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    # Use a context manager so the handle is closed even if read() raises;
    # the previous version leaked the open file object.
    with open(path) as f:
        return f.read()
# argparse only ships with the standard library from 2.7 / 3.2 onwards; on
# older interpreters it has to be pulled in as an explicit dependency.
pyversion = sys.version_info[:2]
needs_argparse = pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1)
install_requires = ['argparse'] if needs_argparse else []
#
# Vendor (bundle) libraries that are not part of install_requires.
# Setting the CEPH_DEPLOY_NO_VENDOR environment variable skips the
# bundling and removes any previously vendored copy instead.
#
if os.environ.get('CEPH_DEPLOY_NO_VENDOR'):
    clean_vendor('remoto')
else:
    vendorize([
        # (package name, pinned version, command used to vendor it)
        ('remoto', '0.0.29', ['python', 'vendor.py']),
    ])
# Distribution metadata; the version is read from the ceph_deploy package
# itself so it is defined in exactly one place.
setup(
    name='ceph-deploy',
    version=ceph_deploy.__version__,
    packages=find_packages(),
    author='Inktank',
    author_email='ceph-devel@vger.kernel.org',
    description='Deploy Ceph with minimal infrastructure',
    long_description=read('README.rst'),
    license='MIT',
    keywords='ceph deploy',
    url="https://github.com/ceph/ceph-deploy",
    install_requires=[
        'setuptools',
    ] + install_requires,  # 'argparse' is appended above on pre-2.7/3.2 interpreters
    tests_require=[
        'pytest >=2.1.3',
        'mock >=1.0b1',
    ],
    entry_points={
        # One console script that dispatches to sub-commands; each
        # 'ceph_deploy.cli' entry below registers one ceph-deploy verb.
        'console_scripts': [
            'ceph-deploy = ceph_deploy.cli:main',
        ],
        'ceph_deploy.cli': [
            'new = ceph_deploy.new:make',
            'install = ceph_deploy.install:make',
            'uninstall = ceph_deploy.install:make_uninstall',
            'purge = ceph_deploy.install:make_purge',
            'purgedata = ceph_deploy.install:make_purge_data',
            'mon = ceph_deploy.mon:make',
            'gatherkeys = ceph_deploy.gatherkeys:make',
            'osd = ceph_deploy.osd:make',
            'disk = ceph_deploy.osd:make_disk',
            'mds = ceph_deploy.mds:make',
            'forgetkeys = ceph_deploy.forgetkeys:make',
            'config = ceph_deploy.config:make',
            'admin = ceph_deploy.admin:make',
            'pkg = ceph_deploy.pkg:make',
            'calamari = ceph_deploy.calamari:make',
            'rgw = ceph_deploy.rgw:make',
            'repo = ceph_deploy.repo:make',
        ],
    },
)
| 27.259259 | 75 | 0.602808 | from setuptools import setup, find_packages
import os
import sys
import ceph_deploy
from vendor import vendorize, clean_vendor
def read(fname):
    """Return the text content of *fname*, resolved relative to this file.

    Used to load README.rst for the ``long_description`` metadata.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    # Context manager closes the handle even on error; the previous
    # version opened the file and never closed it.
    with open(path) as f:
        return f.read()
# argparse is only bundled with the stdlib from 2.7 / 3.2 on, so older
# interpreters need it declared as a dependency.
pyversion = sys.version_info[:2]
needs_argparse = pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1)
install_requires = ['argparse'] if needs_argparse else []
# Vendoring control: if CEPH_DEPLOY_NO_VENDOR is set in the environment,
# drop any previously bundled copy of remoto; otherwise bundle the pinned
# release via the vendor.py helper.
if os.environ.get('CEPH_DEPLOY_NO_VENDOR'):
    clean_vendor('remoto')
else:
    vendorize([
        # (package name, pinned version, command used to vendor it)
        ('remoto', '0.0.29', ['python', 'vendor.py']),
    ])
# Distribution metadata; the version comes from the ceph_deploy package
# itself so it only has to be bumped in one place.
setup(
    name='ceph-deploy',
    version=ceph_deploy.__version__,
    packages=find_packages(),
    author='Inktank',
    author_email='ceph-devel@vger.kernel.org',
    description='Deploy Ceph with minimal infrastructure',
    long_description=read('README.rst'),
    license='MIT',
    keywords='ceph deploy',
    url="https://github.com/ceph/ceph-deploy",
    install_requires=[
        'setuptools',
    ] + install_requires,  # plus 'argparse' on pre-2.7/3.2 interpreters
    tests_require=[
        'pytest >=2.1.3',
        'mock >=1.0b1',
    ],
    entry_points={
        # Single console script; each 'ceph_deploy.cli' entry registers
        # one ceph-deploy sub-command.
        'console_scripts': [
            'ceph-deploy = ceph_deploy.cli:main',
        ],
        'ceph_deploy.cli': [
            'new = ceph_deploy.new:make',
            'install = ceph_deploy.install:make',
            'uninstall = ceph_deploy.install:make_uninstall',
            'purge = ceph_deploy.install:make_purge',
            'purgedata = ceph_deploy.install:make_purge_data',
            'mon = ceph_deploy.mon:make',
            'gatherkeys = ceph_deploy.gatherkeys:make',
            'osd = ceph_deploy.osd:make',
            'disk = ceph_deploy.osd:make_disk',
            'mds = ceph_deploy.mds:make',
            'forgetkeys = ceph_deploy.forgetkeys:make',
            'config = ceph_deploy.config:make',
            'admin = ceph_deploy.admin:make',
            'pkg = ceph_deploy.pkg:make',
            'calamari = ceph_deploy.calamari:make',
            'rgw = ceph_deploy.rgw:make',
            'repo = ceph_deploy.repo:make',
        ],
    },
)
| true | true |
1c318b13dcceed8ba7497aee78a7f1008cef9e08 | 863 | py | Python | Data_Pipelines_with_Apache_Airflow/plugins/operators/load_fact.py | bayatim/udacityDataEngineeringProjects | d3533eaec27c3b6af4d1f4b3e7bf385b3106121f | [
"MIT"
] | 1 | 2021-02-05T10:45:22.000Z | 2021-02-05T10:45:22.000Z | Data_Pipelines_with_Apache_Airflow/plugins/operators/load_fact.py | bayatim/udacityDataEngineeringProjects | d3533eaec27c3b6af4d1f4b3e7bf385b3106121f | [
"MIT"
] | 6 | 2020-12-27T21:30:34.000Z | 2021-02-05T09:10:33.000Z | Data_Pipelines_with_Apache_Airflow/plugins/operators/load_fact.py | bayatim/udacityDataEngineeringProjects | d3533eaec27c3b6af4d1f4b3e7bf385b3106121f | [
"MIT"
] | null | null | null | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadFactOperator(BaseOperator):
    """Populate a Redshift fact table by running a pre-built SQL statement.

    :param redshift_conn_id: Airflow connection id of the Redshift cluster.
    :param sql_query: SQL statement that inserts the fact rows.
    """

    # Color of this operator's boxes in the Airflow UI graph view.
    ui_color = '#F98866'

    @apply_defaults
    def __init__(self, redshift_conn_id="", sql_query="", *args, **kwargs):
        super(LoadFactOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        self.sql_query = sql_query

    def execute(self, context):
        """Open a Postgres hook against Redshift and run the load query."""
        self.log.info('Creating redshift hook')
        hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        self.log.info('Redshift hook is created!')
        self.log.info('Start filling fact table')
        hook.run(self.sql_query)
        self.log.info('Fact table is loaded!')
self.log.info('Fact table is loaded!') | 33.192308 | 76 | 0.665122 | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadFactOperator(BaseOperator):
    """Load a fact table in Redshift by executing the supplied SQL."""

    ui_color = '#F98866'

    @apply_defaults
    def __init__(self, redshift_conn_id="", sql_query="", *args, **kwargs):
        super(LoadFactOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        self.sql_query = sql_query

    def execute(self, context):
        """Run the configured SQL against the Redshift connection."""
        self.log.info('Creating redshift hook')
        postgres = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        self.log.info('Redshift hook is created!')
        self.log.info('Start filling fact table')
        postgres.run(self.sql_query)
        self.log.info('Fact table is loaded!')
1c318b428f1f698dfb5fb04d289d7a56df932024 | 2,082 | py | Python | src/pyams_file/generations/evolve1.py | Py-AMS/pyams-file | a5155791787ba38e221896e9ce7d33177ae8a05b | [
"ZPL-2.1"
] | null | null | null | src/pyams_file/generations/evolve1.py | Py-AMS/pyams-file | a5155791787ba38e221896e9ce7d33177ae8a05b | [
"ZPL-2.1"
] | null | null | null | src/pyams_file/generations/evolve1.py | Py-AMS/pyams-file | a5155791787ba38e221896e9ce7d33177ae8a05b | [
"ZPL-2.1"
] | null | null | null | #
# Copyright (c) 2008-2019 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_file.generations.evolve1 module
This generation module is looking for existing thumbnails and remove them from the database
to avois a bug in previous release which was leading to blob files never being correctly deleted
on database packing.
"""
import logging
from zope.intid import IIntIds
from pyams_file.interfaces import IMediaFile
from pyams_file.interfaces.thumbnail import IThumbnails
from pyams_utils.registry import get_local_registry, get_utility, set_local_registry
# Docstring markup convention for this module.
__docformat__ = 'restructuredtext'
# Logger used by this upgrade step.
LOGGER = logging.getLogger('PyAMS (file)')
def evolve(site):
    """Generation 1: drop every stored thumbnail so its blob can be freed.

    Earlier releases never released thumbnail blobs on database packing;
    clearing all thumbnails lets a subsequent pack reclaim that space.
    """
    previous_registry = get_local_registry()
    try:
        updated_images = set()
        set_local_registry(site.getSiteManager())
        LOGGER.warning("Removing all thumbnails from database to free unused blobs...")
        intids = get_utility(IIntIds)
        for ref in list(intids.refs.keys()):
            candidate = intids.queryObject(ref)
            if not IMediaFile.providedBy(candidate):
                continue
            LOGGER.debug(">>> removing thumbnails for image {!r}".format(candidate))
            thumbnails = IThumbnails(candidate, None)
            if thumbnails is None:
                continue
            updated_images.add(candidate)
            thumbnails.clear_thumbnails()
        LOGGER.warning("Thumbnails cleanup is finished. Launch *zeopack* (for ZEO storage) "
                       "or *zodbpack* (for Relstorage) command to remove all unused blobs.")
        LOGGER.warning("{} images updated".format(len(updated_images)))
    finally:
        # Always restore whatever registry was active before the upgrade.
        set_local_registry(previous_registry)
| 37.854545 | 96 | 0.70317 |
import logging
from zope.intid import IIntIds
from pyams_file.interfaces import IMediaFile
from pyams_file.interfaces.thumbnail import IThumbnails
from pyams_utils.registry import get_local_registry, get_utility, set_local_registry
# Docstring markup convention for this module.
__docformat__ = 'restructuredtext'
# Logger used by this upgrade step.
LOGGER = logging.getLogger('PyAMS (file)')
def evolve(site):
    """Evolve 1: remove all image thumbnails so their blobs can be freed.

    Works around earlier releases in which thumbnail blobs were never
    reclaimed on database packing.
    """
    # Remember the active registry so it can be restored afterwards.
    registry = get_local_registry()
    try:
        medias = set()
        set_local_registry(site.getSiteManager())
        LOGGER.warning("Removing all thumbnails from database to free unused blobs...")
        intids = get_utility(IIntIds)
        # Walk every registered object and clear thumbnails on media files.
        for ref in list(intids.refs.keys()):
            obj = intids.queryObject(ref)
            if IMediaFile.providedBy(obj):
                LOGGER.debug(">>> removing thumbnails for image {!r}".format(obj))
                thumbnails = IThumbnails(obj, None)
                if thumbnails is not None:
                    medias.add(obj)
                    thumbnails.clear_thumbnails()
        LOGGER.warning("Thumbnails cleanup is finished. Launch *zeopack* (for ZEO storage) "
                       "or *zodbpack* (for Relstorage) command to remove all unused blobs.")
        LOGGER.warning("{} images updated".format(len(medias)))
    finally:
        # Always restore the previously active registry.
        set_local_registry(registry)
| true | true |
1c318de5ca5090d21826dd36782e53089996028d | 640 | py | Python | klimaat_helpdesk/core/models.py | JoselineHouwman/helpdesk_klimaat | 0b55fefb2fe7bd7ba4f3c042963f3b72dc2e8597 | [
"MIT"
] | null | null | null | klimaat_helpdesk/core/models.py | JoselineHouwman/helpdesk_klimaat | 0b55fefb2fe7bd7ba4f3c042963f3b72dc2e8597 | [
"MIT"
] | null | null | null | klimaat_helpdesk/core/models.py | JoselineHouwman/helpdesk_klimaat | 0b55fefb2fe7bd7ba4f3c042963f3b72dc2e8597 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
class Question(models.Model):
    """Main element to drive the flow of the website. Questions are what the user can ask and which will trigger the
    rest of the actions on the website
    """
    # The question text itself; required (blank and null both forbidden).
    question = models.TextField(verbose_name=_('Your Question'), blank=False, null=False)
    # Optional contact address of the asker.
    user_email = models.EmailField(verbose_name=_('User Email'), blank=True, null=True)
    # IP address the question was submitted from; optional.
    asked_by_ip = models.GenericIPAddressField(null=True, blank=True)
    # Set automatically when the row is first created.
    date_asked = models.DateTimeField(auto_now_add=True)
    # Tri-state moderation flag; defaults to None - presumably "not yet
    # reviewed", confirm against the moderation views.
    # NOTE(review): NullBooleanField is deprecated in newer Django;
    # consider BooleanField(null=True) when upgrading.
    approved = models.NullBooleanField(default=None)
| 42.666667 | 116 | 0.757813 | from django.db import models
from django.utils.translation import ugettext_lazy as _
class Question(models.Model):
    """A question submitted by a visitor; drives the rest of the workflow."""

    # The question text itself; required (blank and null both forbidden).
    question = models.TextField(verbose_name=_('Your Question'), blank=False, null=False)
    # Optional contact address of the asker.
    user_email = models.EmailField(verbose_name=_('User Email'), blank=True, null=True)
    # IP address the question was submitted from; optional.
    asked_by_ip = models.GenericIPAddressField(null=True, blank=True)
    # Set automatically when the row is first created.
    date_asked = models.DateTimeField(auto_now_add=True)
    # Tri-state moderation flag, defaults to None - presumably "pending";
    # NOTE(review): NullBooleanField is deprecated in newer Django.
    approved = models.NullBooleanField(default=None)
| true | true |
1c318df7405891dc0ced0fe09975883ce3151a01 | 1,665 | py | Python | tests/service/test_files.py | mralext20/avwx-engine | 4eabc2a4a08cd931d6f0fab7590ea09390af43e2 | [
"MIT"
] | null | null | null | tests/service/test_files.py | mralext20/avwx-engine | 4eabc2a4a08cd931d6f0fab7590ea09390af43e2 | [
"MIT"
] | null | null | null | tests/service/test_files.py | mralext20/avwx-engine | 4eabc2a4a08cd931d6f0fab7590ea09390af43e2 | [
"MIT"
] | null | null | null | """
FileService API Tests
"""
# pylint: disable=protected-access,missing-class-docstring,unidiomatic-typecheck
# library
import pytest
# module
from avwx import exceptions, service
# tests
from .test_base import BaseTestService
class TestScrapeService(BaseTestService):

    service_class = service.files.FileService
    required_attrs = (
        "update_interval",
        "_updating",
        "last_updated",
        "is_outdated",
        "update",
    )

    def test_not_implemented(self):
        """The abstract FileService base must refuse direct use."""
        # Subclasses rerun this test with concrete services, so only the
        # exact base class is exercised here.
        if type(self.serv) is not service.files.FileService:
            return
        # pylint: disable=no-member,pointless-statement
        with self.assertRaises(NotImplementedError):
            self.serv._extract(None, None)
        with self.assertRaises(NotImplementedError):
            self.serv._urls

    @pytest.mark.asyncio
    async def test_async_fetch_exceptions(self):
        """Invalid station codes must raise BadStation from async_fetch."""
        for bad_code in ("12K", "MAYT"):
            with self.assertRaises(exceptions.BadStation):
                await self.serv.async_fetch(bad_code)
        # A bare ScrapeService has no URL configured, so even a valid
        # station cannot be fetched.
        if type(self.serv) is service.scrape.ScrapeService:
            with self.assertRaises(NotImplementedError):
                await self.serv.async_fetch("KJFK")
class TestNBM(TestScrapeService):
    """Run the shared service tests against the NOAA NBM file service."""

    service_class = service.NOAA_NBM
    report_type = "nbs"
    stations = ["KJFK", "KMCO", "PHNL"]
class TestGFS(TestScrapeService):
    """Run the shared service tests against the NOAA GFS file service."""

    service_class = service.NOAA_GFS
    report_type = "mav"
    stations = ["KJFK", "KLAX", "PHNL"]
| 26.854839 | 80 | 0.663664 |
import pytest
from avwx import exceptions, service
from .test_base import BaseTestService
class TestScrapeService(BaseTestService):
    """Exercises the abstract FileService base through BaseTestService."""

    service_class = service.files.FileService
    required_attrs = (
        "update_interval",
        "_updating",
        "last_updated",
        "is_outdated",
        "update",
    )
    def test_not_implemented(self):
        """The bare FileService must raise NotImplementedError when used."""
        # Subclasses rerun this test with concrete services, so only the
        # exact base class is exercised here.
        if type(self.serv) != service.files.FileService:
            return
        with self.assertRaises(NotImplementedError):
            self.serv._extract(None, None)
        with self.assertRaises(NotImplementedError):
            self.serv._urls
    @pytest.mark.asyncio
    async def test_async_fetch_exceptions(self):
        """Invalid station idents must raise BadStation from async_fetch."""
        for station in ("12K", "MAYT"):
            with self.assertRaises(exceptions.BadStation):
                await self.serv.async_fetch(station)
        # A plain ScrapeService has no URL configured, so even a valid
        # station cannot be fetched.
        if type(self.serv) == service.scrape.ScrapeService:
            with self.assertRaises(NotImplementedError):
                await self.serv.async_fetch("KJFK")
class TestNBM(TestScrapeService):
    """Run the shared service tests against the NOAA NBM file service."""

    service_class = service.NOAA_NBM
    report_type = "nbs"
    stations = ["KJFK", "KMCO", "PHNL"]
class TestGFS(TestScrapeService):
    """Run the shared service tests against the NOAA GFS file service."""

    service_class = service.NOAA_GFS
    report_type = "mav"
    stations = ["KJFK", "KLAX", "PHNL"]
| true | true |
1c318ed0111d99108964e61ea8f66ed8c3a925c2 | 8,521 | py | Python | salt/payload.py | grischa/salt | e7d95eed5a5d3762c96b89b9a9c685d793881912 | [
"Apache-2.0"
] | 1 | 2015-05-20T16:55:50.000Z | 2015-05-20T16:55:50.000Z | salt/payload.py | grischa/salt | e7d95eed5a5d3762c96b89b9a9c685d793881912 | [
"Apache-2.0"
] | null | null | null | salt/payload.py | grischa/salt | e7d95eed5a5d3762c96b89b9a9c685d793881912 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Many aspects of the salt payload need to be managed, from the return of
encrypted keys to general payload dynamics and packaging, these happen
in here
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
import salt.log
import salt.ext.six as six
from salt.exceptions import SaltReqTimeoutError
# Import third party libs
try:
    import zmq
except ImportError:
    # No need for zeromq in local mode, so a missing binding is tolerated.
    pass
log = logging.getLogger(__name__)
try:
    # Attempt to import the (preferred) compiled msgpack bindings.
    import msgpack
    # There is a serialization issue on ARM and potentially other platforms
    # for some msgpack bindings: a round-trip of a simple list comes back
    # as None, so probe for it and treat such a binding as unusable.
    if msgpack.loads(msgpack.dumps([1, 2, 3]), use_list=True) is None:
        raise ImportError
except ImportError:
    # Fall back to the pure-Python implementation, msgpack_pure.
    try:
        import msgpack_pure as msgpack
    except ImportError:
        # TODO: Come up with a sane way to get a configured logfile
        # and write to the logfile when this error is hit also
        LOG_FORMAT = '[%(levelname)-8s] %(message)s'
        salt.log.setup_console_logger(log_format=LOG_FORMAT)
        log.fatal('Unable to import msgpack or msgpack_pure python modules')
        # Don't exit if msgpack is not available, this is to make local mode
        # work without msgpack
        #sys.exit(salt.exitcodes.EX_GENERIC)
def package(payload):
    '''
    Serialize *payload* for the wire.

    Currently a thin wrapper around ``msgpack.dumps``; keeping the wrapper
    gives a single central place to swap the serialization scheme later.
    '''
    packed = msgpack.dumps(payload)
    return packed
def unpackage(package_):
    '''
    Deserialize a message previously produced by :func:`package`.
    '''
    unpacked = msgpack.loads(package_, use_list=True)
    return unpacked
def format_payload(enc, **kwargs):
    '''
    Build and serialize a payload: ``enc`` names the encryption type and
    every keyword argument becomes an entry in the payload's load dict.
    '''
    # **kwargs is already a fresh dict owned by this call, so it can be
    # copied wholesale instead of key by key as before.
    payload = {'enc': enc, 'load': dict(kwargs)}
    return package(payload)
class Serial(object):
    '''
    Create a serialization object, this object manages all message
    serialization in Salt
    '''
    def __init__(self, opts):
        # Accept the full opts dict, a bare serializer name, or anything
        # else (which falls back to the msgpack default).
        if isinstance(opts, dict):
            self.serial = opts.get('serial', 'msgpack')
        elif isinstance(opts, str):
            self.serial = opts
        else:
            self.serial = 'msgpack'

    def loads(self, msg):
        '''
        Deserialize a msgpack message, logging and re-raising on failure.
        '''
        try:
            return msgpack.loads(msg, use_list=True)
        except Exception as exc:
            # Message fix: the concatenated literal previously ran the
            # sentences together with no separating spaces.
            log.critical('Could not deserialize msgpack message: {0} '
                         'This often happens when trying to read a file not in binary mode. '
                         'Please open an issue and include the following error: {1}'.format(msg, exc))
            raise

    def load(self, fn_):
        '''
        Read, close and deserialize the given open file object.
        '''
        data = fn_.read()
        fn_.close()
        return self.loads(data)

    def dumps(self, msg):
        '''
        Serialize ``msg`` with msgpack.
        '''
        try:
            return msgpack.dumps(msg)
        except TypeError:
            if msgpack.version >= (0, 2, 0):
                # msgpack >= 0.2.0 serializes OrderedDict natively, so a
                # TypeError there is a genuine error - re-raise it.
                raise
            # msgpack < 0.2.0 identifies OrderedDict as a plain dict and
            # cannot use custom types for it, so convert by hand, walking
            # nested dicts, lists and tuples.
            def odict_encoder(obj):
                if isinstance(obj, dict):
                    for key, value in six.iteritems(obj.copy()):
                        obj[key] = odict_encoder(value)
                    return dict(obj)
                elif isinstance(obj, (list, tuple)):
                    obj = list(obj)
                    for idx, entry in enumerate(obj):
                        obj[idx] = odict_encoder(entry)
                    return obj
                return obj
            return msgpack.dumps(odict_encoder(msg))
        except SystemError as exc:
            # Bug fix: this used to call .format() on the *return value* of
            # log.critical() (None), raising AttributeError instead of
            # logging. Pass the values as lazy logger arguments instead.
            log.critical(
                'Unable to serialize message! Consider upgrading msgpack. '
                'Message which failed was %s with exception %s', msg, exc)

    def dump(self, msg, fn_):
        '''
        Serialize ``msg`` into the given file object, then close it.
        '''
        fn_.write(self.dumps(msg))
        fn_.close()
class SREQ(object):
    '''
    Create a generic interface to wrap salt zeromq req calls.
    '''
    def __init__(self, master, id_='', serial='msgpack', linger=0):
        # master: ZMQ endpoint URI; id_: optional socket identity;
        # linger: LINGER value applied to the lazily created socket.
        self.master = master
        self.id_ = id_
        self.serial = Serial(serial)
        self.linger = linger
        self.context = zmq.Context()
        self.poller = zmq.Poller()
    @property
    def socket(self):
        '''
        Lazily create (and cache) the REQ socket on first access.
        '''
        if not hasattr(self, '_socket'):
            # create a new one
            self._socket = self.context.socket(zmq.REQ)
            # Option constants are guarded with hasattr, presumably to keep
            # older pyzmq releases working - confirm against the minimum
            # supported pyzmq version.
            if hasattr(zmq, 'RECONNECT_IVL_MAX'):
                self._socket.setsockopt(
                    zmq.RECONNECT_IVL_MAX, 5000
                )
            if self.master.startswith('tcp://['):
                # Hint PF type if bracket enclosed IPv6 address
                if hasattr(zmq, 'IPV6'):
                    self._socket.setsockopt(zmq.IPV6, 1)
                elif hasattr(zmq, 'IPV4ONLY'):
                    self._socket.setsockopt(zmq.IPV4ONLY, 0)
            self._socket.linger = self.linger
            if self.id_:
                self._socket.setsockopt(zmq.IDENTITY, self.id_)
            self._socket.connect(self.master)
        return self._socket
    def clear_socket(self):
        '''
        Unregister and delete the cached socket, if one was created.
        '''
        if hasattr(self, '_socket'):
            # poller.sockets is a dict in some pyzmq versions and a list of
            # (socket, flags) pairs in others - presumably why both shapes
            # are handled; confirm against the supported pyzmq range.
            if isinstance(self.poller.sockets, dict):
                for socket in self.poller.sockets.keys():
                    self.poller.unregister(socket)
            else:
                for socket in self.poller.sockets:
                    self.poller.unregister(socket[0])
            del self._socket
    def send(self, enc, load, tries=1, timeout=60):
        '''
        Serialize and send a payload built from ``enc`` (the encryption
        type) and ``load``; retry up to ``tries`` times, waiting
        ``timeout`` seconds per attempt, and return the deserialized reply.
        '''
        payload = {'enc': enc}
        payload['load'] = load
        pkg = self.serial.dumps(payload)
        self.socket.send(pkg)
        self.poller.register(self.socket, zmq.POLLIN)
        tried = 0
        while True:
            # poll() takes milliseconds; timeout is in seconds.
            polled = self.poller.poll(timeout * 1000)
            tried += 1
            if polled:
                break
            if tries > 1:
                log.info('SaltReqTimeoutError: after {0} seconds. (Try {1} of {2})'.format(
                    timeout, tried, tries))
            if tried >= tries:
                # Out of retries: drop the socket so the next call starts
                # from a clean REQ state, then surface the timeout.
                self.clear_socket()
                raise SaltReqTimeoutError(
                    'SaltReqTimeoutError: after {0} seconds, ran {1} tries'.format(timeout * tried, tried)
                )
        return self.serial.loads(self.socket.recv())
    def send_auto(self, payload, tries=1, timeout=60):
        '''
        Detect the encryption type based on the payload and send it.
        '''
        enc = payload.get('enc', 'clear')
        load = payload.get('load', {})
        return self.send(enc, load, tries, timeout)
    def destroy(self):
        '''
        Close every registered socket (with LINGER 1) and terminate the
        ZMQ context.
        '''
        if isinstance(self.poller.sockets, dict):
            for socket in self.poller.sockets.keys():
                if socket.closed is False:
                    socket.setsockopt(zmq.LINGER, 1)
                    socket.close()
                self.poller.unregister(socket)
        else:
            for socket in self.poller.sockets:
                if socket[0].closed is False:
                    socket[0].setsockopt(zmq.LINGER, 1)
                    socket[0].close()
                self.poller.unregister(socket[0])
        if self.socket.closed is False:
            self.socket.setsockopt(zmq.LINGER, 1)
            self.socket.close()
        if self.context.closed is False:
            self.context.term()
    def __del__(self):
        # Best-effort cleanup when the wrapper is garbage collected.
        self.destroy()
| 33.155642 | 106 | 0.566013 |
from __future__ import absolute_import
import logging
import salt.log
import salt.ext.six as six
from salt.exceptions import SaltReqTimeoutError
try:
    import zmq
except ImportError:
    # No need for zeromq in local mode, so a missing binding is tolerated.
    pass
log = logging.getLogger(__name__)
# Prefer the compiled msgpack bindings; fall back to msgpack_pure, and as a
# last resort continue without a serializer so local mode still works.
try:
    import msgpack
    # Some bindings (e.g. on ARM) round-trip a simple list to None; treat
    # such a broken binding as missing.
    if msgpack.loads(msgpack.dumps([1, 2, 3]), use_list=True) is None:
        raise ImportError
except ImportError:
    try:
        import msgpack_pure as msgpack
    except ImportError:
        LOG_FORMAT = '[%(levelname)-8s] %(message)s'
        salt.log.setup_console_logger(log_format=LOG_FORMAT)
        log.fatal('Unable to import msgpack or msgpack_pure python modules')
        # Deliberately no sys.exit here, so local mode can still
        # work without msgpack
        #sys.exit(salt.exitcodes.EX_GENERIC)
def package(payload):
    '''
    Serialize *payload* with msgpack for transmission.
    '''
    return msgpack.dumps(payload)
def unpackage(package_):
    '''
    Deserialize a msgpack message produced by :func:`package`.
    '''
    return msgpack.loads(package_, use_list=True)
def format_payload(enc, **kwargs):
    '''
    Build and serialize a payload: ``enc`` names the encryption type and
    every keyword argument becomes an entry in the payload's load dict.
    '''
    # **kwargs is already a fresh dict owned by this call, so copy it
    # wholesale instead of key by key as before.
    payload = {'enc': enc, 'load': dict(kwargs)}
    return package(payload)
class Serial(object):
    '''
    Serialization object managing all message serialization in Salt.
    '''
    def __init__(self, opts):
        # Accept the full opts dict, a bare serializer name, or anything
        # else (which falls back to the msgpack default).
        if isinstance(opts, dict):
            self.serial = opts.get('serial', 'msgpack')
        elif isinstance(opts, str):
            self.serial = opts
        else:
            self.serial = 'msgpack'

    def loads(self, msg):
        '''
        Deserialize a msgpack message, logging and re-raising on failure.
        '''
        try:
            return msgpack.loads(msg, use_list=True)
        except Exception as exc:
            # Message fix: the concatenated literal previously ran the
            # sentences together with no separating spaces.
            log.critical('Could not deserialize msgpack message: {0} '
                         'This often happens when trying to read a file not in binary mode. '
                         'Please open an issue and include the following error: {1}'.format(msg, exc))
            raise

    def load(self, fn_):
        '''
        Read, close and deserialize the given open file object.
        '''
        data = fn_.read()
        fn_.close()
        return self.loads(data)

    def dumps(self, msg):
        '''
        Serialize ``msg`` with msgpack.
        '''
        try:
            return msgpack.dumps(msg)
        except TypeError:
            if msgpack.version >= (0, 2, 0):
                # msgpack >= 0.2.0 serializes OrderedDict natively, so a
                # TypeError there is a genuine error - re-raise it.
                raise
            # msgpack < 0.2.0 identifies OrderedDict as a plain dict and
            # cannot use custom types for it, so convert by hand, walking
            # nested dicts, lists and tuples.
            def odict_encoder(obj):
                if isinstance(obj, dict):
                    for key, value in six.iteritems(obj.copy()):
                        obj[key] = odict_encoder(value)
                    return dict(obj)
                elif isinstance(obj, (list, tuple)):
                    obj = list(obj)
                    for idx, entry in enumerate(obj):
                        obj[idx] = odict_encoder(entry)
                    return obj
                return obj
            return msgpack.dumps(odict_encoder(msg))
        except SystemError as exc:
            # Bug fix: this used to call .format() on the *return value* of
            # log.critical() (None), raising AttributeError instead of
            # logging. Pass the values as lazy logger arguments instead.
            log.critical(
                'Unable to serialize message! Consider upgrading msgpack. '
                'Message which failed was %s with exception %s', msg, exc)

    def dump(self, msg, fn_):
        '''
        Serialize ``msg`` into the given file object, then close it.
        '''
        fn_.write(self.dumps(msg))
        fn_.close()
class SREQ(object):
    '''
    Generic interface wrapping salt zeromq REQ calls.
    '''
    def __init__(self, master, id_='', serial='msgpack', linger=0):
        # master: ZMQ endpoint URI; id_: optional socket identity;
        # linger: LINGER value applied to the lazily created socket.
        self.master = master
        self.id_ = id_
        self.serial = Serial(serial)
        self.linger = linger
        self.context = zmq.Context()
        self.poller = zmq.Poller()
    @property
    def socket(self):
        '''
        Lazily create (and cache) the REQ socket on first access.
        '''
        if not hasattr(self, '_socket'):
            self._socket = self.context.socket(zmq.REQ)
            # Option constants are guarded with hasattr, presumably to keep
            # older pyzmq releases working - TODO confirm.
            if hasattr(zmq, 'RECONNECT_IVL_MAX'):
                self._socket.setsockopt(
                    zmq.RECONNECT_IVL_MAX, 5000
                )
            if self.master.startswith('tcp://['):
                # Bracketed address means IPv6: hint the protocol family.
                if hasattr(zmq, 'IPV6'):
                    self._socket.setsockopt(zmq.IPV6, 1)
                elif hasattr(zmq, 'IPV4ONLY'):
                    self._socket.setsockopt(zmq.IPV4ONLY, 0)
            self._socket.linger = self.linger
            if self.id_:
                self._socket.setsockopt(zmq.IDENTITY, self.id_)
            self._socket.connect(self.master)
        return self._socket
    def clear_socket(self):
        '''
        Unregister and delete the cached socket, if one was created.
        '''
        if hasattr(self, '_socket'):
            # poller.sockets is a dict in some pyzmq versions and a list of
            # (socket, flags) pairs in others - presumably why both shapes
            # are handled here.
            if isinstance(self.poller.sockets, dict):
                for socket in self.poller.sockets.keys():
                    self.poller.unregister(socket)
            else:
                for socket in self.poller.sockets:
                    self.poller.unregister(socket[0])
            del self._socket
    def send(self, enc, load, tries=1, timeout=60):
        '''
        Serialize and send a payload built from ``enc`` (the encryption
        type) and ``load``; retry up to ``tries`` times, waiting
        ``timeout`` seconds per attempt, and return the deserialized reply.
        '''
        payload = {'enc': enc}
        payload['load'] = load
        pkg = self.serial.dumps(payload)
        self.socket.send(pkg)
        self.poller.register(self.socket, zmq.POLLIN)
        tried = 0
        while True:
            # poll() takes milliseconds; timeout is in seconds.
            polled = self.poller.poll(timeout * 1000)
            tried += 1
            if polled:
                break
            if tries > 1:
                log.info('SaltReqTimeoutError: after {0} seconds. (Try {1} of {2})'.format(
                    timeout, tried, tries))
            if tried >= tries:
                # Out of retries: drop the socket so the next call starts
                # from a clean REQ state, then surface the timeout.
                self.clear_socket()
                raise SaltReqTimeoutError(
                    'SaltReqTimeoutError: after {0} seconds, ran {1} tries'.format(timeout * tried, tried)
                )
        return self.serial.loads(self.socket.recv())
    def send_auto(self, payload, tries=1, timeout=60):
        '''
        Detect the encryption type based on the payload and send it.
        '''
        enc = payload.get('enc', 'clear')
        load = payload.get('load', {})
        return self.send(enc, load, tries, timeout)
    def destroy(self):
        '''
        Close every registered socket (with LINGER 1) and terminate the
        ZMQ context.
        '''
        if isinstance(self.poller.sockets, dict):
            for socket in self.poller.sockets.keys():
                if socket.closed is False:
                    socket.setsockopt(zmq.LINGER, 1)
                    socket.close()
                self.poller.unregister(socket)
        else:
            for socket in self.poller.sockets:
                if socket[0].closed is False:
                    socket[0].setsockopt(zmq.LINGER, 1)
                    socket[0].close()
                self.poller.unregister(socket[0])
        if self.socket.closed is False:
            self.socket.setsockopt(zmq.LINGER, 1)
            self.socket.close()
        if self.context.closed is False:
            self.context.term()
    def __del__(self):
        # Best-effort cleanup when the wrapper is garbage collected.
        self.destroy()
| true | true |
1c3190611d1d696efa23bd959e37084e1e83566d | 686 | py | Python | HackerRank/CountingValleys.py | me1007mea/hacktoberfest2020 | 7d29e18f4486e392ed3d7653536d4883004e1a2b | [
"MIT"
] | 11 | 2020-10-14T05:43:12.000Z | 2021-10-02T02:29:52.000Z | HackerRank/CountingValleys.py | me1007mea/hacktoberfest2020 | 7d29e18f4486e392ed3d7653536d4883004e1a2b | [
"MIT"
] | 10 | 2020-10-14T15:15:41.000Z | 2020-10-31T17:13:33.000Z | HackerRank/CountingValleys.py | me1007mea/hacktoberfest2020 | 7d29e18f4486e392ed3d7653536d4883004e1a2b | [
"MIT"
] | 91 | 2020-10-13T15:38:28.000Z | 2021-10-02T02:29:55.000Z | import math
import os
import random
import re
import sys
# Complete the countingValleys function below.
def countingValleys(n, s):
    """Count the valleys traversed along a hike path.

    Each step is 'U' (up) or 'D' (down); the hike starts at sea level
    (altitude 0). A valley is a maximal stretch spent below sea level, so
    one is counted on every 'U' step that returns to altitude 0.

    The original used an extra ``flag`` state that was never reset and is
    provably redundant: before ever going below sea level, an 'U' step can
    never end at altitude 0.

    :param n: number of steps (unused; kept for the HackerRank signature)
    :param s: path string; characters other than 'U'/'D' are ignored
    :return: number of valleys walked through
    """
    level = 0
    valleys = 0
    for step in s:
        if step == "U":
            level += 1
            # Coming back up to sea level closes exactly one valley.
            if level == 0:
                valleys += 1
        elif step == "D":
            level -= 1
    return valleys
# HackerRank harness: read the step count and the path from stdin and
# write the valley count to the file named by OUTPUT_PATH.
if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    s = input()
    result = countingValleys(n, s)
    fptr.write(str(result) + '\n')
    # NOTE(review): a with-block would guarantee the handle closes on error.
    fptr.close()
| 18.052632 | 47 | 0.48688 | import math
import os
import random
import re
import sys
def countingValleys(n, s):
    """Count the valleys traversed along a hike of 'U'/'D' steps.

    A valley is a maximal stretch below sea level (altitude 0) and is
    counted on the 'U' step that climbs back to 0. The redundant ``flag``
    state of the original (never reset, and unnecessary because an 'U'
    step can only end at 0 after going below it) is removed.

    :param n: number of steps (unused; HackerRank signature)
    :param s: path string; characters other than 'U'/'D' are ignored
    :return: number of valleys walked through
    """
    level = 0
    valleys = 0
    for step in s:
        if step == "U":
            level += 1
            if level == 0:
                valleys += 1
        elif step == "D":
            level -= 1
    return valleys
# HackerRank harness: read the step count and the path from stdin and
# write the valley count to the file named by OUTPUT_PATH.
if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    s = input()
    result = countingValleys(n, s)
    fptr.write(str(result) + '\n')
    # NOTE(review): a with-block would guarantee the handle closes on error.
    fptr.close()
| true | true |
1c319219a636729dc3c8f675b3628b06661c71fe | 368 | py | Python | django/forms/__init__.py | jpmallarino/django | 659d2421c7adbbcd205604002d521d82d6b0b465 | [
"BSD-3-Clause",
"0BSD"
] | 61,676 | 2015-01-01T00:05:13.000Z | 2022-03-31T20:37:54.000Z | checkerista/.env/Lib/site-packages/django/forms/__init__.py | LybaFatimaNasir/CS311S20PID02 | bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39 | [
"MIT"
] | 8,884 | 2015-01-01T00:12:05.000Z | 2022-03-31T19:53:11.000Z | checkerista/.env/Lib/site-packages/django/forms/__init__.py | LybaFatimaNasir/CS311S20PID02 | bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39 | [
"MIT"
] | 33,143 | 2015-01-01T02:04:52.000Z | 2022-03-31T19:42:46.000Z | """
Django validation and HTML form handling.
"""
from django.core.exceptions import ValidationError # NOQA
from django.forms.boundfield import * # NOQA
from django.forms.fields import * # NOQA
from django.forms.forms import * # NOQA
from django.forms.formsets import * # NOQA
from django.forms.models import * # NOQA
from django.forms.widgets import * # NOQA
| 30.666667 | 58 | 0.75 |
from django.core.exceptions import ValidationError
from django.forms.boundfield import *
from django.forms.fields import *
from django.forms.forms import *
from django.forms.formsets import *
from django.forms.models import *
from django.forms.widgets import *
| true | true |
1c31923846bf8a7dea51228bfd3af59166e5308e | 3,351 | py | Python | airflow/contrib/sensors/gcs_sensor.py | jkugiya/airflow | 1dfbb8d2031cb8a3e95e4bf91aa478857c5c3a85 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3 | 2021-07-30T16:46:46.000Z | 2021-10-19T07:18:47.000Z | airflow/contrib/sensors/gcs_sensor.py | jkugiya/airflow | 1dfbb8d2031cb8a3e95e4bf91aa478857c5c3a85 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 6 | 2019-03-16T02:09:30.000Z | 2019-06-27T03:27:34.000Z | airflow/contrib/sensors/gcs_sensor.py | jkugiya/airflow | 1dfbb8d2031cb8a3e95e4bf91aa478857c5c3a85 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-10-03T10:59:50.000Z | 2020-10-03T10:59:50.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.sensors.gcs`."""
import warnings
from airflow.providers.google.cloud.sensors.gcs import (
GCSObjectExistenceSensor,
GCSObjectsWithPrefixExistenceSensor,
GCSObjectUpdateSensor,
GCSUploadSessionCompleteSensor,
)
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.sensors.gcs`.",
DeprecationWarning,
stacklevel=2,
)
class GoogleCloudStorageObjectSensor(GCSObjectExistenceSensor):
"""
This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectExistenceSensor`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectExistenceSensor`.""",
DeprecationWarning,
stacklevel=3,
)
super().__init__(*args, **kwargs)
class GoogleCloudStorageObjectUpdatedSensor(GCSObjectUpdateSensor):
"""
This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectUpdateSensor`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectUpdateSensor`.""",
DeprecationWarning,
stacklevel=3,
)
super().__init__(*args, **kwargs)
class GoogleCloudStoragePrefixSensor(GCSObjectsWithPrefixExistenceSensor):
"""
This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectsWithPrefixExistenceSensor`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectsWithPrefixExistenceSensor`.""",
DeprecationWarning,
stacklevel=3,
)
super().__init__(*args, **kwargs)
class GoogleCloudStorageUploadSessionCompleteSensor(GCSUploadSessionCompleteSensor):
"""
This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSUploadSessionCompleteSensor`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSUploadSessionCompleteSensor`.""",
DeprecationWarning,
stacklevel=3,
)
super().__init__(*args, **kwargs)
| 34.193878 | 108 | 0.698001 |
import warnings
from airflow.providers.google.cloud.sensors.gcs import (
GCSObjectExistenceSensor,
GCSObjectsWithPrefixExistenceSensor,
GCSObjectUpdateSensor,
GCSUploadSessionCompleteSensor,
)
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.sensors.gcs`.",
DeprecationWarning,
stacklevel=2,
)
class GoogleCloudStorageObjectSensor(GCSObjectExistenceSensor):
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectExistenceSensor`.""",
DeprecationWarning,
stacklevel=3,
)
super().__init__(*args, **kwargs)
class GoogleCloudStorageObjectUpdatedSensor(GCSObjectUpdateSensor):
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectUpdateSensor`.""",
DeprecationWarning,
stacklevel=3,
)
super().__init__(*args, **kwargs)
class GoogleCloudStoragePrefixSensor(GCSObjectsWithPrefixExistenceSensor):
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectsWithPrefixExistenceSensor`.""",
DeprecationWarning,
stacklevel=3,
)
super().__init__(*args, **kwargs)
class GoogleCloudStorageUploadSessionCompleteSensor(GCSUploadSessionCompleteSensor):
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSUploadSessionCompleteSensor`.""",
DeprecationWarning,
stacklevel=3,
)
super().__init__(*args, **kwargs)
| true | true |
1c3192961af85632a1f3e4f3011c0ea12b0a3ab4 | 2,811 | py | Python | homeassistant/components/velux/cover.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 1 | 2019-02-18T03:16:32.000Z | 2019-02-18T03:16:32.000Z | homeassistant/components/velux/cover.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 3 | 2021-09-08T03:29:36.000Z | 2022-03-12T00:59:48.000Z | homeassistant/components/velux/cover.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 1 | 2019-09-28T07:06:08.000Z | 2019-09-28T07:06:08.000Z | """Support for Velux covers."""
from homeassistant.components.cover import (
ATTR_POSITION, SUPPORT_CLOSE, SUPPORT_OPEN, SUPPORT_SET_POSITION,
SUPPORT_STOP, CoverDevice)
from homeassistant.components.velux import DATA_VELUX
from homeassistant.core import callback
DEPENDENCIES = ['velux']
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up cover(s) for Velux platform."""
entities = []
for node in hass.data[DATA_VELUX].pyvlx.nodes:
from pyvlx import OpeningDevice
if isinstance(node, OpeningDevice):
entities.append(VeluxCover(node))
async_add_entities(entities)
class VeluxCover(CoverDevice):
"""Representation of a Velux cover."""
def __init__(self, node):
"""Initialize the cover."""
self.node = node
@callback
def async_register_callbacks(self):
"""Register callbacks to update hass after device was changed."""
async def after_update_callback(device):
"""Call after device was updated."""
await self.async_update_ha_state()
self.node.register_device_updated_cb(after_update_callback)
async def async_added_to_hass(self):
"""Store register state change callback."""
self.async_register_callbacks()
@property
def name(self):
"""Return the name of the Velux device."""
return self.node.name
@property
def should_poll(self):
"""No polling needed within Velux."""
return False
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | \
SUPPORT_SET_POSITION | SUPPORT_STOP
@property
def current_cover_position(self):
"""Return the current position of the cover."""
return 100 - self.node.position.position_percent
@property
def device_class(self):
"""Define this cover as a window."""
return 'window'
@property
def is_closed(self):
"""Return if the cover is closed."""
return self.node.position.closed
async def async_close_cover(self, **kwargs):
"""Close the cover."""
await self.node.close()
async def async_open_cover(self, **kwargs):
"""Open the cover."""
await self.node.open()
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
if ATTR_POSITION in kwargs:
position_percent = 100 - kwargs[ATTR_POSITION]
from pyvlx import Position
await self.node.set_position(
Position(position_percent=position_percent))
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
await self.node.stop()
| 30.89011 | 73 | 0.652793 | from homeassistant.components.cover import (
ATTR_POSITION, SUPPORT_CLOSE, SUPPORT_OPEN, SUPPORT_SET_POSITION,
SUPPORT_STOP, CoverDevice)
from homeassistant.components.velux import DATA_VELUX
from homeassistant.core import callback
DEPENDENCIES = ['velux']
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
entities = []
for node in hass.data[DATA_VELUX].pyvlx.nodes:
from pyvlx import OpeningDevice
if isinstance(node, OpeningDevice):
entities.append(VeluxCover(node))
async_add_entities(entities)
class VeluxCover(CoverDevice):
def __init__(self, node):
self.node = node
@callback
def async_register_callbacks(self):
async def after_update_callback(device):
await self.async_update_ha_state()
self.node.register_device_updated_cb(after_update_callback)
async def async_added_to_hass(self):
self.async_register_callbacks()
@property
def name(self):
return self.node.name
@property
def should_poll(self):
return False
@property
def supported_features(self):
return SUPPORT_OPEN | SUPPORT_CLOSE | \
SUPPORT_SET_POSITION | SUPPORT_STOP
@property
def current_cover_position(self):
return 100 - self.node.position.position_percent
@property
def device_class(self):
return 'window'
@property
def is_closed(self):
return self.node.position.closed
async def async_close_cover(self, **kwargs):
await self.node.close()
async def async_open_cover(self, **kwargs):
await self.node.open()
async def async_set_cover_position(self, **kwargs):
if ATTR_POSITION in kwargs:
position_percent = 100 - kwargs[ATTR_POSITION]
from pyvlx import Position
await self.node.set_position(
Position(position_percent=position_percent))
async def async_stop_cover(self, **kwargs):
await self.node.stop()
| true | true |
1c3192dece33069100e20887af8fa0eccd6f7cd5 | 310 | py | Python | src/main/python/code_style/rules/RuleResult.py | Judge-Girl/Code-Quality-Inspection | 6d0d455fb1c47eba17157a99db5e09fbc61484b8 | [
"Apache-2.0"
] | 1 | 2021-04-22T08:29:03.000Z | 2021-04-22T08:29:03.000Z | src/main/python/code_style/rules/RuleResult.py | Judge-Girl/Code-Quality-Inspection | 6d0d455fb1c47eba17157a99db5e09fbc61484b8 | [
"Apache-2.0"
] | 12 | 2020-11-20T10:05:15.000Z | 2021-01-19T11:10:59.000Z | src/main/python/code_style/rules/RuleResult.py | Judge-Girl/Code-Quality-Inspection | 6d0d455fb1c47eba17157a99db5e09fbc61484b8 | [
"Apache-2.0"
] | null | null | null |
class RuleResult:
def __init__(self, rule_name: str):
self.rule_name = rule_name
def serialize(self):
raise Exception('Abstract method \'serialize\' not implemented')
def collect_from_child(self):
raise Exception('Abstract method \'collect_from_child\' not implemented')
| 28.181818 | 81 | 0.696774 |
class RuleResult:
def __init__(self, rule_name: str):
self.rule_name = rule_name
def serialize(self):
raise Exception('Abstract method \'serialize\' not implemented')
def collect_from_child(self):
raise Exception('Abstract method \'collect_from_child\' not implemented')
| true | true |
1c31940648d4407e8f868d7f5002b96df0b3b0c8 | 260 | py | Python | protocol_controller/plugins/cbsd_sas/validators/grant_request.py | magma/domain-proxy | e6567740e1780d011b0b3ebd366e134d77f434b3 | [
"BSD-3-Clause"
] | null | null | null | protocol_controller/plugins/cbsd_sas/validators/grant_request.py | magma/domain-proxy | e6567740e1780d011b0b3ebd366e134d77f434b3 | [
"BSD-3-Clause"
] | 298 | 2021-03-31T19:29:45.000Z | 2022-03-31T11:30:44.000Z | protocol_controller/plugins/cbsd_sas/validators/grant_request.py | openEPC/domain-proxy | e6567740e1780d011b0b3ebd366e134d77f434b3 | [
"BSD-3-Clause"
] | 5 | 2021-03-31T09:26:37.000Z | 2021-03-31T21:34:16.000Z | from marshmallow import Schema, fields
class GrantRequestObjectSchema(Schema):
cbsdId = fields.String(required=True)
class GrantRequestSchema(Schema):
grantRequest = fields.Nested(GrantRequestObjectSchema, required=True, many=True, unknown='true')
| 26 | 100 | 0.792308 | from marshmallow import Schema, fields
class GrantRequestObjectSchema(Schema):
cbsdId = fields.String(required=True)
class GrantRequestSchema(Schema):
grantRequest = fields.Nested(GrantRequestObjectSchema, required=True, many=True, unknown='true')
| true | true |
1c3194817682c594752efe933b950c482dfc3caf | 1,325 | py | Python | crawlino/modules/sources_module/models.py | BBVA/crawlino | 685f57e6b3e9356484ead2681bb178f651d2f371 | [
"Apache-2.0"
] | 1 | 2018-11-11T21:07:54.000Z | 2018-11-11T21:07:54.000Z | crawlino/modules/sources_module/models.py | BBVA/crawlino | 685f57e6b3e9356484ead2681bb178f651d2f371 | [
"Apache-2.0"
] | null | null | null | crawlino/modules/sources_module/models.py | BBVA/crawlino | 685f57e6b3e9356484ead2681bb178f651d2f371 | [
"Apache-2.0"
] | null | null | null | from typing import Dict
from crawlino.crawlino_flow import STEP_SOURCES
from crawlino.exceptions import CrawlinoValueError
from crawlino.models import CModelBase, CModelBaseLoader
from crawlino.modules_stores import CrawlinoModulesStore
class CMSource(CModelBase, metaclass=CModelBaseLoader):
__slots__ = ("type", "config", "name")
def __init__(self, type: str, config: Dict, name: str = None):
self.type = type
self.name = name or ""
self.config = config
if not self.type:
raise CrawlinoValueError("Source must has the 'type' property")
if self.config is None:
raise CrawlinoValueError("Source must has a 'config' property")
if CrawlinoModulesStore.find_module(STEP_SOURCES, self.type) is None:
raise CrawlinoValueError("Invalid 'type' property value",
exc_info=True,
extra={
"given_source_type": self.type
})
@property
def to_dict(self):
return {
"type": self.type,
"config": self.config,
"name": self.name
}
@property
def module_name(self) -> str:
return "sources"
__all__ = ("CMSource", )
| 29.444444 | 77 | 0.583396 | from typing import Dict
from crawlino.crawlino_flow import STEP_SOURCES
from crawlino.exceptions import CrawlinoValueError
from crawlino.models import CModelBase, CModelBaseLoader
from crawlino.modules_stores import CrawlinoModulesStore
class CMSource(CModelBase, metaclass=CModelBaseLoader):
__slots__ = ("type", "config", "name")
def __init__(self, type: str, config: Dict, name: str = None):
self.type = type
self.name = name or ""
self.config = config
if not self.type:
raise CrawlinoValueError("Source must has the 'type' property")
if self.config is None:
raise CrawlinoValueError("Source must has a 'config' property")
if CrawlinoModulesStore.find_module(STEP_SOURCES, self.type) is None:
raise CrawlinoValueError("Invalid 'type' property value",
exc_info=True,
extra={
"given_source_type": self.type
})
@property
def to_dict(self):
return {
"type": self.type,
"config": self.config,
"name": self.name
}
@property
def module_name(self) -> str:
return "sources"
__all__ = ("CMSource", )
| true | true |
1c3194b6131259ebc1d582358502bae612a4a187 | 43,344 | py | Python | stix2/test/v21/test_granular_markings.py | 2xyo/cti-python-stix2 | cffee92c7ed18c4cdd54c4370c6a17878dfd36cd | [
"BSD-3-Clause"
] | 9 | 2017-07-03T19:01:23.000Z | 2020-02-20T19:22:49.000Z | stix2/test/v21/test_granular_markings.py | 2xyo/cti-python-stix2 | cffee92c7ed18c4cdd54c4370c6a17878dfd36cd | [
"BSD-3-Clause"
] | 1 | 2017-05-30T14:13:27.000Z | 2017-05-30T14:13:27.000Z | stix2/test/v21/test_granular_markings.py | 2xyo/cti-python-stix2 | cffee92c7ed18c4cdd54c4370c6a17878dfd36cd | [
"BSD-3-Clause"
] | 1 | 2017-05-26T19:17:14.000Z | 2017-05-26T19:17:14.000Z | import pytest
from stix2 import markings
from stix2.exceptions import InvalidSelectorError, MarkingNotFoundError
from stix2.v21 import TLP_RED, Malware
from .constants import MALWARE_MORE_KWARGS as MALWARE_KWARGS_CONST
from .constants import MARKING_IDS, MARKING_LANGS
# NOTE(review): because this string follows the imports it is an inert
# expression statement, not the module docstring (module.__doc__ stays unset).
# Consider moving it to the very first line of the file.
"""Tests for the Data Markings API."""
# Shared kwargs for building Malware test objects. Copied so that a test that
# mutates the dict cannot leak changes into the imported constant.
MALWARE_KWARGS = MALWARE_KWARGS_CONST.copy()
def test_add_marking_mark_one_selector_multiple_refs():
    """Adding two marking refs to one selector yields one granular marking per ref."""
    unmarked = Malware(
        **MALWARE_KWARGS
    )
    expected = Malware(
        granular_markings=[
            {"selectors": ["description"], "marking_ref": MARKING_IDS[0]},
            {"selectors": ["description"], "marking_ref": MARKING_IDS[1]},
        ],
        **MALWARE_KWARGS
    )

    marked = markings.add_markings(unmarked, [MARKING_IDS[0], MARKING_IDS[1]], ["description"])

    # Every resulting granular marking must be one of the expected entries.
    assert all(m in expected["granular_markings"] for m in marked["granular_markings"])
# Each case is (object-to-mark, expected-marked-object, marking-to-add); the
# object under test is given both as a Malware SDO and as a plain dict, and
# the marking both as an id string and as a full TLP marking-definition object.
@pytest.mark.parametrize(
    "data", [
        (
            Malware(**MALWARE_KWARGS),
            Malware(
                granular_markings=[
                    {
                        "selectors": ["description", "name"],
                        "marking_ref": MARKING_IDS[0],
                    },
                ],
                **MALWARE_KWARGS
            ),
            MARKING_IDS[0],
        ),
        (
            MALWARE_KWARGS,
            dict(
                granular_markings=[
                    {
                        "selectors": ["description", "name"],
                        "marking_ref": MARKING_IDS[0],
                    },
                ],
                **MALWARE_KWARGS
            ),
            MARKING_IDS[0],
        ),
        (
            Malware(**MALWARE_KWARGS),
            Malware(
                granular_markings=[
                    {
                        "selectors": ["description", "name"],
                        "marking_ref": TLP_RED.id,
                    },
                ],
                **MALWARE_KWARGS
            ),
            TLP_RED,
        ),
    ],
)
def test_add_marking_mark_multiple_selector_one_refs(data):
    """Adding a single marking to two selectors marks both with that ref."""
    before = data[0]
    after = data[1]
    before = markings.add_markings(before, data[2], ["description", "name"])
    for m in before["granular_markings"]:
        assert m in after["granular_markings"]
def test_add_marking_mark_multiple_selector_multiple_refs():
    """Marking two selectors with two refs produces one granular marking per ref."""
    expected = Malware(
        granular_markings=[
            {"selectors": ["description", "name"], "marking_ref": MARKING_IDS[0]},
            {"selectors": ["description", "name"], "marking_ref": MARKING_IDS[1]},
        ],
        **MALWARE_KWARGS
    )

    marked = markings.add_markings(
        Malware(**MALWARE_KWARGS),
        [MARKING_IDS[0], MARKING_IDS[1]],
        ["description", "name"],
    )

    assert all(m in expected["granular_markings"] for m in marked["granular_markings"])
def test_add_marking_mark_multiple_selector_multiple_refs_mixed():
    """Marking-ref and lang markings can be added together on the same selectors."""
    expected = Malware(
        granular_markings=[
            {"selectors": ["description", "name"], "marking_ref": MARKING_IDS[0]},
            {"selectors": ["description", "name"], "marking_ref": MARKING_IDS[1]},
            {"selectors": ["description", "name"], "lang": MARKING_LANGS[0]},
            {"selectors": ["description", "name"], "lang": MARKING_LANGS[1]},
        ],
        **MALWARE_KWARGS
    )

    # A mixed list of marking ids and language tags is applied in one call.
    marked = markings.add_markings(
        Malware(**MALWARE_KWARGS),
        [MARKING_IDS[0], MARKING_IDS[1], MARKING_LANGS[0], MARKING_LANGS[1]],
        ["description", "name"],
    )

    assert all(m in expected["granular_markings"] for m in marked["granular_markings"])
def test_add_marking_mark_another_property_same_marking():
    """Adding an already-present ref to a new selector extends that marking's selectors."""
    start = Malware(
        granular_markings=[
            {"selectors": ["description"], "marking_ref": MARKING_IDS[0]},
        ],
        **MALWARE_KWARGS
    )
    expected = Malware(
        granular_markings=[
            {"selectors": ["description", "name"], "marking_ref": MARKING_IDS[0]},
        ],
        **MALWARE_KWARGS
    )

    result = markings.add_markings(start, [MARKING_IDS[0]], ["name"])

    assert all(m in expected["granular_markings"] for m in result["granular_markings"])
def test_add_marking_mark_same_property_same_marking():
    """Re-adding an identical marking/selector pair leaves the markings unchanged."""
    start = Malware(
        granular_markings=[
            {"selectors": ["description"], "marking_ref": MARKING_IDS[0]},
        ],
        **MALWARE_KWARGS
    )
    expected = Malware(
        granular_markings=[
            {"selectors": ["description"], "marking_ref": MARKING_IDS[0]},
        ],
        **MALWARE_KWARGS
    )

    result = markings.add_markings(start, [MARKING_IDS[0]], ["description"])

    assert all(m in expected["granular_markings"] for m in result["granular_markings"])
# NOTE(review): "marking" is one flat list of alternating (selectors, refs)
# pairs, but the test body only ever reads marking[0] and marking[1], so only
# the first pair (["title"] with two refs) is actually exercised; the rest is
# dead data. TODO: confirm the intent and either drop the extra pairs or
# iterate over them (some later pairs may raise a different exception type).
@pytest.mark.parametrize(
    "data,marking", [
        (
            {"description": "test description"},
            [
                ["title"], ["marking-definition--1", "marking-definition--2"],
                "", ["marking-definition--1", "marking-definition--2"],
                [], ["marking-definition--1", "marking-definition--2"],
                [""], ["marking-definition--1", "marking-definition--2"],
                ["description"], [""],
                ["description"], [],
                ["description"], ["marking-definition--1", 456],
            ],
        ),
    ],
)
def test_add_marking_bad_selector(data, marking):
    """An invalid selector passed to add_markings raises InvalidSelectorError."""
    with pytest.raises(InvalidSelectorError):
        markings.add_markings(data, marking[0], marking[1])
# Fixture document for the get_markings tests: nested values under keys
# "a"/"b"/"c"/"x", with granular_markings whose marking_refs are shorthand
# strings ("1".."10") so the expected results below are easy to spell out.
GET_MARKINGS_TEST_DATA = {
    "a": 333,
    "b": "value",
    "c": [
        17,
        "list value",
        {
            "g": "nested",
            "h": 45,
        },
    ],
    "x": {
        "y": [
            "hello",
            88,
        ],
        "z": {
            "foo1": "bar",
            "foo2": 65,
        },
    },
    "granular_markings": [
        {
            "marking_ref": "1",
            "selectors": ["a"],
        },
        {
            "marking_ref": "2",
            "selectors": ["c"],
        },
        {
            "marking_ref": "3",
            "selectors": ["c.[1]"],
        },
        {
            "marking_ref": "4",
            "selectors": ["c.[2]"],
        },
        {
            "marking_ref": "5",
            "selectors": ["c.[2].g"],
        },
        {
            "marking_ref": "6",
            "selectors": ["x"],
        },
        {
            "marking_ref": "7",
            "selectors": ["x.y"],
        },
        {
            "marking_ref": "8",
            "selectors": ["x.y.[1]"],
        },
        {
            "marking_ref": "9",
            "selectors": ["x.z"],
        },
        {
            "marking_ref": "10",
            "selectors": ["x.z.foo2"],
        },
    ],
}
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA])
def test_get_markings_smoke(data):
    """get_markings returns exactly the one marking attached to selector "a"."""
    result = markings.get_markings(data, "a")
    assert result == ["1"]
    assert len(result) >= 1
@pytest.mark.parametrize(
    "data", [
        GET_MARKINGS_TEST_DATA,
        {"b": 1234},
    ],
)
def test_get_markings_not_marked(data):
    """A valid selector with no attached markings yields an empty result."""
    assert not markings.get_markings(data, "b")
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA])
def test_get_markings_multiple_selectors(data):
    """Markings for multiple selectors are exactly the union of each selector's markings."""
    combined = set(markings.get_markings(data, ["x.y", "x.z"]))
    xy_only = set(markings.get_markings(data, ["x.y"]))
    xz_only = set(markings.get_markings(data, ["x.z"]))

    # subset in both directions == set equality with the union
    assert combined == xy_only | xz_only
# Pair the shared fixture with each malformed/non-existent selector.
@pytest.mark.parametrize(
    "data,selector",
    [
        (GET_MARKINGS_TEST_DATA, bad)
        for bad in [
            "foo", "", [], [""],
            "x.z.[-2]", "c.f", "c.[2].i", "c.[3]",
            "d", "x.[0]", "z.y.w", "x.z.[1]", "x.z.foo3",
        ]
    ],
)
def test_get_markings_bad_selector(data, selector):
    """Every malformed or non-existent selector raises InvalidSelectorError."""
    with pytest.raises(InvalidSelectorError):
        markings.get_markings(data, selector)
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA])
def test_get_markings_positional_arguments_combinations(data):
    """Exercise every (inherited, descendants) flag combination per selector.

    Each row gives the expected marking sets for the four flag combinations
    (False, False), (True, False), (True, True), (False, True), in that order.
    """
    flag_combos = [(False, False), (True, False), (True, True), (False, True)]
    expectations = [
        ("a", {"1"}, {"1"}, {"1"}, {"1"}),
        ("b", set(), set(), set(), set()),
        ("c", {"2"}, {"2"}, {"2", "3", "4", "5"}, {"2", "3", "4", "5"}),
        ("c.[0]", set(), {"2"}, {"2"}, set()),
        ("c.[1]", {"3"}, {"2", "3"}, {"2", "3"}, {"3"}),
        ("c.[2]", {"4"}, {"2", "4"}, {"2", "4", "5"}, {"4", "5"}),
        ("c.[2].g", {"5"}, {"2", "4", "5"}, {"2", "4", "5"}, {"5"}),
        ("x", {"6"}, {"6"}, {"6", "7", "8", "9", "10"}, {"6", "7", "8", "9", "10"}),
        ("x.y", {"7"}, {"6", "7"}, {"6", "7", "8"}, {"7", "8"}),
        ("x.y.[0]", set(), {"6", "7"}, {"6", "7"}, set()),
        ("x.y.[1]", {"8"}, {"6", "7", "8"}, {"6", "7", "8"}, {"8"}),
        ("x.z", {"9"}, {"6", "9"}, {"6", "9", "10"}, {"9", "10"}),
        ("x.z.foo1", set(), {"6", "9"}, {"6", "9"}, set()),
        ("x.z.foo2", {"10"}, {"6", "9", "10"}, {"6", "9", "10"}, {"10"}),
    ]
    for selector, *expected_sets in expectations:
        for (inherited, descendants), expected in zip(flag_combos, expected_sets):
            result = markings.get_markings(data, selector, inherited, descendants)
            assert set(result) == expected
# Variant of GET_MARKINGS_TEST_DATA that mixes marking_ref entries ("m1"..)
# with lang entries ("l7", "l9"); "x.z" deliberately carries both kinds so the
# lang/marking_ref filter options can be tested against the same selector.
GET_MARKINGS_TEST_DATA_LANGS = {
    "a": 333,
    "b": "value",
    "c": [
        17,
        "list value",
        {
            "g": "nested",
            "h": 45,
        },
    ],
    "x": {
        "y": [
            "hello",
            88,
        ],
        "z": {
            "foo1": "bar",
            "foo2": 65,
        },
    },
    "granular_markings": [
        {
            "marking_ref": "m1",
            "selectors": ["a"],
        },
        {
            "marking_ref": "m2",
            "selectors": ["c"],
        },
        {
            "marking_ref": "m3",
            "selectors": ["c.[1]"],
        },
        {
            "marking_ref": "m4",
            "selectors": ["c.[2]"],
        },
        {
            "marking_ref": "m5",
            "selectors": ["c.[2].g"],
        },
        {
            "marking_ref": "m6",
            "selectors": ["x"],
        },
        {
            "lang": "l7",
            "selectors": ["x.y"],
        },
        {
            "marking_ref": "m8",
            "selectors": ["x.y.[1]"],
        },
        {
            "lang": "l9",
            "selectors": ["x.z"],
        },
        {
            "marking_ref": "m9",
            "selectors": ["x.z"],
        },
        {
            "marking_ref": "m10",
            "selectors": ["x.z.foo2"],
        },
    ],
}
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA_LANGS])
def test_get_markings_multiple_selectors_langs(data):
    """With lang markings present, multi-selector results are still the per-selector union."""
    combined = set(markings.get_markings(data, ["x.y", "x.z"]))
    xy_only = set(markings.get_markings(data, ["x.y"]))
    xz_only = set(markings.get_markings(data, ["x.z"]))

    assert combined == xy_only | xz_only
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA_LANGS])
def test_get_markings_multiple_selectors_with_options(data):
    """The lang/marking_ref flags filter out the corresponding marking kind."""
    refs_only = markings.get_markings(data, ["x.y", "x.z"], lang=False)
    langs_only = markings.get_markings(data, ["x.z"], marking_ref=False)

    assert len(refs_only) == 1
    assert len(langs_only) == 1
# Each case pairs a marked object (once as a Malware SDO, once as a plain
# dict) with the list of marking refs to remove from "description".
@pytest.mark.parametrize(
    "data", [
        (
            Malware(
                granular_markings=[
                    {
                        "selectors": ["description"],
                        "marking_ref": MARKING_IDS[0],
                    },
                    {
                        "selectors": ["description"],
                        "marking_ref": MARKING_IDS[1],
                    },
                ],
                **MALWARE_KWARGS
            ),
            [MARKING_IDS[0], MARKING_IDS[1]],
        ),
        (
            dict(
                granular_markings=[
                    {
                        "selectors": ["description"],
                        "marking_ref": MARKING_IDS[0],
                    },
                    {
                        "selectors": ["description"],
                        "marking_ref": MARKING_IDS[1],
                    },
                ],
                **MALWARE_KWARGS
            ),
            [MARKING_IDS[0], MARKING_IDS[1]],
        ),
    ],
)
def test_remove_marking_remove_one_selector_with_multiple_refs(data):
    """Removing every ref from the only marked selector drops granular_markings."""
    before = markings.remove_markings(data[0], data[1], ["description"])
    assert "granular_markings" not in before
def test_remove_marking_remove_multiple_selector_one_ref():
    """Removing a ref from every selector it covers drops granular_markings entirely."""
    sdo = Malware(
        granular_markings=[
            {"selectors": ["description", "modified"], "marking_ref": MARKING_IDS[0]},
        ],
        **MALWARE_KWARGS
    )

    result = markings.remove_markings(sdo, MARKING_IDS[0], ["description", "modified"])

    assert "granular_markings" not in result
def test_remove_marking_mark_one_selector_from_multiple_ones():
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = Malware(
granular_markings=[
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = markings.remove_markings(before, [MARKING_IDS[0]], ["modified"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_remove_marking_mark_one_selector_from_multiple_ones_mixed():
    """Removing one selector works for ref-based and lang-based markings alike."""
    expected = Malware(
        granular_markings=[
            {"selectors": ["description"], "marking_ref": MARKING_IDS[0]},
            {"selectors": ["description"], "lang": MARKING_LANGS[0]},
        ],
        **MALWARE_KWARGS
    )
    sdo = Malware(
        granular_markings=[
            {"selectors": ["description", "modified"], "marking_ref": MARKING_IDS[0]},
            {"selectors": ["description", "modified"], "lang": MARKING_LANGS[0]},
        ],
        **MALWARE_KWARGS
    )

    result = markings.remove_markings(sdo, [MARKING_IDS[0], MARKING_LANGS[0]], ["modified"])

    assert all(m in expected["granular_markings"] for m in result["granular_markings"])
def test_remove_marking_mark_one_selector_markings_from_multiple_ones():
    """Removal targets only the named ref; other refs keep all their selectors."""
    expected = Malware(
        granular_markings=[
            {"selectors": ["description"], "marking_ref": MARKING_IDS[0]},
            {"selectors": ["description", "modified"], "marking_ref": MARKING_IDS[1]},
        ],
        **MALWARE_KWARGS
    )
    sdo = Malware(
        granular_markings=[
            {"selectors": ["description", "modified"], "marking_ref": MARKING_IDS[0]},
            {"selectors": ["description", "modified"], "marking_ref": MARKING_IDS[1]},
        ],
        **MALWARE_KWARGS
    )

    result = markings.remove_markings(sdo, [MARKING_IDS[0]], ["modified"])

    assert all(m in expected["granular_markings"] for m in result["granular_markings"])
def test_remove_marking_mark_mutilple_selector_multiple_refs():
    """Removing all refs from all selectors drops granular_markings entirely."""
    # NOTE(review): "mutilple" in the test name is likely a typo for
    # "multiple"; kept unchanged so name-based test selection still works.
    sdo = Malware(
        granular_markings=[
            {"selectors": ["description", "modified"], "marking_ref": MARKING_IDS[0]},
            {"selectors": ["description", "modified"], "marking_ref": MARKING_IDS[1]},
        ],
        **MALWARE_KWARGS
    )

    result = markings.remove_markings(
        sdo, [MARKING_IDS[0], MARKING_IDS[1]], ["description", "modified"],
    )

    assert "granular_markings" not in result
def test_remove_marking_mark_another_property_same_marking():
    """Removing a ref from one selector does not disturb the same ref elsewhere."""
    expected = Malware(
        granular_markings=[
            {"selectors": ["description"], "marking_ref": MARKING_IDS[0]},
        ],
        **MALWARE_KWARGS
    )
    sdo = Malware(
        granular_markings=[
            {"selectors": ["description"], "marking_ref": MARKING_IDS[0]},
            {"selectors": ["modified"], "marking_ref": MARKING_IDS[0]},
        ],
        **MALWARE_KWARGS
    )

    result = markings.remove_markings(sdo, [MARKING_IDS[0]], ["modified"])

    assert all(m in expected["granular_markings"] for m in result["granular_markings"])
def test_remove_marking_mark_same_property_same_marking():
    """Removing the only marking leaves no granular_markings property behind."""
    sdo = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description"]},
        ],
        **MALWARE_KWARGS
    )
    sdo = markings.remove_markings(sdo, [MARKING_IDS[0]], ["description"])
    assert "granular_markings" not in sdo
def test_remove_no_markings():
    """Removing markings from an unmarked object is a no-op."""
    unmarked = {
        "description": "test description",
    }
    result = markings.remove_markings(unmarked, ["marking-definition--1"], ["description"])
    assert unmarked == result
def test_remove_marking_bad_selector():
    """A selector that names a non-existent property raises InvalidSelectorError."""
    sdo = {
        "description": "test description",
    }
    with pytest.raises(InvalidSelectorError):
        markings.remove_markings(sdo, ["marking-definition--1", "marking-definition--2"], ["title"])
def test_remove_marking_not_present():
    """Removing a ref the object does not carry raises MarkingNotFoundError."""
    sdo = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description"]},
        ],
        **MALWARE_KWARGS
    )
    with pytest.raises(MarkingNotFoundError):
        markings.remove_markings(sdo, [MARKING_IDS[1]], ["description"])
# A Malware object and a plain dict carrying identical granular markings:
# is_marked() must treat both representations the same way.
IS_MARKED_TEST_DATA = [
    Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[1], "selectors": ["description"]},
            {"marking_ref": MARKING_IDS[2], "selectors": ["malware_types", "description"]},
            {"marking_ref": MARKING_IDS[3], "selectors": ["malware_types", "description"]},
            {"lang": MARKING_LANGS[1], "selectors": ["name"]},
        ],
        **MALWARE_KWARGS
    ),
    dict(
        granular_markings=[
            {"marking_ref": MARKING_IDS[1], "selectors": ["description"]},
            {"marking_ref": MARKING_IDS[2], "selectors": ["malware_types", "description"]},
            {"marking_ref": MARKING_IDS[3], "selectors": ["malware_types", "description"]},
            {"lang": MARKING_LANGS[1], "selectors": ["name"]},
        ],
        **MALWARE_KWARGS
    ),
]
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_smoke(data):
    """Smoke test: is_marked runs and reports marked vs. unmarked selectors."""
    assert markings.is_marked(data, selectors=["description"])
    assert markings.is_marked(data, selectors=["modified"]) is False
    assert markings.is_marked(data, selectors=["name"])
@pytest.mark.parametrize(
    "data,selector", [
        (IS_MARKED_TEST_DATA[0], "foo"),
        (IS_MARKED_TEST_DATA[0], ""),
        (IS_MARKED_TEST_DATA[0], []),
        (IS_MARKED_TEST_DATA[0], [""]),
        (IS_MARKED_TEST_DATA[0], "x.z.[-2]"),
        (IS_MARKED_TEST_DATA[0], "c.f"),
        (IS_MARKED_TEST_DATA[0], "c.[2].i"),
        (IS_MARKED_TEST_DATA[1], "c.[3]"),
        (IS_MARKED_TEST_DATA[1], "d"),
        (IS_MARKED_TEST_DATA[1], "x.[0]"),
        (IS_MARKED_TEST_DATA[1], "z.y.w"),
        (IS_MARKED_TEST_DATA[1], "x.z.[1]"),
        (IS_MARKED_TEST_DATA[1], "x.z.foo3"),
    ],
)
def test_is_marked_invalid_selector(data, selector):
    """Every malformed or non-existent selector must raise InvalidSelectorError."""
    with pytest.raises(InvalidSelectorError):
        markings.is_marked(data, selectors=selector)
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_mix_selector(data):
    """A mix of one marked and one unmarked selector still reports True."""
    assert markings.is_marked(data, selectors=["description", "malware_types"])
    assert markings.is_marked(data, selectors=["description"])
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_valid_selector_no_refs(data):
    """A valid selector is True when its marking refs all apply, False otherwise."""
    assert markings.is_marked(data, selectors=["description"])
    assert markings.is_marked(data, [MARKING_IDS[2], MARKING_IDS[3]], ["description"])
    assert markings.is_marked(data, [MARKING_IDS[2]], ["description"])
    assert markings.is_marked(data, [MARKING_IDS[2], MARKING_IDS[5]], ["description"]) is False
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_valid_selector_and_refs(data):
    """is_marked is True only when the given ref actually marks the selector."""
    assert markings.is_marked(data, [MARKING_IDS[1]], ["description"])
    assert markings.is_marked(data, [MARKING_IDS[1]], ["modified"]) is False
    assert markings.is_marked(data, [MARKING_LANGS[1]], ["name"])
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_valid_selector_multiple_refs(data):
    """Test that a valid selector returns True if all marking_refs match.

    Otherwise False. A single ref may also be passed bare (not in a list).
    """
    # fixed docstring typo: "aall" -> "all"
    assert markings.is_marked(data, [MARKING_IDS[2], MARKING_IDS[3]], ["malware_types"])
    assert markings.is_marked(data, [MARKING_IDS[2], MARKING_IDS[1]], ["malware_types"]) is False
    # A bare (non-list) ref is accepted as well.
    assert markings.is_marked(data, MARKING_IDS[2], ["malware_types"])
    assert markings.is_marked(data, ["marking-definition--1234"], ["malware_types"]) is False
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_no_marking_refs(data):
    """Without refs, is_marked is True iff some granular marking asserts the field."""
    assert markings.is_marked(data, selectors=["type"]) is False
    assert markings.is_marked(data, selectors=["malware_types"])
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_no_selectors(data):
    """Omitting the mandatory 'selectors' argument raises TypeError."""
    with pytest.raises(TypeError) as excinfo:
        markings.granular_markings.is_marked(data)
    assert "'selectors' must be provided" in str(excinfo.value)
def test_is_marked_positional_arguments_combinations():
    """Exercise every (inherited, descendants) combination per selector depth."""
    sdo = {
        "a": 333,
        "b": "value",
        "c": [17, "list value", {"g": "nested", "h": 45}],
        "x": {
            "y": ["hello", 88],
            "z": {"foo1": "bar", "foo2": 65},
        },
        "granular_markings": [
            {"marking_ref": "1", "selectors": ["a"]},
            {"marking_ref": "2", "selectors": ["c"]},
            {"marking_ref": "3", "selectors": ["c.[1]"]},
            {"marking_ref": "4", "selectors": ["c.[2]"]},
            {"marking_ref": "5", "selectors": ["c.[2].g"]},
            {"marking_ref": "6", "selectors": ["x"]},
            {"marking_ref": "7", "selectors": ["x.y"]},
            {"marking_ref": "8", "selectors": ["x.y.[1]"]},
            {"marking_ref": "9", "selectors": ["x.z"]},
            {"marking_ref": "10", "selectors": ["x.z.foo2"]},
        ],
    }

    # "a": directly marked; no parents or children to inherit from.
    assert markings.is_marked(sdo, ["1"], "a", False, False)
    assert markings.is_marked(sdo, ["1"], "a", True, False)
    assert markings.is_marked(sdo, ["1"], "a", True, True)
    assert markings.is_marked(sdo, ["1"], "a", False, True)

    # "b": never marked, under any combination.
    assert markings.is_marked(sdo, "b", inherited=False, descendants=False) is False
    assert markings.is_marked(sdo, "b", inherited=True, descendants=False) is False
    assert markings.is_marked(sdo, "b", inherited=True, descendants=True) is False
    assert markings.is_marked(sdo, "b", inherited=False, descendants=True) is False

    # "c": marked itself; descendants add refs 3, 4, 5.
    assert markings.is_marked(sdo, ["2"], "c", False, False)
    assert markings.is_marked(sdo, ["2"], "c", True, False)
    assert markings.is_marked(sdo, ["2", "3", "4", "5"], "c", True, True)
    assert markings.is_marked(sdo, ["2", "3", "4", "5"], "c", False, True)

    # "c.[0]": only marked through inheritance from "c".
    assert markings.is_marked(sdo, "c.[0]", inherited=False, descendants=False) is False
    assert markings.is_marked(sdo, ["2"], "c.[0]", True, False)
    assert markings.is_marked(sdo, ["2"], "c.[0]", True, True)
    assert markings.is_marked(sdo, "c.[0]", inherited=False, descendants=True) is False

    # "c.[1]": marked directly (3); inherits 2 from "c".
    assert markings.is_marked(sdo, ["3"], "c.[1]", False, False)
    assert markings.is_marked(sdo, ["2", "3"], "c.[1]", True, False)
    assert markings.is_marked(sdo, ["2", "3"], "c.[1]", True, True)
    assert markings.is_marked(sdo, ["3"], "c.[1]", False, True)

    # "c.[2]": marked directly (4); inherits 2; descendant "g" adds 5.
    assert markings.is_marked(sdo, ["4"], "c.[2]", False, False)
    assert markings.is_marked(sdo, ["2", "4"], "c.[2]", True, False)
    assert markings.is_marked(sdo, ["2", "4", "5"], "c.[2]", True, True)
    assert markings.is_marked(sdo, ["4", "5"], "c.[2]", False, True)

    # "c.[2].g": marked directly (5); inherits 2 and 4.
    assert markings.is_marked(sdo, ["5"], "c.[2].g", False, False)
    assert markings.is_marked(sdo, ["2", "4", "5"], "c.[2].g", True, False)
    assert markings.is_marked(sdo, ["2", "4", "5"], "c.[2].g", True, True)
    assert markings.is_marked(sdo, ["5"], "c.[2].g", False, True)

    # "x": marked directly (6); descendants add 7-10.
    assert markings.is_marked(sdo, ["6"], "x", False, False)
    assert markings.is_marked(sdo, ["6"], "x", True, False)
    assert markings.is_marked(sdo, ["6", "7", "8", "9", "10"], "x", True, True)
    assert markings.is_marked(sdo, ["6", "7", "8", "9", "10"], "x", False, True)

    # "x.y": marked directly (7); inherits 6; descendant adds 8.
    assert markings.is_marked(sdo, ["7"], "x.y", False, False)
    assert markings.is_marked(sdo, ["6", "7"], "x.y", True, False)
    assert markings.is_marked(sdo, ["6", "7", "8"], "x.y", True, True)
    assert markings.is_marked(sdo, ["7", "8"], "x.y", False, True)

    # "x.y.[0]": only marked through inheritance (6, 7).
    assert markings.is_marked(sdo, "x.y.[0]", inherited=False, descendants=False) is False
    assert markings.is_marked(sdo, ["6", "7"], "x.y.[0]", True, False)
    assert markings.is_marked(sdo, ["6", "7"], "x.y.[0]", True, True)
    assert markings.is_marked(sdo, "x.y.[0]", inherited=False, descendants=True) is False

    # "x.y.[1]": marked directly (8); inherits 6 and 7.
    assert markings.is_marked(sdo, ["8"], "x.y.[1]", False, False)
    assert markings.is_marked(sdo, ["6", "7", "8"], "x.y.[1]", True, False)
    assert markings.is_marked(sdo, ["6", "7", "8"], "x.y.[1]", True, True)
    assert markings.is_marked(sdo, ["8"], "x.y.[1]", False, True)

    # "x.z": marked directly (9); inherits 6; descendant adds 10.
    assert markings.is_marked(sdo, ["9"], "x.z", False, False)
    assert markings.is_marked(sdo, ["6", "9"], "x.z", True, False)
    assert markings.is_marked(sdo, ["6", "9", "10"], "x.z", True, True)
    assert markings.is_marked(sdo, ["9", "10"], "x.z", False, True)

    # "x.z.foo1": only marked through inheritance (6, 9).
    assert markings.is_marked(sdo, "x.z.foo1", inherited=False, descendants=False) is False
    assert markings.is_marked(sdo, ["6", "9"], "x.z.foo1", True, False)
    assert markings.is_marked(sdo, ["6", "9"], "x.z.foo1", True, True)
    assert markings.is_marked(sdo, "x.z.foo1", inherited=False, descendants=True) is False

    # "x.z.foo2": marked directly (10); inherits 6 and 9.
    assert markings.is_marked(sdo, ["10"], "x.z.foo2", False, False)
    assert markings.is_marked(sdo, ["6", "9", "10"], "x.z.foo2", True, False)
    assert markings.is_marked(sdo, ["6", "9", "10"], "x.z.foo2", True, True)
    assert markings.is_marked(sdo, ["10"], "x.z.foo2", False, True)
def test_create_sdo_with_invalid_marking():
    """Constructing an object with a bad selector fails with a precise message."""
    with pytest.raises(InvalidSelectorError) as excinfo:
        Malware(
            granular_markings=[
                {"marking_ref": MARKING_IDS[0], "selectors": ["foo"]},
            ],
            **MALWARE_KWARGS
        )
    assert str(excinfo.value) == "Selector foo in Malware is not valid!"
def test_set_marking_mark_one_selector_multiple_refs():
    """set_markings applies several refs to a single selector at once."""
    sdo = Malware(
        **MALWARE_KWARGS
    )
    expected = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description"]},
            {"marking_ref": MARKING_IDS[1], "selectors": ["description"]},
        ],
        **MALWARE_KWARGS
    )
    sdo = markings.set_markings(sdo, [MARKING_IDS[0], MARKING_IDS[1]], ["description"])
    for marking in sdo["granular_markings"]:
        assert marking in expected["granular_markings"]
def test_set_marking_mark_one_selector_multiple_lang_refs():
    """set_markings applies several language markings to a single selector."""
    sdo = Malware(
        **MALWARE_KWARGS
    )
    expected = Malware(
        granular_markings=[
            {"lang": MARKING_LANGS[0], "selectors": ["description"]},
            {"lang": MARKING_LANGS[1], "selectors": ["description"]},
        ],
        **MALWARE_KWARGS
    )
    sdo = markings.set_markings(sdo, [MARKING_LANGS[0], MARKING_LANGS[1]], ["description"])
    for marking in sdo["granular_markings"]:
        assert marking in expected["granular_markings"]
def test_set_marking_mark_multiple_selector_one_refs():
    """set_markings replaces the existing ref on the given selectors."""
    sdo = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[1], "selectors": ["description", "modified"]},
        ],
        **MALWARE_KWARGS
    )
    expected = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description", "modified"]},
        ],
        **MALWARE_KWARGS
    )
    sdo = markings.set_markings(sdo, [MARKING_IDS[0]], ["description", "modified"])
    for marking in sdo["granular_markings"]:
        assert marking in expected["granular_markings"]
def test_set_marking_mark_multiple_mixed_markings():
    """set_markings replaces a mixed set of ref and language markings."""
    sdo = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[1], "selectors": ["description", "modified"]},
            {"lang": MARKING_LANGS[2], "selectors": ["description", "modified"]},
        ],
        **MALWARE_KWARGS
    )
    expected = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[2], "selectors": ["description", "modified"]},
            {"lang": MARKING_LANGS[3], "selectors": ["description", "modified"]},
        ],
        **MALWARE_KWARGS
    )
    sdo = markings.set_markings(sdo, [MARKING_IDS[2], MARKING_LANGS[3]], ["description", "modified"])
    for marking in sdo["granular_markings"]:
        assert marking in expected["granular_markings"]
def test_set_marking_mark_multiple_selector_multiple_refs_from_none():
    """set_markings on an unmarked object adds all given ref/selector pairs."""
    sdo = Malware(
        **MALWARE_KWARGS
    )
    expected = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description", "modified"]},
            {"marking_ref": MARKING_IDS[1], "selectors": ["description", "modified"]},
        ],
        **MALWARE_KWARGS
    )
    sdo = markings.set_markings(sdo, [MARKING_IDS[0], MARKING_IDS[1]], ["description", "modified"])
    for marking in sdo["granular_markings"]:
        assert marking in expected["granular_markings"]
def test_set_marking_mark_another_property_same_marking():
    """set_markings on a selector replaces its previous refs with the new ones."""
    sdo = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description"]},
        ],
        **MALWARE_KWARGS
    )
    expected = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[1], "selectors": ["description"]},
            {"marking_ref": MARKING_IDS[2], "selectors": ["description"]},
        ],
        **MALWARE_KWARGS
    )
    sdo = markings.set_markings(sdo, [MARKING_IDS[1], MARKING_IDS[2]], ["description"])
    for marking in sdo["granular_markings"]:
        assert marking in expected["granular_markings"]
@pytest.mark.parametrize(
    "marking", [
        ([MARKING_IDS[4], MARKING_IDS[5]], ["foo"]),
        ([MARKING_IDS[4], MARKING_IDS[5]], ""),
        ([MARKING_IDS[4], MARKING_IDS[5]], []),
        ([MARKING_IDS[4], MARKING_IDS[5]], [""]),
    ],
)
def test_set_marking_bad_selector(marking):
    """A bad selector raises and leaves the object's markings unchanged."""
    sdo = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description"]},
        ],
        **MALWARE_KWARGS
    )
    expected = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description"]},
        ],
        **MALWARE_KWARGS
    )
    with pytest.raises(InvalidSelectorError):
        sdo = markings.set_markings(sdo, marking[0], marking[1])
    assert sdo == expected
def test_set_marking_mark_same_property_same_marking():
    """Re-setting the marking an object already carries is a stable no-op."""
    sdo = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description"]},
        ],
        **MALWARE_KWARGS
    )
    expected = Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description"]},
        ],
        **MALWARE_KWARGS
    )
    sdo = markings.set_markings(sdo, [MARKING_IDS[0]], ["description"])
    for marking in sdo["granular_markings"]:
        assert marking in expected["granular_markings"]
# A Malware object and an equivalent plain dict with three overlapping
# granular markings; clear_markings() must handle both representations.
CLEAR_MARKINGS_TEST_DATA = [
    Malware(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description"]},
            {"marking_ref": MARKING_IDS[1], "selectors": ["modified", "description"]},
            {"marking_ref": MARKING_IDS[2], "selectors": ["modified", "description", "type"]},
        ],
        **MALWARE_KWARGS
    ),
    dict(
        granular_markings=[
            {"marking_ref": MARKING_IDS[0], "selectors": ["description"]},
            {"marking_ref": MARKING_IDS[1], "selectors": ["modified", "description"]},
            {"marking_ref": MARKING_IDS[2], "selectors": ["modified", "description", "type"]},
        ],
        **MALWARE_KWARGS
    ),
]
@pytest.mark.parametrize("data", CLEAR_MARKINGS_TEST_DATA)
def test_clear_marking_smoke(data):
    """Smoke test: clear_markings runs and unmarks the given selector."""
    data = markings.clear_markings(data, "modified")
    assert markings.is_marked(data, "modified") is False
@pytest.mark.parametrize("data", CLEAR_MARKINGS_TEST_DATA)
def test_clear_marking_multiple_selectors(data):
    """Clearing several selectors at once removes all their markings."""
    data = markings.clear_markings(data, ["type", "description"])
    assert markings.is_marked(data, ["type", "description"]) is False
@pytest.mark.parametrize("data", CLEAR_MARKINGS_TEST_DATA)
def test_clear_marking_one_selector(data):
    """Clearing a single selector unmarks exactly that selector."""
    data = markings.clear_markings(data, "description")
    assert markings.is_marked(data, "description") is False
@pytest.mark.parametrize("data", CLEAR_MARKINGS_TEST_DATA)
def test_clear_marking_all_selectors(data):
    """Clearing every marked selector drops granular_markings entirely."""
    data = markings.clear_markings(data, ["description", "type", "modified"])
    assert markings.is_marked(data, "description") is False
    assert "granular_markings" not in data
@pytest.mark.parametrize(
    "data,selector", [
        (CLEAR_MARKINGS_TEST_DATA[0], "foo"),
        (CLEAR_MARKINGS_TEST_DATA[0], ""),
        (CLEAR_MARKINGS_TEST_DATA[1], []),
        (CLEAR_MARKINGS_TEST_DATA[1], [""]),
    ],
)
def test_clear_marking_bad_selector(data, selector):
    """A malformed selector raises InvalidSelectorError."""
    with pytest.raises(InvalidSelectorError):
        markings.clear_markings(data, selector)
@pytest.mark.parametrize("data", CLEAR_MARKINGS_TEST_DATA)
def test_clear_marking_not_present(data):
    """Clearing a selector that carries no markings raises MarkingNotFoundError."""
    with pytest.raises(MarkingNotFoundError):
        markings.clear_markings(data, ["malware_types"])
| 33.087023 | 137 | 0.535991 | import pytest
from stix2 import markings
from stix2.exceptions import InvalidSelectorError, MarkingNotFoundError
from stix2.v21 import TLP_RED, Malware
from .constants import MALWARE_MORE_KWARGS as MALWARE_KWARGS_CONST
from .constants import MARKING_IDS, MARKING_LANGS
MALWARE_KWARGS = MALWARE_KWARGS_CONST.copy()
def test_add_marking_mark_one_selector_multiple_refs():
before = Malware(
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[1],
},
],
**MALWARE_KWARGS
)
before = markings.add_markings(before, [MARKING_IDS[0], MARKING_IDS[1]], ["description"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
@pytest.mark.parametrize(
"data", [
(
Malware(**MALWARE_KWARGS),
Malware(
granular_markings=[
{
"selectors": ["description", "name"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
),
MARKING_IDS[0],
),
(
MALWARE_KWARGS,
dict(
granular_markings=[
{
"selectors": ["description", "name"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
),
MARKING_IDS[0],
),
(
Malware(**MALWARE_KWARGS),
Malware(
granular_markings=[
{
"selectors": ["description", "name"],
"marking_ref": TLP_RED.id,
},
],
**MALWARE_KWARGS
),
TLP_RED,
),
],
)
def test_add_marking_mark_multiple_selector_one_refs(data):
before = data[0]
after = data[1]
before = markings.add_markings(before, data[2], ["description", "name"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_add_marking_mark_multiple_selector_multiple_refs():
before = Malware(
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description", "name"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description", "name"],
"marking_ref": MARKING_IDS[1],
},
],
**MALWARE_KWARGS
)
before = markings.add_markings(before, [MARKING_IDS[0], MARKING_IDS[1]], ["description", "name"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_add_marking_mark_multiple_selector_multiple_refs_mixed():
before = Malware(
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description", "name"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description", "name"],
"marking_ref": MARKING_IDS[1],
},
{
"selectors": ["description", "name"],
"lang": MARKING_LANGS[0],
},
{
"selectors": ["description", "name"],
"lang": MARKING_LANGS[1],
},
],
**MALWARE_KWARGS
)
before = markings.add_markings(before, [MARKING_IDS[0], MARKING_IDS[1], MARKING_LANGS[0], MARKING_LANGS[1]], ["description", "name"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_add_marking_mark_another_property_same_marking():
before = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description", "name"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = markings.add_markings(before, [MARKING_IDS[0]], ["name"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_add_marking_mark_same_property_same_marking():
before = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = markings.add_markings(before, [MARKING_IDS[0]], ["description"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
@pytest.mark.parametrize(
"data,marking", [
(
{"description": "test description"},
[
["title"], ["marking-definition--1", "marking-definition--2"],
"", ["marking-definition--1", "marking-definition--2"],
[], ["marking-definition--1", "marking-definition--2"],
[""], ["marking-definition--1", "marking-definition--2"],
["description"], [""],
["description"], [],
["description"], ["marking-definition--1", 456],
],
),
],
)
def test_add_marking_bad_selector(data, marking):
with pytest.raises(InvalidSelectorError):
markings.add_markings(data, marking[0], marking[1])
GET_MARKINGS_TEST_DATA = {
"a": 333,
"b": "value",
"c": [
17,
"list value",
{
"g": "nested",
"h": 45,
},
],
"x": {
"y": [
"hello",
88,
],
"z": {
"foo1": "bar",
"foo2": 65,
},
},
"granular_markings": [
{
"marking_ref": "1",
"selectors": ["a"],
},
{
"marking_ref": "2",
"selectors": ["c"],
},
{
"marking_ref": "3",
"selectors": ["c.[1]"],
},
{
"marking_ref": "4",
"selectors": ["c.[2]"],
},
{
"marking_ref": "5",
"selectors": ["c.[2].g"],
},
{
"marking_ref": "6",
"selectors": ["x"],
},
{
"marking_ref": "7",
"selectors": ["x.y"],
},
{
"marking_ref": "8",
"selectors": ["x.y.[1]"],
},
{
"marking_ref": "9",
"selectors": ["x.z"],
},
{
"marking_ref": "10",
"selectors": ["x.z.foo2"],
},
],
}
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA])
def test_get_markings_smoke(data):
assert len(markings.get_markings(data, "a")) >= 1
assert markings.get_markings(data, "a") == ["1"]
@pytest.mark.parametrize(
"data", [
GET_MARKINGS_TEST_DATA,
{"b": 1234},
],
)
def test_get_markings_not_marked(data):
results = markings.get_markings(data, "b")
assert len(results) == 0
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA])
def test_get_markings_multiple_selectors(data):
total = markings.get_markings(data, ["x.y", "x.z"])
xy_markings = markings.get_markings(data, ["x.y"])
xz_markings = markings.get_markings(data, ["x.z"])
assert set(xy_markings).issubset(total)
assert set(xz_markings).issubset(total)
assert set(xy_markings).union(xz_markings).issuperset(total)
@pytest.mark.parametrize(
"data,selector", [
(GET_MARKINGS_TEST_DATA, "foo"),
(GET_MARKINGS_TEST_DATA, ""),
(GET_MARKINGS_TEST_DATA, []),
(GET_MARKINGS_TEST_DATA, [""]),
(GET_MARKINGS_TEST_DATA, "x.z.[-2]"),
(GET_MARKINGS_TEST_DATA, "c.f"),
(GET_MARKINGS_TEST_DATA, "c.[2].i"),
(GET_MARKINGS_TEST_DATA, "c.[3]"),
(GET_MARKINGS_TEST_DATA, "d"),
(GET_MARKINGS_TEST_DATA, "x.[0]"),
(GET_MARKINGS_TEST_DATA, "z.y.w"),
(GET_MARKINGS_TEST_DATA, "x.z.[1]"),
(GET_MARKINGS_TEST_DATA, "x.z.foo3"),
],
)
def test_get_markings_bad_selector(data, selector):
with pytest.raises(InvalidSelectorError):
markings.get_markings(data, selector)
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA])
def test_get_markings_positional_arguments_combinations(data):
assert set(markings.get_markings(data, "a", False, False)) == set(["1"])
assert set(markings.get_markings(data, "a", True, False)) == set(["1"])
assert set(markings.get_markings(data, "a", True, True)) == set(["1"])
assert set(markings.get_markings(data, "a", False, True)) == set(["1"])
assert set(markings.get_markings(data, "b", False, False)) == set([])
assert set(markings.get_markings(data, "b", True, False)) == set([])
assert set(markings.get_markings(data, "b", True, True)) == set([])
assert set(markings.get_markings(data, "b", False, True)) == set([])
assert set(markings.get_markings(data, "c", False, False)) == set(["2"])
assert set(markings.get_markings(data, "c", True, False)) == set(["2"])
assert set(markings.get_markings(data, "c", True, True)) == set(["2", "3", "4", "5"])
assert set(markings.get_markings(data, "c", False, True)) == set(["2", "3", "4", "5"])
assert set(markings.get_markings(data, "c.[0]", False, False)) == set([])
assert set(markings.get_markings(data, "c.[0]", True, False)) == set(["2"])
assert set(markings.get_markings(data, "c.[0]", True, True)) == set(["2"])
assert set(markings.get_markings(data, "c.[0]", False, True)) == set([])
assert set(markings.get_markings(data, "c.[1]", False, False)) == set(["3"])
assert set(markings.get_markings(data, "c.[1]", True, False)) == set(["2", "3"])
assert set(markings.get_markings(data, "c.[1]", True, True)) == set(["2", "3"])
assert set(markings.get_markings(data, "c.[1]", False, True)) == set(["3"])
assert set(markings.get_markings(data, "c.[2]", False, False)) == set(["4"])
assert set(markings.get_markings(data, "c.[2]", True, False)) == set(["2", "4"])
assert set(markings.get_markings(data, "c.[2]", True, True)) == set(["2", "4", "5"])
assert set(markings.get_markings(data, "c.[2]", False, True)) == set(["4", "5"])
assert set(markings.get_markings(data, "c.[2].g", False, False)) == set(["5"])
assert set(markings.get_markings(data, "c.[2].g", True, False)) == set(["2", "4", "5"])
assert set(markings.get_markings(data, "c.[2].g", True, True)) == set(["2", "4", "5"])
assert set(markings.get_markings(data, "c.[2].g", False, True)) == set(["5"])
assert set(markings.get_markings(data, "x", False, False)) == set(["6"])
assert set(markings.get_markings(data, "x", True, False)) == set(["6"])
assert set(markings.get_markings(data, "x", True, True)) == set(["6", "7", "8", "9", "10"])
assert set(markings.get_markings(data, "x", False, True)) == set(["6", "7", "8", "9", "10"])
assert set(markings.get_markings(data, "x.y", False, False)) == set(["7"])
assert set(markings.get_markings(data, "x.y", True, False)) == set(["6", "7"])
assert set(markings.get_markings(data, "x.y", True, True)) == set(["6", "7", "8"])
assert set(markings.get_markings(data, "x.y", False, True)) == set(["7", "8"])
assert set(markings.get_markings(data, "x.y.[0]", False, False)) == set([])
assert set(markings.get_markings(data, "x.y.[0]", True, False)) == set(["6", "7"])
assert set(markings.get_markings(data, "x.y.[0]", True, True)) == set(["6", "7"])
assert set(markings.get_markings(data, "x.y.[0]", False, True)) == set([])
assert set(markings.get_markings(data, "x.y.[1]", False, False)) == set(["8"])
assert set(markings.get_markings(data, "x.y.[1]", True, False)) == set(["6", "7", "8"])
assert set(markings.get_markings(data, "x.y.[1]", True, True)) == set(["6", "7", "8"])
assert set(markings.get_markings(data, "x.y.[1]", False, True)) == set(["8"])
assert set(markings.get_markings(data, "x.z", False, False)) == set(["9"])
assert set(markings.get_markings(data, "x.z", True, False)) == set(["6", "9"])
assert set(markings.get_markings(data, "x.z", True, True)) == set(["6", "9", "10"])
assert set(markings.get_markings(data, "x.z", False, True)) == set(["9", "10"])
assert set(markings.get_markings(data, "x.z.foo1", False, False)) == set([])
assert set(markings.get_markings(data, "x.z.foo1", True, False)) == set(["6", "9"])
assert set(markings.get_markings(data, "x.z.foo1", True, True)) == set(["6", "9"])
assert set(markings.get_markings(data, "x.z.foo1", False, True)) == set([])
assert set(markings.get_markings(data, "x.z.foo2", False, False)) == set(["10"])
assert set(markings.get_markings(data, "x.z.foo2", True, False)) == set(["6", "9", "10"])
assert set(markings.get_markings(data, "x.z.foo2", True, True)) == set(["6", "9", "10"])
assert set(markings.get_markings(data, "x.z.foo2", False, True)) == set(["10"])
GET_MARKINGS_TEST_DATA_LANGS = {
"a": 333,
"b": "value",
"c": [
17,
"list value",
{
"g": "nested",
"h": 45,
},
],
"x": {
"y": [
"hello",
88,
],
"z": {
"foo1": "bar",
"foo2": 65,
},
},
"granular_markings": [
{
"marking_ref": "m1",
"selectors": ["a"],
},
{
"marking_ref": "m2",
"selectors": ["c"],
},
{
"marking_ref": "m3",
"selectors": ["c.[1]"],
},
{
"marking_ref": "m4",
"selectors": ["c.[2]"],
},
{
"marking_ref": "m5",
"selectors": ["c.[2].g"],
},
{
"marking_ref": "m6",
"selectors": ["x"],
},
{
"lang": "l7",
"selectors": ["x.y"],
},
{
"marking_ref": "m8",
"selectors": ["x.y.[1]"],
},
{
"lang": "l9",
"selectors": ["x.z"],
},
{
"marking_ref": "m9",
"selectors": ["x.z"],
},
{
"marking_ref": "m10",
"selectors": ["x.z.foo2"],
},
],
}
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA_LANGS])
def test_get_markings_multiple_selectors_langs(data):
total = markings.get_markings(data, ["x.y", "x.z"])
xy_markings = markings.get_markings(data, ["x.y"])
xz_markings = markings.get_markings(data, ["x.z"])
assert set(xy_markings).issubset(total)
assert set(xz_markings).issubset(total)
assert set(xy_markings).union(xz_markings).issuperset(total)
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA_LANGS])
def test_get_markings_multiple_selectors_with_options(data):
total = markings.get_markings(data, ["x.y", "x.z"], lang=False)
xz_markings = markings.get_markings(data, ["x.z"], marking_ref=False)
assert len(total) == 1
assert len(xz_markings) == 1
@pytest.mark.parametrize(
"data", [
(
Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[1],
},
],
**MALWARE_KWARGS
),
[MARKING_IDS[0], MARKING_IDS[1]],
),
(
dict(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[1],
},
],
**MALWARE_KWARGS
),
[MARKING_IDS[0], MARKING_IDS[1]],
),
],
)
def test_remove_marking_remove_one_selector_with_multiple_refs(data):
before = markings.remove_markings(data[0], data[1], ["description"])
assert "granular_markings" not in before
def test_remove_marking_remove_multiple_selector_one_ref():
before = Malware(
granular_markings=[
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = markings.remove_markings(before, MARKING_IDS[0], ["description", "modified"])
assert "granular_markings" not in before
def test_remove_marking_mark_one_selector_from_multiple_ones():
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = Malware(
granular_markings=[
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = markings.remove_markings(before, [MARKING_IDS[0]], ["modified"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_remove_marking_mark_one_selector_from_multiple_ones_mixed():
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description"],
"lang": MARKING_LANGS[0],
},
],
**MALWARE_KWARGS
)
before = Malware(
granular_markings=[
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description", "modified"],
"lang": MARKING_LANGS[0],
},
],
**MALWARE_KWARGS
)
before = markings.remove_markings(before, [MARKING_IDS[0], MARKING_LANGS[0]], ["modified"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_remove_marking_mark_one_selector_markings_from_multiple_ones():
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[1],
},
],
**MALWARE_KWARGS
)
before = Malware(
granular_markings=[
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[1],
},
],
**MALWARE_KWARGS
)
before = markings.remove_markings(before, [MARKING_IDS[0]], ["modified"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_remove_marking_mark_mutilple_selector_multiple_refs():
before = Malware(
granular_markings=[
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[1],
},
],
**MALWARE_KWARGS
)
before = markings.remove_markings(before, [MARKING_IDS[0], MARKING_IDS[1]], ["description", "modified"])
assert "granular_markings" not in before
def test_remove_marking_mark_another_property_same_marking():
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["modified"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = markings.remove_markings(before, [MARKING_IDS[0]], ["modified"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_remove_marking_mark_same_property_same_marking():
before = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = markings.remove_markings(before, [MARKING_IDS[0]], ["description"])
assert "granular_markings" not in before
def test_remove_no_markings():
before = {
"description": "test description",
}
after = markings.remove_markings(before, ["marking-definition--1"], ["description"])
assert before == after
def test_remove_marking_bad_selector():
before = {
"description": "test description",
}
with pytest.raises(InvalidSelectorError):
markings.remove_markings(before, ["marking-definition--1", "marking-definition--2"], ["title"])
def test_remove_marking_not_present():
before = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
with pytest.raises(MarkingNotFoundError):
markings.remove_markings(before, [MARKING_IDS[1]], ["description"])
IS_MARKED_TEST_DATA = [
Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[1],
},
{
"selectors": ["malware_types", "description"],
"marking_ref": MARKING_IDS[2],
},
{
"selectors": ["malware_types", "description"],
"marking_ref": MARKING_IDS[3],
},
{
"selectors": ["name"],
"lang": MARKING_LANGS[1],
},
],
**MALWARE_KWARGS
),
dict(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[1],
},
{
"selectors": ["malware_types", "description"],
"marking_ref": MARKING_IDS[2],
},
{
"selectors": ["malware_types", "description"],
"marking_ref": MARKING_IDS[3],
},
{
"selectors": ["name"],
"lang": MARKING_LANGS[1],
},
],
**MALWARE_KWARGS
),
]
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_smoke(data):
assert markings.is_marked(data, selectors=["description"])
assert markings.is_marked(data, selectors=["modified"]) is False
assert markings.is_marked(data, selectors=["name"])
@pytest.mark.parametrize(
"data,selector", [
(IS_MARKED_TEST_DATA[0], "foo"),
(IS_MARKED_TEST_DATA[0], ""),
(IS_MARKED_TEST_DATA[0], []),
(IS_MARKED_TEST_DATA[0], [""]),
(IS_MARKED_TEST_DATA[0], "x.z.[-2]"),
(IS_MARKED_TEST_DATA[0], "c.f"),
(IS_MARKED_TEST_DATA[0], "c.[2].i"),
(IS_MARKED_TEST_DATA[1], "c.[3]"),
(IS_MARKED_TEST_DATA[1], "d"),
(IS_MARKED_TEST_DATA[1], "x.[0]"),
(IS_MARKED_TEST_DATA[1], "z.y.w"),
(IS_MARKED_TEST_DATA[1], "x.z.[1]"),
(IS_MARKED_TEST_DATA[1], "x.z.foo3"),
],
)
def test_is_marked_invalid_selector(data, selector):
with pytest.raises(InvalidSelectorError):
markings.is_marked(data, selectors=selector)
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_mix_selector(data):
assert markings.is_marked(data, selectors=["description", "malware_types"])
assert markings.is_marked(data, selectors=["description"])
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_valid_selector_no_refs(data):
assert markings.is_marked(data, selectors=["description"])
assert markings.is_marked(data, [MARKING_IDS[2], MARKING_IDS[3]], ["description"])
assert markings.is_marked(data, [MARKING_IDS[2]], ["description"])
assert markings.is_marked(data, [MARKING_IDS[2], MARKING_IDS[5]], ["description"]) is False
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_valid_selector_and_refs(data):
assert markings.is_marked(data, [MARKING_IDS[1]], ["description"])
assert markings.is_marked(data, [MARKING_IDS[1]], ["modified"]) is False
assert markings.is_marked(data, [MARKING_LANGS[1]], ["name"])
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_valid_selector_multiple_refs(data):
assert markings.is_marked(data, [MARKING_IDS[2], MARKING_IDS[3]], ["malware_types"])
assert markings.is_marked(data, [MARKING_IDS[2], MARKING_IDS[1]], ["malware_types"]) is False
assert markings.is_marked(data, MARKING_IDS[2], ["malware_types"])
assert markings.is_marked(data, ["marking-definition--1234"], ["malware_types"]) is False
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_no_marking_refs(data):
assert markings.is_marked(data, selectors=["type"]) is False
assert markings.is_marked(data, selectors=["malware_types"])
@pytest.mark.parametrize("data", IS_MARKED_TEST_DATA)
def test_is_marked_no_selectors(data):
with pytest.raises(TypeError) as excinfo:
markings.granular_markings.is_marked(data)
assert "'selectors' must be provided" in str(excinfo.value)
def test_is_marked_positional_arguments_combinations():
test_sdo = \
{
"a": 333,
"b": "value",
"c": [
17,
"list value",
{
"g": "nested",
"h": 45,
},
],
"x": {
"y": [
"hello",
88,
],
"z": {
"foo1": "bar",
"foo2": 65,
},
},
"granular_markings": [
{
"marking_ref": "1",
"selectors": ["a"],
},
{
"marking_ref": "2",
"selectors": ["c"],
},
{
"marking_ref": "3",
"selectors": ["c.[1]"],
},
{
"marking_ref": "4",
"selectors": ["c.[2]"],
},
{
"marking_ref": "5",
"selectors": ["c.[2].g"],
},
{
"marking_ref": "6",
"selectors": ["x"],
},
{
"marking_ref": "7",
"selectors": ["x.y"],
},
{
"marking_ref": "8",
"selectors": ["x.y.[1]"],
},
{
"marking_ref": "9",
"selectors": ["x.z"],
},
{
"marking_ref": "10",
"selectors": ["x.z.foo2"],
},
],
}
assert markings.is_marked(test_sdo, ["1"], "a", False, False)
assert markings.is_marked(test_sdo, ["1"], "a", True, False)
assert markings.is_marked(test_sdo, ["1"], "a", True, True)
assert markings.is_marked(test_sdo, ["1"], "a", False, True)
assert markings.is_marked(test_sdo, "b", inherited=False, descendants=False) is False
assert markings.is_marked(test_sdo, "b", inherited=True, descendants=False) is False
assert markings.is_marked(test_sdo, "b", inherited=True, descendants=True) is False
assert markings.is_marked(test_sdo, "b", inherited=False, descendants=True) is False
assert markings.is_marked(test_sdo, ["2"], "c", False, False)
assert markings.is_marked(test_sdo, ["2"], "c", True, False)
assert markings.is_marked(test_sdo, ["2", "3", "4", "5"], "c", True, True)
assert markings.is_marked(test_sdo, ["2", "3", "4", "5"], "c", False, True)
assert markings.is_marked(test_sdo, "c.[0]", inherited=False, descendants=False) is False
assert markings.is_marked(test_sdo, ["2"], "c.[0]", True, False)
assert markings.is_marked(test_sdo, ["2"], "c.[0]", True, True)
assert markings.is_marked(test_sdo, "c.[0]", inherited=False, descendants=True) is False
assert markings.is_marked(test_sdo, ["3"], "c.[1]", False, False)
assert markings.is_marked(test_sdo, ["2", "3"], "c.[1]", True, False)
assert markings.is_marked(test_sdo, ["2", "3"], "c.[1]", True, True)
assert markings.is_marked(test_sdo, ["3"], "c.[1]", False, True)
assert markings.is_marked(test_sdo, ["4"], "c.[2]", False, False)
assert markings.is_marked(test_sdo, ["2", "4"], "c.[2]", True, False)
assert markings.is_marked(test_sdo, ["2", "4", "5"], "c.[2]", True, True)
assert markings.is_marked(test_sdo, ["4", "5"], "c.[2]", False, True)
assert markings.is_marked(test_sdo, ["5"], "c.[2].g", False, False)
assert markings.is_marked(test_sdo, ["2", "4", "5"], "c.[2].g", True, False)
assert markings.is_marked(test_sdo, ["2", "4", "5"], "c.[2].g", True, True)
assert markings.is_marked(test_sdo, ["5"], "c.[2].g", False, True)
assert markings.is_marked(test_sdo, ["6"], "x", False, False)
assert markings.is_marked(test_sdo, ["6"], "x", True, False)
assert markings.is_marked(test_sdo, ["6", "7", "8", "9", "10"], "x", True, True)
assert markings.is_marked(test_sdo, ["6", "7", "8", "9", "10"], "x", False, True)
assert markings.is_marked(test_sdo, ["7"], "x.y", False, False)
assert markings.is_marked(test_sdo, ["6", "7"], "x.y", True, False)
assert markings.is_marked(test_sdo, ["6", "7", "8"], "x.y", True, True)
assert markings.is_marked(test_sdo, ["7", "8"], "x.y", False, True)
assert markings.is_marked(test_sdo, "x.y.[0]", inherited=False, descendants=False) is False
assert markings.is_marked(test_sdo, ["6", "7"], "x.y.[0]", True, False)
assert markings.is_marked(test_sdo, ["6", "7"], "x.y.[0]", True, True)
assert markings.is_marked(test_sdo, "x.y.[0]", inherited=False, descendants=True) is False
assert markings.is_marked(test_sdo, ["8"], "x.y.[1]", False, False)
assert markings.is_marked(test_sdo, ["6", "7", "8"], "x.y.[1]", True, False)
assert markings.is_marked(test_sdo, ["6", "7", "8"], "x.y.[1]", True, True)
assert markings.is_marked(test_sdo, ["8"], "x.y.[1]", False, True)
assert markings.is_marked(test_sdo, ["9"], "x.z", False, False)
assert markings.is_marked(test_sdo, ["6", "9"], "x.z", True, False)
assert markings.is_marked(test_sdo, ["6", "9", "10"], "x.z", True, True)
assert markings.is_marked(test_sdo, ["9", "10"], "x.z", False, True)
assert markings.is_marked(test_sdo, "x.z.foo1", inherited=False, descendants=False) is False
assert markings.is_marked(test_sdo, ["6", "9"], "x.z.foo1", True, False)
assert markings.is_marked(test_sdo, ["6", "9"], "x.z.foo1", True, True)
assert markings.is_marked(test_sdo, "x.z.foo1", inherited=False, descendants=True) is False
assert markings.is_marked(test_sdo, ["10"], "x.z.foo2", False, False)
assert markings.is_marked(test_sdo, ["6", "9", "10"], "x.z.foo2", True, False)
assert markings.is_marked(test_sdo, ["6", "9", "10"], "x.z.foo2", True, True)
assert markings.is_marked(test_sdo, ["10"], "x.z.foo2", False, True)
def test_create_sdo_with_invalid_marking():
with pytest.raises(InvalidSelectorError) as excinfo:
Malware(
granular_markings=[
{
"selectors": ["foo"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
assert str(excinfo.value) == "Selector foo in Malware is not valid!"
def test_set_marking_mark_one_selector_multiple_refs():
before = Malware(
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[1],
},
],
**MALWARE_KWARGS
)
before = markings.set_markings(before, [MARKING_IDS[0], MARKING_IDS[1]], ["description"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_set_marking_mark_one_selector_multiple_lang_refs():
before = Malware(
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"lang": MARKING_LANGS[0],
},
{
"selectors": ["description"],
"lang": MARKING_LANGS[1],
},
],
**MALWARE_KWARGS
)
before = markings.set_markings(before, [MARKING_LANGS[0], MARKING_LANGS[1]], ["description"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_set_marking_mark_multiple_selector_one_refs():
before = Malware(
granular_markings=[
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[1],
},
],
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = markings.set_markings(before, [MARKING_IDS[0]], ["description", "modified"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_set_marking_mark_multiple_mixed_markings():
before = Malware(
granular_markings=[
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[1],
},
{
"selectors": ["description", "modified"],
"lang": MARKING_LANGS[2],
},
],
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[2],
},
{
"selectors": ["description", "modified"],
"lang": MARKING_LANGS[3],
},
],
**MALWARE_KWARGS
)
before = markings.set_markings(before, [MARKING_IDS[2], MARKING_LANGS[3]], ["description", "modified"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_set_marking_mark_multiple_selector_multiple_refs_from_none():
before = Malware(
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["description", "modified"],
"marking_ref": MARKING_IDS[1],
},
],
**MALWARE_KWARGS
)
before = markings.set_markings(before, [MARKING_IDS[0], MARKING_IDS[1]], ["description", "modified"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
def test_set_marking_mark_another_property_same_marking():
before = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[1],
},
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[2],
},
],
**MALWARE_KWARGS
)
before = markings.set_markings(before, [MARKING_IDS[1], MARKING_IDS[2]], ["description"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
@pytest.mark.parametrize(
"marking", [
([MARKING_IDS[4], MARKING_IDS[5]], ["foo"]),
([MARKING_IDS[4], MARKING_IDS[5]], ""),
([MARKING_IDS[4], MARKING_IDS[5]], []),
([MARKING_IDS[4], MARKING_IDS[5]], [""]),
],
)
def test_set_marking_bad_selector(marking):
before = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
with pytest.raises(InvalidSelectorError):
before = markings.set_markings(before, marking[0], marking[1])
assert before == after
def test_set_marking_mark_same_property_same_marking():
before = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
after = Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
],
**MALWARE_KWARGS
)
before = markings.set_markings(before, [MARKING_IDS[0]], ["description"])
for m in before["granular_markings"]:
assert m in after["granular_markings"]
CLEAR_MARKINGS_TEST_DATA = [
Malware(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["modified", "description"],
"marking_ref": MARKING_IDS[1],
},
{
"selectors": ["modified", "description", "type"],
"marking_ref": MARKING_IDS[2],
},
],
**MALWARE_KWARGS
),
dict(
granular_markings=[
{
"selectors": ["description"],
"marking_ref": MARKING_IDS[0],
},
{
"selectors": ["modified", "description"],
"marking_ref": MARKING_IDS[1],
},
{
"selectors": ["modified", "description", "type"],
"marking_ref": MARKING_IDS[2],
},
],
**MALWARE_KWARGS
),
]
@pytest.mark.parametrize("data", CLEAR_MARKINGS_TEST_DATA)
def test_clear_marking_smoke(data):
data = markings.clear_markings(data, "modified")
assert markings.is_marked(data, "modified") is False
@pytest.mark.parametrize("data", CLEAR_MARKINGS_TEST_DATA)
def test_clear_marking_multiple_selectors(data):
data = markings.clear_markings(data, ["type", "description"])
assert markings.is_marked(data, ["type", "description"]) is False
@pytest.mark.parametrize("data", CLEAR_MARKINGS_TEST_DATA)
def test_clear_marking_one_selector(data):
data = markings.clear_markings(data, "description")
assert markings.is_marked(data, "description") is False
@pytest.mark.parametrize("data", CLEAR_MARKINGS_TEST_DATA)
def test_clear_marking_all_selectors(data):
data = markings.clear_markings(data, ["description", "type", "modified"])
assert markings.is_marked(data, "description") is False
assert "granular_markings" not in data
@pytest.mark.parametrize(
"data,selector", [
(CLEAR_MARKINGS_TEST_DATA[0], "foo"),
(CLEAR_MARKINGS_TEST_DATA[0], ""),
(CLEAR_MARKINGS_TEST_DATA[1], []),
(CLEAR_MARKINGS_TEST_DATA[1], [""]),
],
)
def test_clear_marking_bad_selector(data, selector):
with pytest.raises(InvalidSelectorError):
markings.clear_markings(data, selector)
@pytest.mark.parametrize("data", CLEAR_MARKINGS_TEST_DATA)
def test_clear_marking_not_present(data):
with pytest.raises(MarkingNotFoundError):
markings.clear_markings(data, ["malware_types"])
| true | true |
1c3195763ec9140a253961f18bd1ff67f126974a | 1,482 | py | Python | make_mozilla/news/migrations/0002_auto__add_field_article_featured.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 4 | 2015-05-08T16:58:53.000Z | 2019-09-06T05:30:59.000Z | make_mozilla/news/migrations/0002_auto__add_field_article_featured.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 2 | 2019-02-17T17:44:53.000Z | 2019-03-28T03:54:39.000Z | make_mozilla/news/migrations/0002_auto__add_field_article_featured.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 7 | 2015-05-21T15:38:29.000Z | 2019-10-28T23:39:06.000Z | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Article.featured'
db.add_column('news_article', 'featured',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Article.featured'
db.delete_column('news_article', 'featured')
models = {
'news.article': {
'Meta': {'object_name': 'Article'},
'autor': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'page': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['news'] | 42.342857 | 106 | 0.568826 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column('news_article', 'featured',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
db.delete_column('news_article', 'featured')
models = {
'news.article': {
'Meta': {'object_name': 'Article'},
'autor': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'page': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['news'] | true | true |
1c3195d838100a6aa479a48604c5f9b4b8a6f1bd | 1,463 | py | Python | src/app/routes.py | rkruegs123/geo-model-builder | f070afe18ed874b3a31db519ea6f593f40a1be00 | [
"Apache-2.0"
] | 3 | 2020-12-12T10:39:25.000Z | 2021-04-25T14:15:24.000Z | src/app/routes.py | rkruegs123/geo-model-builder | f070afe18ed874b3a31db519ea6f593f40a1be00 | [
"Apache-2.0"
] | null | null | null | src/app/routes.py | rkruegs123/geo-model-builder | f070afe18ed874b3a31db519ea6f593f40a1be00 | [
"Apache-2.0"
] | 2 | 2020-12-12T10:40:12.000Z | 2021-09-15T14:03:39.000Z | """
Copyright (c) 2020 Ryan Krueger. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Ryan Krueger, Jesse Michael Han, Daniel Selsam
"""
from flask import render_template, request, Response, send_file, jsonify
from app import app
from io import StringIO, BytesIO
import pdb
import base64
from builder import build
from util import DEFAULTS
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
@app.route('/solve', methods=['POST'])
def solve():
try:
jsdata = request.form['problem_input']
lines = str(jsdata).split('\n')
args = DEFAULTS
args['lines'] = lines
args['n_models'] = int(request.form['n_models'])
args['plot_freq'] = -1
args['losses_freq'] = -1
args['loss_freq'] = -1
args['unnamed_objects'] = (request.form['plot_unnamed'] == 'true')
figs = build(args, show_plot=False, encode_fig=True)
urls = list()
for fig in figs:
img = BytesIO()
fig.savefig(img, format='png')
fig.close()
img.seek(0)
plot_url = base64.b64encode(img.getvalue()).decode()
urls.append(f"data:image/png;base64,{plot_url}")
return jsonify(srcs=urls)
# return f"data:image/png;base64,{plot_url}"
except Exception as e:
return Response(
str(e),
status=400
)
| 27.092593 | 74 | 0.608339 |
from flask import render_template, request, Response, send_file, jsonify
from app import app
from io import StringIO, BytesIO
import pdb
import base64
from builder import build
from util import DEFAULTS
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
@app.route('/solve', methods=['POST'])
def solve():
try:
jsdata = request.form['problem_input']
lines = str(jsdata).split('\n')
args = DEFAULTS
args['lines'] = lines
args['n_models'] = int(request.form['n_models'])
args['plot_freq'] = -1
args['losses_freq'] = -1
args['loss_freq'] = -1
args['unnamed_objects'] = (request.form['plot_unnamed'] == 'true')
figs = build(args, show_plot=False, encode_fig=True)
urls = list()
for fig in figs:
img = BytesIO()
fig.savefig(img, format='png')
fig.close()
img.seek(0)
plot_url = base64.b64encode(img.getvalue()).decode()
urls.append(f"data:image/png;base64,{plot_url}")
return jsonify(srcs=urls)
except Exception as e:
return Response(
str(e),
status=400
)
| true | true |
1c3197e3b1c1e5963a29f43f10b65014e0b840b5 | 1,677 | py | Python | dmd_japan.py | brendanjmeade/dmd_gps | bddc401e7a2e8d0fb44fa830d58197f1319b11c9 | [
"MIT"
] | null | null | null | dmd_japan.py | brendanjmeade/dmd_gps | bddc401e7a2e8d0fb44fa830d58197f1319b11c9 | [
"MIT"
] | null | null | null | dmd_japan.py | brendanjmeade/dmd_gps | bddc401e7a2e8d0fb44fa830d58197f1319b11c9 | [
"MIT"
] | null | null | null | import json
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pydmd as dmd
plt.close("all")
with open('GeonetTimeSeries.json') as json_file:
data = json.load(json_file)
n_stations = len(data)
station_idx = 1
station_data = data[station_idx]
# Loop over all time series and extract starts and stops
all_start_seconds = np.zeros(n_stations)
all_n_days = np.zeros(n_stations)
for i in range(0, len(data)):
station_current = data[i]
all_start_seconds[i] = station_current["start"]
all_n_days[i] = len(station_current["lon"])
all_start_days = np.round((all_start_seconds - all_start_seconds.min()) / 86400).astype(int)
all_end_days = (all_start_days + all_n_days).astype(int)
lon_mat = np.zeros((n_stations, all_end_days.max().astype(int)))
lat_mat = np.zeros((n_stations, all_end_days.max().astype(int)))
for i in range(0, len(data)):
lon_mat[i, all_start_days[i] : all_end_days[i]] = data[i]["lon"]
lat_mat[i, all_start_days[i] : all_end_days[i]] = data[i]["lat"]
# Subtract out mean of each time series?
# FIll in missing data with linear interpolation?
# lon_mat[lon_mat == 0] = np.nan
# lat_mat[lat_mat == 0] = np.nan
plt.matshow(lon_mat[600:800, 3000:4000])
# plt.matshow(lat_mat[600:800, 3000:4000])
# Do I have to find a maximium sized submatrix with no nans?
# https://www.geeksforgeeks.org/maximum-size-sub-matrix-with-all-1s-in-a-binary-matrix/
# # Try dynamic mode decomposition
# dmdout = dmd.DMD(svd_rank=10, tlsq_rank=10, exact=True, opt=True)
dmdout = dmd.DMD(svd_rank=1)
dmdout.fit(lon_mat[600:800, 3000:4000])
# Plot decomposition
plt.matshow(dmdout.reconstructed_data.real)
plt.show(block=False)
| 28.913793 | 92 | 0.733453 | import json
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pydmd as dmd
plt.close("all")
with open('GeonetTimeSeries.json') as json_file:
data = json.load(json_file)
n_stations = len(data)
station_idx = 1
station_data = data[station_idx]
all_start_seconds = np.zeros(n_stations)
all_n_days = np.zeros(n_stations)
for i in range(0, len(data)):
station_current = data[i]
all_start_seconds[i] = station_current["start"]
all_n_days[i] = len(station_current["lon"])
all_start_days = np.round((all_start_seconds - all_start_seconds.min()) / 86400).astype(int)
all_end_days = (all_start_days + all_n_days).astype(int)
lon_mat = np.zeros((n_stations, all_end_days.max().astype(int)))
lat_mat = np.zeros((n_stations, all_end_days.max().astype(int)))
for i in range(0, len(data)):
lon_mat[i, all_start_days[i] : all_end_days[i]] = data[i]["lon"]
lat_mat[i, all_start_days[i] : all_end_days[i]] = data[i]["lat"]
plt.matshow(lon_mat[600:800, 3000:4000])
mdout.fit(lon_mat[600:800, 3000:4000])
plt.matshow(dmdout.reconstructed_data.real)
plt.show(block=False)
| true | true |
1c3199038a5fdf23ae658d8d6ef10f2a82f2d42f | 27,164 | py | Python | pyiomica/extendedDataFrame.py | benstear/pyiomica | bc26032b610fc911cc03b54115d6abdf53a56fce | [
"MIT"
] | null | null | null | pyiomica/extendedDataFrame.py | benstear/pyiomica | bc26032b610fc911cc03b54115d6abdf53a56fce | [
"MIT"
] | null | null | null | pyiomica/extendedDataFrame.py | benstear/pyiomica | bc26032b610fc911cc03b54115d6abdf53a56fce | [
"MIT"
] | null | null | null | '''PyIOmica Dataframe extending Pandas DataFrame with new functions'''
import sklearn.preprocessing
from .globalVariables import *
from . import utilityFunctions
from . import coreFunctions
class DataFrame(pd.DataFrame):
    '''Class based on pandas.DataFrame extending capabilities into the domain of PyIOmica
Initialization parameters are identical to those in pandas.DataFrame
See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html for detail.
'''
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
'''Initialization method'''
super().__init__(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
return
def filterOutAllZeroSignals(self, inplace=False):
"""Filter out all-zero signals from a DataFrame.
Parameters:
inplace: boolean, Default False
Whether to modify data in place or return a new one
Returns:
Dataframe or None
Processed data
Usage:
df_data = df_data.filterOutAllZeroSignals()
or
df_data.filterOutAllZeroSignalse(inplace=True)
"""
print('Filtering out all-zero signals')
init = self.shape[0]
new_data = self.loc[self.index[np.count_nonzero(self, axis=1) > 0]]
print('Removed ', init - new_data.shape[0], 'signals out of %s.' % init)
print('Remaining ', new_data.shape[0], 'signals!')
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def filterOutFractionZeroSignals(self, min_fraction_of_non_zeros, inplace=False):
"""Filter out fraction-zero signals from a DataFrame.
Parameters:
min_fraction_of_non_zeros: float
Maximum fraction of allowed zeros
inplace: boolean, Default False
Whether to modify data in place or return a new one
Returns:
Dataframe or None
Processed data
Usage:
df_data = df_data.filterOutFractionZeroSignals(0.75)
or
df_data.filterOutFractionZeroSignals(0.75, inplace=True)
"""
print('Filtering out low-quality signals (with more than %s%% zero points)' %(np.round(100.*(1.-min_fraction_of_non_zeros), 3)))
min_number_of_non_zero_points = np.int(np.ceil(min_fraction_of_non_zeros * self.shape[1]))
new_data = self.loc[self.index[np.count_nonzero(self, axis=1) >= min_number_of_non_zero_points]]
if (self.shape[0] - new_data.shape[0]) > 0:
print('Removed ', self.shape[0] - new_data.shape[0], 'signals out of %s.'%(self.shape[0]))
print('Remaining ', new_data.shape[0], 'signals!')
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def filterOutFractionMissingSignals(self, min_fraction_of_non_missing, inplace=False):
"""Filter out fraction-zero signals from a DataFrame.
Parameters:
min_fraction_of_non_missing: float
Maximum fraction of allowed zeros
inplace: boolean, Default False
Whether to modify data in place or return a new one
Returns:
Dataframe or None
Processed data
Usage:
df_data = df_data.filterOutFractionMissingSignals(0.75)
or
df_data.filterOutFractionMissingSignals(0.75, inplace=True)
"""
print('Filtering out low-quality signals (with more than %s%% missing points)' %(np.round(100.*(1.-min_fraction_of_non_missing), 3)))
min_number_of_non_zero_points = np.int(np.ceil(min_fraction_of_non_missing * self.shape[1]))
new_data = self.loc[self.index[(~np.isnan(self)).sum(axis=1) >= min_number_of_non_zero_points]]
if (self.shape[0] - new_data.shape[0]) > 0:
print('Removed ', self.shape[0] - new_data.shape[0], 'signals out of %s.'%(self.shape[0]))
print('Remaining ', new_data.shape[0], 'signals!')
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def filterOutReferencePointZeroSignals(self, referencePoint=0, inplace=False):
"""Filter out out first time point zeros signals from a DataFrame.
Parameters:
referencePoint: int, Default 0
Index of the reference point
inplace: boolean, Default False
Whether to modify data in place or return a new one
Returns:
Dataframe or None
Processed data
Usage:
df_data = df_data.filterOutFirstPointZeroSignals()
or
df_data.filterOutFirstPointZeroSignals(inplace=True)
"""
print('Filtering out first time point zeros signals')
new_data = self.loc[~(self.iloc[:,0] == 0.0)].copy()
if (self.shape[0] - new_data.shape[0]) > 0:
print('Removed ', self.shape[0] - new_data.shape[0], 'signals out of %s.'%(self.shape[0]))
print('Remaining ', new_data.shape[0], 'signals!')
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return self
def tagValueAsMissing(self, value=0.0, inplace=False):
"""Tag zero values with NaN.
Parameters:
inplace: boolean, Default False
Whether to modify data in place or return a new one
Returns:
Dataframe or None
Processed data
Usage:
df_data = df_data.tagValueAsMissing()
or
df_data.tagValueAsMissing(inplace=True)
"""
print('Tagging %s values with %s'%(value, np.NaN))
new_data = self.replace(to_replace=value, value=np.NaN, inplace=False)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def tagMissingAsValue(self, value=0.0, inplace=False):
"""Tag NaN with zero.
Parameters:
inplace: boolean, Default False
Whether to modify data in place or return a new one
Returns:
Dataframe or None
Processed data
Usage:
df_data = df_data.tagMissingAsValue()
or
df_data.tagMissingAsValue(inplace=True)
"""
print('Tagging %s values with %s'%(np.NaN, value))
new_data = self.replace(to_replace=np.NaN, value=value, inplace=False)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def tagLowValues(self, cutoff, replacement, inplace=False):
"""Tag low values with replacement value.
Parameters:
cutoff: float
Values below the "cutoff" are replaced with "replacement" value
replacement: float
Values below the "cutoff" are replaced with "replacement" value
inplace: boolean, Default False
Whether to modify data in place or return a new one
Returns:
Dataframe or None
Processed data
Usage:
df_data = df_data.tagLowValues(1., 1.)
or
df_data.tagLowValues(1., 1., inplace=True)
"""
print('Tagging low values (<=%s) with %s'%(cutoff, replacement))
new_data = self.mask(self <= cutoff, other=replacement, inplace=False)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def removeConstantSignals(self, theta_cutoff, inplace=False):
    """Drop rows whose standard deviation is small relative to the average
    row standard deviation.

    A row survives when std(row) / mean(all row stds) > theta_cutoff.

    Parameters:
        theta_cutoff: float
            Relative-variability cutoff for keeping a signal
        inplace: boolean, Default False
            Whether to modify data in place or return a new one

    Returns:
        Dataframe or None
            Processed data
    """
    print('\nRemoving constant signals. Cutoff value is %s'%(theta_cutoff))
    row_std = np.std(self, axis=1)
    keep_positions = np.where(row_std / np.mean(row_std) > theta_cutoff)[0]
    kept = self.iloc[keep_positions]
    print('Removed ', self.shape[0] - kept.shape[0], 'signals out of %s.' % self.shape[0])
    print('Remaining ', kept.shape[0], 'signals!')
    if not inplace:
        return self._constructor(kept).__finalize__(self)
    self._update_inplace(kept)
    return
def boxCoxTransform(self, axis=1, inplace=False):
    """Apply the Box-Cox transform to the data, one slice at a time.

    Parameters:
        axis: int, Default 1
            Direction of processing, columns (1) or rows (0)
        inplace: boolean, Default False
            Whether to modify data in place or return a new one

    Returns:
        Dataframe or None
            Processed data
    """
    print('Box-cox transforming raw data')
    transformed = self.apply(coreFunctions.boxCoxTransform, axis=axis)
    if not inplace:
        return self._constructor(transformed).__finalize__(self)
    self._update_inplace(transformed)
    return
def modifiedZScore(self, axis=0, inplace=False):
    """Apply a median-based (modified) Z-score transform along ``axis``.

    Parameters:
        axis: int, Default 0
            Direction of processing, rows (1) or columns (0)
        inplace: boolean, Default False
            Whether to modify data in place or return a new one

    Returns:
        Dataframe or None
            Processed data
    """
    print('Z-score (Median-based) transforming box-cox transformed data')
    # Work on a copy so the per-slice transform never touches self.
    scored = self.copy().apply(coreFunctions.modifiedZScore, axis=axis)
    if not inplace:
        return self._constructor(scored).__finalize__(self)
    self._update_inplace(scored)
    return
def normalizeSignalsToUnity(self, referencePoint=0, inplace=False):
    """Scale each row to unit Euclidean norm, optionally after subtracting
    a reference time point first.

    Parameters:
        referencePoint: int or None, Default 0
            Index of the reference point; None skips the subtraction step
        inplace: boolean, Default False
            Whether to modify data in place or return a new one

    Returns:
        Dataframe or None
            Processed data
    """
    print('Normalizing signals to unity')
    if referencePoint is not None:
        # Subtract the reference time-point value from all time-points.
        normalized = self.compareTimeSeriesToPoint(point=referencePoint, inplace=False).copy()
    else:
        normalized = self.copy()
    # Temporarily zero-fill NaNs so they do not poison the row norms,
    # then restore them after normalization.
    missing_mask = np.isnan(normalized.values.astype(float))
    normalized[missing_mask] = 0.0
    normalized = normalized.apply(lambda row: row / np.sqrt(np.dot(row, row)), axis=1)
    normalized[missing_mask] = np.nan
    if not inplace:
        return self._constructor(normalized).__finalize__(self)
    self._update_inplace(normalized)
    return
def quantileNormalize(self, output_distribution='original', averaging=np.mean, ties=np.mean, inplace=False):
    """Quantile-normalize the signals in this DataFrame.

    With the default 'original' distribution, every column is mapped onto a
    common distribution obtained by averaging the column-wise sorted values.
    Ties (equal values within a column) are replaced by ``ties`` applied over
    the values of the degenerate ranks; the data must contain no missing
    values for this mode. With 'uniform' or 'normal', scikit-learn's
    quantile transformation is used instead.

    Parameters:
        output_distribution: str, Default 'original'
            One of 'original', 'normal', 'uniform'
        averaging: function, Default np.mean
            How to combine same-rank elements across samples
        ties: function or str, Default np.mean
            How ties are resolved (e.g. np.mean, 'median', 'sum', ...)
        inplace: boolean, Default False
            Whether to modify data in place or return a new one

    Returns:
        Dataframe or None
            Processed data

    Raises:
        ValueError: if ``output_distribution`` is not one of the three options.

    Usage:
        df_data = df_data.quantileNormalize()
    """
    print('Quantile normalizing signals...')
    if output_distribution=='original':
        def rankTransform(series, weights):
            # Map each value to the averaged value of its rank; 'min' ranks
            # make tied values share a key, 'ordinal' ranks pick the raw
            # per-rank weights that get aggregated for those ties.
            se_temp = pd.Series(index=scipy.stats.rankdata(series.values, method='min'),
                                data=weights[scipy.stats.rankdata(series.values, method='ordinal')-1])
            series[:] = pd.Series(se_temp.index).replace(to_replace=se_temp.groupby(level=0).agg(ties).to_dict()).values
            return series
        # Target distribution: per-rank average of the column-sorted values.
        weights = averaging(np.sort(self.values, axis=0), axis=1)
        new_data = self.copy()
        new_data = new_data.apply(lambda col: rankTransform(col, weights), axis=0)
    elif output_distribution=='normal' or output_distribution=='uniform':
        new_data = self.copy()
        new_data.iloc[:] = sklearn.preprocessing.quantile_transform(self.values, output_distribution=output_distribution, n_quantiles=min(self.shape[0],1000), copy=False)
    else:
        # Bug fix: an unrecognized option previously fell through and crashed
        # below with UnboundLocalError on new_data; fail early and clearly.
        raise ValueError("Unknown output_distribution: %s" % output_distribution)
    if inplace:
        self._update_inplace(new_data)
    else:
        return self._constructor(new_data).__finalize__(self)
    return
def compareTimeSeriesToPoint(self, point='first', inplace=False):
    """Subtract a reference point from every time series (row).

    Parameters:
        point: str, int or float
            'first' or 'last' select a column by position, an int selects
            a column index, and a float is subtracted as a literal value
        inplace: boolean, Default False
            Whether to modify data in place or return a new one

    Returns:
        Dataframe or None
            Processed data
    """
    subtract_value = None
    if point == 'first':
        ref_idx = 0
    elif point == 'last':
        ref_idx = len(self.columns) - 1
    elif type(point) is int:
        ref_idx = point
    elif type(point) is float:
        subtract_value = point
    else:
        print("Specify a valid comparison point: 'first', 'last', 0, 1, ..., 10, etc., or a value")
        return
    shifted = self.copy()
    if subtract_value is None:
        # Subtract each row's own value at the reference column.
        shifted.iloc[:] = (self.values.T - self.values.T[ref_idx]).T
    else:
        shifted.iloc[:] = (self.values.T - subtract_value).T
    if not inplace:
        return self._constructor(shifted).__finalize__(self)
    self._update_inplace(shifted)
    return
def compareTwoTimeSeries(self, df, function=np.subtract, compareAllLevelsInIndex=True, mergeFunction=np.mean):
    """Combine this Dataframe with ``df`` over their common index rows.

    Parameters:
        df: pandas.DataFrame
            Data to compare against
        function: function, Default np.subtract
            Elementwise combiner, e.g. np.add, np.divide, or another <ufunc>
        compareAllLevelsInIndex: boolean, Default True
            If False, both frames are first collapsed to the
            ('source', 'id') index levels using mergeFunction
        mergeFunction: function, Default np.mean
            Aggregator used when collapsing index levels

    Returns:
        DataFrame, str or None
            Combined data, or an error message when the indices disagree
    """
    if self.index.names != df.index.names:
        errMsg = 'Index of Dataframe 1 is not of the same shape as index of Dataframe 2!'
        print(errMsg)
        return errMsg
    if compareAllLevelsInIndex:
        left, right = self, df
    else:
        def collapse(frame):
            return frame.groupby(level=['source', 'id']).agg(mergeFunction)
        left, right = collapse(self), collapse(df)
    # Only rows present in both frames participate in the comparison.
    common = set(left.index.values) & set(right.index.values)
    index = pd.MultiIndex.from_tuples(list(common), names=left.index.names)
    return function(left.loc[index], right.loc[index])
def imputeMissingWithMedian(self, axis=1, inplace=False):
    """Replace missing (NaN) points with the median of the non-missing
    points of the same slice along ``axis``.

    Note: a slice that is entirely NaN yields np.median of an empty array
    (NaN with a RuntimeWarning).

    Parameters:
        axis: int, Default 1
            Axis to apply transformation along
        inplace: boolean, Default False
            Whether to modify data in place or return a new one

    Returns:
        Dataframe or None
            Processed data

    Usage:
        df_data = df_data.imputeMissingWithMedian()

        or

        df_data.imputeMissingWithMedian(inplace=True)
    """
    def tempFunction(data):
        # Fill NaN entries with the median of the finite entries of this slice.
        data[np.isnan(data)] = np.median(data[np.isnan(data) == False])
        return data
    new_data = self.apply(tempFunction, axis=axis)
    if inplace:
        self._update_inplace(new_data)
    else:
        return self._constructor(new_data).__finalize__(self)
    # Bug fix: previously `return data`, which raised NameError when
    # inplace=True because `data` only exists inside tempFunction.
    return
def mergeDataframes(listOfDataframes, axis=0):
    """Outer-join a list of Dataframes into a single one.

    Parameters:
        listOfDataframes: list
            List of pandas.DataFrames
        axis: int, Default 0
            Merge direction. 0 stacks vertically, 1 horizontally

    Returns:
        pandas.Dataframe or None
            Merged data (None for an empty list; the sole element for a
            single-item list)
    """
    if len(listOfDataframes) == 0:
        return None
    if len(listOfDataframes) == 1:
        return listOfDataframes[0]
    combined = pd.concat(listOfDataframes, sort=False, axis=axis)
    return DataFrame(combined)
def getLobmScarglePeriodogramOfDataframe(df_data, NumberOfCPUs=4, parallel=True):
    """Compute the Lomb-Scargle periodogram of every row of a DataFrame.

    Parameters:
        df_data: pandas.DataFrame
            Data to process (rows are signals, columns are time points)
        NumberOfCPUs: int, Default 4
            Number of processes when parallel is True
        parallel: boolean, Default True
            Whether to fan work out over multiple processes

    Returns:
        pandas.Dataframe
            Lomb-Scargle periodograms (rows align with df_data)
    """
    if parallel:
        tasks = [(series.index[~np.isnan(series)].values,
                  series[~np.isnan(series)].values,
                  df_data.columns.values) for index, series in df_data.iterrows()]
        results = utilityFunctions.runCPUs(NumberOfCPUs, coreFunctions.pLombScargle, tasks)
        # runCPUs interleaves (frequencies, intensities) pairs per row.
        df_periodograms = pd.DataFrame(data=results[1::2], index=df_data.index, columns=results[0])
    else:
        frequencies = None
        intensities = []
        for index, series in df_data.iterrows():
            keep = ~np.isnan(series)
            tempFrequencies, tempIntensities = coreFunctions.LombScargle(series.index[keep].values, series[keep].values, series.index.values, OversamplingRate=1)
            if frequencies is None:
                frequencies = tempFrequencies
            intensities.append(tempIntensities)
        df_periodograms = pd.DataFrame(data=np.vstack(intensities), index=df_data.index, columns=frequencies)
    return DataFrame(df_periodograms)
def getRandomSpikesCutoffs(df_data, p_cutoff, NumberOfRandomSamples=10**3):
    """Derive spike cutoffs from a bootstrap of the provided data at
    significance level ``p_cutoff``.

    Parameters:
        df_data: pandas.DataFrame
            Data where rows are normalized signals
        p_cutoff: float
            p-Value cutoff, e.g. 0.01
        NumberOfRandomSamples: int, Default 1000
            Size of the bootstrap distribution

    Returns:
        dictionary
            Maps a count of non-missing points to an (upper, lower)
            quantile pair of the bootstrap row maxima/minima
    """
    # Resample each column independently with replacement.
    sampled = np.vstack([np.random.choice(df_data.values[:, i], size=NumberOfRandomSamples, replace=True)
                         for i in range(len(df_data.columns.values))]).T
    df_data_random = DataFrame(pd.DataFrame(data=sampled, index=range(NumberOfRandomSamples), columns=df_data.columns))
    df_data_random.filterOutFractionZeroSignals(0.75, inplace=True)
    df_data_random.normalizeSignalsToUnity(inplace=True)
    df_data_random.removeConstantSignals(0., inplace=True)
    values = df_data_random.values
    counts_non_missing = np.sum(~np.isnan(values), axis=1)
    values[np.isnan(values)] = 0.
    cutoffs = {}
    # Separate cutoffs per number of non-missing points in a row.
    for count in range(values.shape[1] + 1):
        subset = values[counts_non_missing == count]
        if len(subset) > 0:
            # NOTE(review): `interpolation=` was renamed to `method=` in
            # NumPy 1.22 and removed in 2.0; kept for older NumPy compatibility.
            cutoffs[count] = (np.quantile(np.max(subset, axis=1), 1. - p_cutoff, interpolation='lower'),
                              np.quantile(np.min(subset, axis=1), p_cutoff, interpolation='lower'))
    return cutoffs
def getRandomAutocorrelations(df_data, NumberOfRandomSamples=10**5, NumberOfCPUs=4, fraction=0.75, referencePoint=0):
    """Build an autocorrelation null-distribution from bootstrapped data
    using the Lomb-Scargle autocorrelation.

    NOTE: the input is expected to contain no missing or non-numeric points.

    Parameters:
        df_data: pandas.Series or pandas.Dataframe
        NumberOfRandomSamples: int, Default 10**5
            Size of the distribution to generate
        NumberOfCPUs: int, Default 4
            Number of processes to run simultaneously
        fraction: float, Default 0.75
            Minimum fraction of non-zero points for a sampled signal
        referencePoint: int, Default 0
            Reference point passed to normalizeSignalsToUnity

    Returns:
        pandas.DataFrame
            Autocorrelations of the null-distribution of the data
    """
    # Resample each column independently with replacement.
    sampled = np.vstack([np.random.choice(df_data.values[:, i], size=NumberOfRandomSamples, replace=True)
                         for i in range(len(df_data.columns.values))]).T
    df_data_random = DataFrame(pd.DataFrame(data=sampled, index=range(NumberOfRandomSamples), columns=df_data.columns))
    df_data_random.filterOutFractionZeroSignals(fraction, inplace=True)
    df_data_random.normalizeSignalsToUnity(inplace=True, referencePoint=referencePoint)
    df_data_random.removeConstantSignals(0., inplace=True)
    print('\nCalculating autocorrelations of %s random samples (sampled with replacement)...' % (df_data_random.shape[0]))
    tasks = [(df_data_random.iloc[i].index.values.copy(),
              df_data_random.iloc[i].values.copy(),
              df_data.columns.values.copy()) for i in range(df_data_random.shape[0])]
    results = utilityFunctions.runCPUs(NumberOfCPUs, coreFunctions.pAutocorrelation, tasks)
    return pd.DataFrame(data=results[1::2], columns=results[0])
def getRandomPeriodograms(df_data, NumberOfRandomSamples=10**5, NumberOfCPUs=4, fraction=0.75, referencePoint=0):
    """Build a periodogram null-distribution from bootstrapped data using
    the Lomb-Scargle function.

    Parameters:
        df_data: pandas.Series or pandas.Dataframe
        NumberOfRandomSamples: int, Default 10**5
            Size of the distribution to generate
        NumberOfCPUs: int, Default 4
            Number of processes to run simultaneously
        fraction: float, Default 0.75
            Minimum fraction of non-zero points for a sampled signal
        referencePoint: int, Default 0
            Reference point passed to normalizeSignalsToUnity

    Returns:
        pandas.DataFrame
            Periodograms of the bootstrapped signals
    """
    # Resample each column independently with replacement.
    sampled = np.vstack([np.random.choice(df_data.values[:, i], size=NumberOfRandomSamples, replace=True)
                         for i in range(len(df_data.columns.values))]).T
    df_data_random = DataFrame(pd.DataFrame(data=sampled, index=range(NumberOfRandomSamples), columns=df_data.columns))
    df_data_random.filterOutFractionZeroSignals(fraction, inplace=True)
    df_data_random.normalizeSignalsToUnity(inplace=True, referencePoint=referencePoint)
    df_data_random.removeConstantSignals(0., inplace=True)
    print('\nCalculating periodograms of %s random samples (sampled with replacement)...' % (df_data_random.shape[0]))
    return getLobmScarglePeriodogramOfDataframe(df_data_random, NumberOfCPUs=NumberOfCPUs)
| 33.289216 | 227 | 0.598623 |
import sklearn.preprocessing
from .globalVariables import *
from . import utilityFunctions
from . import coreFunctions
class DataFrame(pd.DataFrame):
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
super().__init__(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
return
def filterOutAllZeroSignals(self, inplace=False):
print('Filtering out all-zero signals')
init = self.shape[0]
new_data = self.loc[self.index[np.count_nonzero(self, axis=1) > 0]]
print('Removed ', init - new_data.shape[0], 'signals out of %s.' % init)
print('Remaining ', new_data.shape[0], 'signals!')
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def filterOutFractionZeroSignals(self, min_fraction_of_non_zeros, inplace=False):
print('Filtering out low-quality signals (with more than %s%% zero points)' %(np.round(100.*(1.-min_fraction_of_non_zeros), 3)))
min_number_of_non_zero_points = np.int(np.ceil(min_fraction_of_non_zeros * self.shape[1]))
new_data = self.loc[self.index[np.count_nonzero(self, axis=1) >= min_number_of_non_zero_points]]
if (self.shape[0] - new_data.shape[0]) > 0:
print('Removed ', self.shape[0] - new_data.shape[0], 'signals out of %s.'%(self.shape[0]))
print('Remaining ', new_data.shape[0], 'signals!')
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def filterOutFractionMissingSignals(self, min_fraction_of_non_missing, inplace=False):
print('Filtering out low-quality signals (with more than %s%% missing points)' %(np.round(100.*(1.-min_fraction_of_non_missing), 3)))
min_number_of_non_zero_points = np.int(np.ceil(min_fraction_of_non_missing * self.shape[1]))
new_data = self.loc[self.index[(~np.isnan(self)).sum(axis=1) >= min_number_of_non_zero_points]]
if (self.shape[0] - new_data.shape[0]) > 0:
print('Removed ', self.shape[0] - new_data.shape[0], 'signals out of %s.'%(self.shape[0]))
print('Remaining ', new_data.shape[0], 'signals!')
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def filterOutReferencePointZeroSignals(self, referencePoint=0, inplace=False):
print('Filtering out first time point zeros signals')
new_data = self.loc[~(self.iloc[:,0] == 0.0)].copy()
if (self.shape[0] - new_data.shape[0]) > 0:
print('Removed ', self.shape[0] - new_data.shape[0], 'signals out of %s.'%(self.shape[0]))
print('Remaining ', new_data.shape[0], 'signals!')
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return self
def tagValueAsMissing(self, value=0.0, inplace=False):
print('Tagging %s values with %s'%(value, np.NaN))
new_data = self.replace(to_replace=value, value=np.NaN, inplace=False)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def tagMissingAsValue(self, value=0.0, inplace=False):
print('Tagging %s values with %s'%(np.NaN, value))
new_data = self.replace(to_replace=np.NaN, value=value, inplace=False)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def tagLowValues(self, cutoff, replacement, inplace=False):
print('Tagging low values (<=%s) with %s'%(cutoff, replacement))
new_data = self.mask(self <= cutoff, other=replacement, inplace=False)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def removeConstantSignals(self, theta_cutoff, inplace=False):
print('\nRemoving constant signals. Cutoff value is %s'%(theta_cutoff))
new_data = self.iloc[np.where(np.std(self,axis=1) / np.mean(np.std(self,axis=1)) > theta_cutoff)[0]]
print('Removed ', self.shape[0] - new_data.shape[0], 'signals out of %s.' % self.shape[0])
print('Remaining ', new_data.shape[0], 'signals!')
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def boxCoxTransform(self, axis=1, inplace=False):
print('Box-cox transforming raw data')
new_data = self.apply(coreFunctions.boxCoxTransform, axis=axis)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def modifiedZScore(self, axis=0, inplace=False):
print('Z-score (Median-based) transforming box-cox transformed data')
new_data = self.copy()
new_data = new_data.apply(coreFunctions.modifiedZScore, axis=axis)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def normalizeSignalsToUnity(self, referencePoint=0, inplace=False):
print('Normalizing signals to unity')
if not referencePoint is None:
new_data = self.compareTimeSeriesToPoint(point=referencePoint, inplace=False).copy()
else:
new_data = self.copy()
where_nan = np.isnan(new_data.values.astype(float))
new_data[where_nan] = 0.0
new_data = new_data.apply(lambda data: data / np.sqrt(np.dot(data,data)),axis=1)
new_data[where_nan] = np.nan
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def quantileNormalize(self, output_distribution='original', averaging=np.mean, ties=np.mean, inplace=False):
print('Quantile normalizing signals...')
if output_distribution=='original':
def rankTransform(series, weights):
se_temp = pd.Series(index=scipy.stats.rankdata(series.values, method='min'),
data=weights[scipy.stats.rankdata(series.values, method='ordinal')-1])
series[:] = pd.Series(se_temp.index).replace(to_replace=se_temp.groupby(level=0).agg(ties).to_dict()).values
return series
weights = averaging(np.sort(self.values, axis=0), axis=1)
new_data = self.copy()
new_data = new_data.apply(lambda col: rankTransform(col, weights), axis=0)
elif output_distribution=='normal' or output_distribution=='uniform':
new_data = self.copy()
new_data.iloc[:] = sklearn.preprocessing.quantile_transform(self.values, output_distribution=output_distribution, n_quantiles=min(self.shape[0],1000), copy=False)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def compareTimeSeriesToPoint(self, point='first', inplace=False):
independent = True
if point == 'first':
idx = 0
elif point == 'last':
idx = len(self.columns) - 1
elif type(point) is int:
idx = point
elif type(point) is float:
independent = False
else:
print("Specify a valid comparison point: 'first', 'last', 0, 1, ..., 10, etc., or a value")
return
new_data = self.copy()
if independent:
new_data.iloc[:] = (self.values.T - self.values.T[idx]).T
else:
new_data.iloc[:] = (self.values.T - point).T
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return
def compareTwoTimeSeries(self, df, function=np.subtract, compareAllLevelsInIndex=True, mergeFunction=np.mean):
if self.index.names!=df.index.names:
errMsg = 'Index of Dataframe 1 is not of the same shape as index of Dataframe 2!'
print(errMsg)
return errMsg
if compareAllLevelsInIndex:
df1_grouped, df2_grouped = self, df
else:
def aggregate(df):
return df.groupby(level=['source', 'id']).agg(mergeFunction)
df1_grouped, df2_grouped = aggregate(self), aggregate(df)
index = pd.MultiIndex.from_tuples(list(set(df1_grouped.index.values).intersection(set(df2_grouped.index.values))),
names=df1_grouped.index.names)
return function(df1_grouped.loc[index], df2_grouped.loc[index])
def imputeMissingWithMedian(self, axis=1, inplace=False):
def tempFunction(data):
data[np.isnan(data)] = np.median(data[np.isnan(data) == False])
return data
new_data = self.apply(tempFunction, axis=axis)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
return data
def mergeDataframes(listOfDataframes, axis=0):
if len(listOfDataframes)==0:
return None
elif len(listOfDataframes)==1:
return listOfDataframes[0]
df = pd.concat(listOfDataframes, sort=False, axis=axis)
return DataFrame(df)
def getLobmScarglePeriodogramOfDataframe(df_data, NumberOfCPUs=4, parallel=True):
if parallel:
results = utilityFunctions.runCPUs(NumberOfCPUs, coreFunctions.pLombScargle, [(series.index[~np.isnan(series)].values, series[~np.isnan(series)].values, df_data.columns.values) for index, series in df_data.iterrows()])
df_periodograms = pd.DataFrame(data=results[1::2], index=df_data.index, columns=results[0])
else:
frequencies = None
intensities = []
for index, series in df_data.iterrows():
values = series[~np.isnan(series)].values
times = series.index[~np.isnan(series)].values
tempFrequencies, tempIntensities = coreFunctions.LombScargle(times, values, series.index.values, OversamplingRate=1)
if frequencies is None:
frequencies = tempFrequencies
intensities.append(tempIntensities)
df_periodograms = pd.DataFrame(data=np.vstack(intensities), index=df_data.index, columns=frequencies)
return DataFrame(df_periodograms)
def getRandomSpikesCutoffs(df_data, p_cutoff, NumberOfRandomSamples=10**3):
data = np.vstack([np.random.choice(df_data.values[:,i], size=NumberOfRandomSamples, replace=True) for i in range(len(df_data.columns.values))]).T
df_data_random = DataFrame(pd.DataFrame(data=data, index=range(NumberOfRandomSamples), columns=df_data.columns))
df_data_random.filterOutFractionZeroSignals(0.75, inplace=True)
df_data_random.normalizeSignalsToUnity(inplace=True)
df_data_random.removeConstantSignals(0., inplace=True)
data = df_data_random.values
counts_non_missing = np.sum(~np.isnan(data), axis=1)
data[np.isnan(data)] = 0.
cutoffs = {}
for i in list(range(data.shape[1]+1)):
idata = data[counts_non_missing==i]
if len(idata)>0:
cutoffs.update({i : (np.quantile(np.max(idata, axis=1), 1.-p_cutoff, interpolation='lower'),
np.quantile(np.min(idata, axis=1), p_cutoff, interpolation='lower'))} )
return cutoffs
def getRandomAutocorrelations(df_data, NumberOfRandomSamples=10**5, NumberOfCPUs=4, fraction=0.75, referencePoint=0):
data = np.vstack([np.random.choice(df_data.values[:,i], size=NumberOfRandomSamples, replace=True) for i in range(len(df_data.columns.values))]).T
df_data_random = DataFrame(pd.DataFrame(data=data, index=range(NumberOfRandomSamples), columns=df_data.columns))
df_data_random.filterOutFractionZeroSignals(fraction, inplace=True)
df_data_random.normalizeSignalsToUnity(inplace=True, referencePoint=referencePoint)
df_data_random.removeConstantSignals(0., inplace=True)
print('\nCalculating autocorrelations of %s random samples (sampled with replacement)...'%(df_data_random.shape[0]))
results = utilityFunctions.runCPUs(NumberOfCPUs, coreFunctions.pAutocorrelation, [(df_data_random.iloc[i].index.values.copy(),
df_data_random.iloc[i].values.copy(),
df_data.columns.values.copy()) for i in range(df_data_random.shape[0])])
return pd.DataFrame(data=results[1::2], columns=results[0])
def getRandomPeriodograms(df_data, NumberOfRandomSamples=10**5, NumberOfCPUs=4, fraction=0.75, referencePoint=0):
data = np.vstack([np.random.choice(df_data.values[:,i], size=NumberOfRandomSamples, replace=True) for i in range(len(df_data.columns.values))]).T
df_data_random = DataFrame(pd.DataFrame(data=data, index=range(NumberOfRandomSamples), columns=df_data.columns))
df_data_random.filterOutFractionZeroSignals(fraction, inplace=True)
df_data_random.normalizeSignalsToUnity(inplace=True, referencePoint=referencePoint)
df_data_random.removeConstantSignals(0., inplace=True)
print('\nCalculating periodograms of %s random samples (sampled with replacement)...'%(df_data_random.shape[0]))
return getLobmScarglePeriodogramOfDataframe(df_data_random, NumberOfCPUs=NumberOfCPUs)
| true | true |
1c319b286435ee2d468ebd6c9e17c0030699f9b7 | 586 | py | Python | nativecap/__init__.py | aisouard/pynativecap | 2d800ff4fee5dbade642d631dfb4c57e9f36d8e7 | [
"MIT"
] | 1 | 2021-01-26T08:58:26.000Z | 2021-01-26T08:58:26.000Z | nativecap/__init__.py | aisouard/nativecap | 2d800ff4fee5dbade642d631dfb4c57e9f36d8e7 | [
"MIT"
] | null | null | null | nativecap/__init__.py | aisouard/nativecap | 2d800ff4fee5dbade642d631dfb4c57e9f36d8e7 | [
"MIT"
] | null | null | null | import ctypes
import glob
import os
import platform
system = platform.system()
extension = ".pyd" if system == "Windows" else ".so"
glob_str = os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..', 'nativecap*{}'.format(extension))
library = ctypes.CDLL(glob.glob(glob_str)[0])
library.nativecap.argtypes = [
ctypes.c_int32,
ctypes.c_int32,
ctypes.c_int32,
ctypes.c_int32,
ctypes.c_void_p
]
def capture(x, y, width, height):
    """Capture a ``width`` x ``height`` screen region at (x, y).

    Returns a ctypes array holding the captured pixels, 4 bytes per pixel.
    """
    pixel_bytes = width * height * 4
    frame = (ctypes.c_ubyte * pixel_bytes)()
    library.nativecap(x, y, width, height, frame)
    return frame
| 23.44 | 71 | 0.679181 | import ctypes
import glob
import os
import platform
system = platform.system()
extension = ".pyd" if system == "Windows" else ".so"
glob_str = os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..', 'nativecap*{}'.format(extension))
library = ctypes.CDLL(glob.glob(glob_str)[0])
library.nativecap.argtypes = [
ctypes.c_int32,
ctypes.c_int32,
ctypes.c_int32,
ctypes.c_int32,
ctypes.c_void_p
]
def capture(x, y, width, height):
buffer = (ctypes.c_ubyte * (width * height * 4))()
library.nativecap(x, y, width, height, buffer)
return buffer
| true | true |
1c319b2873d919c1cff81621dcdb3a70985ed8c9 | 6,902 | py | Python | cv_lib/classification/models/cifar_large_resnet.py | zhfeing/deep-learning-lib-PyTorch | 1a4e1c1939a42c30fe32dd8d6aff210e8604e77b | [
"MIT"
] | 4 | 2021-03-29T07:34:21.000Z | 2021-04-25T08:25:30.000Z | cv_lib/classification/models/cifar_large_resnet.py | zhfeing/deep-learning-lib | f96e3a71ae2dbeb44696725ec127ff8f37d4c6e9 | [
"MIT"
] | null | null | null | cv_lib/classification/models/cifar_large_resnet.py | zhfeing/deep-learning-lib | f96e3a71ae2dbeb44696725ec127ff8f37d4c6e9 | [
"MIT"
] | 1 | 2021-03-30T07:13:31.000Z | 2021-03-30T07:13:31.000Z | import torch.nn as nn
import torch.nn.functional as F
# Public API: the CIFAR ResNet class and its depth-specific factory functions.
__all__ = [
    "ResNet_CL",
    "resnet18_cl",
    "resnet34_cl",
    "resnet50_cl",
    "resnet101_cl",
    "resnet152_cl"
]
class BasicBlock(nn.Module):
    """Two-layer (3x3 -> 3x3) residual block used by ResNet-18/34.

    When ``is_last`` is True, ``forward`` returns a tuple of the activated
    output and the pre-activation sum (the value before the final ReLU).
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when spatial size or channel count changes.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        identity = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + identity
        preact = out
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block used by ResNet-50/101/152.

    The output has ``expansion * planes`` channels. When ``is_last`` is True,
    ``forward`` also returns the pre-activation sum.
    """
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        out_planes = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        # Project the input whenever shape or channels change.
        if stride != 1 or in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        identity = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + identity
        preact = out
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class ResNet_CL(nn.Module):
    """
    Resnet for cifar dataset (large version).

    Differences from the torchvision ResNet:
        1. First conv layer has kernel size 3 (instead of 7) and stride 1 (instead of 2)
        2. Uses non-inplace relu so intermediate features can be extracted

    The last block of every stage is built with ``is_last=True`` (see
    _make_layer), so each ``self.layerN`` returns a tuple of
    (activated output, pre-activation); ``forward`` unpacks both.
    """
    def __init__(self, block, num_blocks, num_classes=10, zero_init_residual=False):
        # block: BasicBlock or Bottleneck; num_blocks: four ints, one per stage.
        super(ResNet_CL, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(512 * block.expansion, num_classes)
        # He initialization for convolutions, unit affine for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def get_feat_modules(self):
        # Ordered list of feature-extracting modules (classifier excluded).
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        feat_m.append(self.layer4)
        return feat_m

    def get_bn_before_relu(self):
        # The last BN of each stage's final block (the BN right before the
        # block's closing ReLU); useful e.g. for feature-distillation hooks.
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
            bn4 = self.layer4[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
            bn4 = self.layer4[-1].bn2
        else:
            raise NotImplementedError("ResNet unknown block error !!!")
        return [bn1, bn2, bn3, bn4]

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the final block gets
        # is_last=True so the stage also exposes its pre-activation.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for i in range(num_blocks):
            stride = strides[i]
            layers.append(block(self.in_planes, planes, stride, i == num_blocks - 1))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x, is_feat=False, preact=False):
        # Each self.layerN returns (activated, pre-activation); f1..f4 are the
        # post-ReLU stage features, fN_pre the values before the final ReLU.
        out = F.relu(self.bn1(self.conv1(x)))
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out, f4_pre = self.layer4(out)
        f4 = out
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                return [[f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], out]
            else:
                return [f0, f1, f2, f3, f4, f5], out
        else:
            return out
def resnet18_cl(**kwargs):
return ResNet_CL(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34_cl(**kwargs):
return ResNet_CL(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50_cl(**kwargs):
return ResNet_CL(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet101_cl(**kwargs):
return ResNet_CL(Bottleneck, [3, 4, 23, 3], **kwargs)
def resnet152_cl(**kwargs):
return ResNet_CL(Bottleneck, [3, 8, 36, 3], **kwargs)
| 35.57732 | 107 | 0.572733 | import torch.nn as nn
import torch.nn.functional as F
__all__ = [
"ResNet_CL",
"resnet18_cl",
"resnet34_cl",
"resnet50_cl",
"resnet101_cl",
"resnet152_cl"
]
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, is_last=False):
super(BasicBlock, self).__init__()
self.is_last = is_last
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
preact = out
out = F.relu(out)
if self.is_last:
return out, preact
else:
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, is_last=False):
super(Bottleneck, self).__init__()
self.is_last = is_last
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
preact = out
out = F.relu(out)
if self.is_last:
return out, preact
else:
return out
class ResNet_CL(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, zero_init_residual=False):
super(ResNet_CL, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def get_feat_modules(self):
feat_m = nn.ModuleList([])
feat_m.append(self.conv1)
feat_m.append(self.bn1)
feat_m.append(self.layer1)
feat_m.append(self.layer2)
feat_m.append(self.layer3)
feat_m.append(self.layer4)
return feat_m
def get_bn_before_relu(self):
if isinstance(self.layer1[0], Bottleneck):
bn1 = self.layer1[-1].bn3
bn2 = self.layer2[-1].bn3
bn3 = self.layer3[-1].bn3
bn4 = self.layer4[-1].bn3
elif isinstance(self.layer1[0], BasicBlock):
bn1 = self.layer1[-1].bn2
bn2 = self.layer2[-1].bn2
bn3 = self.layer3[-1].bn2
bn4 = self.layer4[-1].bn2
else:
raise NotImplementedError("ResNet unknown block error !!!")
return [bn1, bn2, bn3, bn4]
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for i in range(num_blocks):
stride = strides[i]
layers.append(block(self.in_planes, planes, stride, i == num_blocks - 1))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, is_feat=False, preact=False):
out = F.relu(self.bn1(self.conv1(x)))
f0 = out
out, f1_pre = self.layer1(out)
f1 = out
out, f2_pre = self.layer2(out)
f2 = out
out, f3_pre = self.layer3(out)
f3 = out
out, f4_pre = self.layer4(out)
f4 = out
out = self.avgpool(out)
out = out.view(out.size(0), -1)
f5 = out
out = self.linear(out)
if is_feat:
if preact:
return [[f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], out]
else:
return [f0, f1, f2, f3, f4, f5], out
else:
return out
def resnet18_cl(**kwargs):
return ResNet_CL(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34_cl(**kwargs):
return ResNet_CL(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50_cl(**kwargs):
return ResNet_CL(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet101_cl(**kwargs):
return ResNet_CL(Bottleneck, [3, 4, 23, 3], **kwargs)
def resnet152_cl(**kwargs):
return ResNet_CL(Bottleneck, [3, 8, 36, 3], **kwargs)
| true | true |
1c319b288707000dc9d4b775eea4ea744017f477 | 2,287 | py | Python | src/greentest/test___monkey_patching.py | felixonmars/gevent | 400a4099a8c74999afce22e821ba741436085e36 | [
"MIT"
] | 1 | 2019-05-09T08:59:55.000Z | 2019-05-09T08:59:55.000Z | src/greentest/test___monkey_patching.py | felixonmars/gevent | 400a4099a8c74999afce22e821ba741436085e36 | [
"MIT"
] | null | null | null | src/greentest/test___monkey_patching.py | felixonmars/gevent | 400a4099a8c74999afce22e821ba741436085e36 | [
"MIT"
] | null | null | null | import sys
import os
import glob
import util
import atexit
# subprocess: include in subprocess tests
TIMEOUT = 120
directory = '%s.%s' % sys.version_info[:2]
full_directory = '%s.%s.%s' % sys.version_info[:3]
if hasattr(sys, 'pypy_version_info'):
directory += 'pypy'
full_directory += 'pypy'
version = '%s.%s.%s' % sys.version_info[:3]
def get_absolute_pythonpath():
paths = [os.path.abspath(p) for p in os.environ.get('PYTHONPATH', '').split(os.pathsep)]
return os.pathsep.join(paths)
def TESTRUNNER(tests=None):
if not os.path.exists(directory):
util.log('WARNING: No test directory found at %s', directory)
return
with open(os.path.join(directory, 'version')) as f:
preferred_version = f.read().strip()
if preferred_version != version:
util.log('WARNING: The tests in %s/ are from version %s and your Python is %s', directory, preferred_version, version)
if not tests:
tests = glob.glob('%s/test_*.py' % directory)
version_tests = glob.glob('%s/test_*.py' % full_directory)
tests = sorted(tests)
version_tests = sorted(version_tests)
PYTHONPATH = (os.getcwd() + os.pathsep + get_absolute_pythonpath()).rstrip(':')
tests = [os.path.basename(x) for x in tests]
version_tests = [os.path.basename(x) for x in version_tests]
options = {'cwd': directory,
'timeout': TIMEOUT,
'setenv': {'PYTHONPATH': PYTHONPATH}}
if tests:
atexit.register(os.system, 'rm -f */@test*')
for filename in tests:
if filename in version_tests:
util.log("Overriding %s from %s with file from %s", filename, directory, full_directory)
continue
yield [sys.executable, '-u', '-m', 'monkey_test', filename], options.copy()
yield [sys.executable, '-u', '-m', 'monkey_test', '--Event', filename], options.copy()
options['cwd'] = full_directory
for filename in version_tests:
yield [sys.executable, '-u', '-m', 'monkey_test', filename], options.copy()
yield [sys.executable, '-u', '-m', 'monkey_test', '--Event', filename], options.copy()
def main():
import testrunner
return testrunner.run_many(list(TESTRUNNER(sys.argv[1:])))
if __name__ == '__main__':
main()
| 32.671429 | 126 | 0.637516 | import sys
import os
import glob
import util
import atexit
TIMEOUT = 120
directory = '%s.%s' % sys.version_info[:2]
full_directory = '%s.%s.%s' % sys.version_info[:3]
if hasattr(sys, 'pypy_version_info'):
directory += 'pypy'
full_directory += 'pypy'
version = '%s.%s.%s' % sys.version_info[:3]
def get_absolute_pythonpath():
paths = [os.path.abspath(p) for p in os.environ.get('PYTHONPATH', '').split(os.pathsep)]
return os.pathsep.join(paths)
def TESTRUNNER(tests=None):
if not os.path.exists(directory):
util.log('WARNING: No test directory found at %s', directory)
return
with open(os.path.join(directory, 'version')) as f:
preferred_version = f.read().strip()
if preferred_version != version:
util.log('WARNING: The tests in %s/ are from version %s and your Python is %s', directory, preferred_version, version)
if not tests:
tests = glob.glob('%s/test_*.py' % directory)
version_tests = glob.glob('%s/test_*.py' % full_directory)
tests = sorted(tests)
version_tests = sorted(version_tests)
PYTHONPATH = (os.getcwd() + os.pathsep + get_absolute_pythonpath()).rstrip(':')
tests = [os.path.basename(x) for x in tests]
version_tests = [os.path.basename(x) for x in version_tests]
options = {'cwd': directory,
'timeout': TIMEOUT,
'setenv': {'PYTHONPATH': PYTHONPATH}}
if tests:
atexit.register(os.system, 'rm -f */@test*')
for filename in tests:
if filename in version_tests:
util.log("Overriding %s from %s with file from %s", filename, directory, full_directory)
continue
yield [sys.executable, '-u', '-m', 'monkey_test', filename], options.copy()
yield [sys.executable, '-u', '-m', 'monkey_test', '--Event', filename], options.copy()
options['cwd'] = full_directory
for filename in version_tests:
yield [sys.executable, '-u', '-m', 'monkey_test', filename], options.copy()
yield [sys.executable, '-u', '-m', 'monkey_test', '--Event', filename], options.copy()
def main():
import testrunner
return testrunner.run_many(list(TESTRUNNER(sys.argv[1:])))
if __name__ == '__main__':
main()
| true | true |
1c319bb18ca6aa65a963f78ec43ce5b69b69718d | 963 | py | Python | o3seespy/command/analysis.py | vijaypolimeru/o3seespy | c9ef0c27f685de705721b10eb1ea81c3a3c24c4e | [
"MIT",
"BSD-3-Clause"
] | null | null | null | o3seespy/command/analysis.py | vijaypolimeru/o3seespy | c9ef0c27f685de705721b10eb1ea81c3a3c24c4e | [
"MIT",
"BSD-3-Clause"
] | 1 | 2021-06-25T15:33:31.000Z | 2021-06-25T15:33:31.000Z | o3seespy/command/analysis.py | millen1m/o3seespy | 7eead6aef8055f73af39b969e0d3499a67e1737f | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-12-12T21:01:42.000Z | 2020-12-12T21:01:42.000Z | from o3seespy.base_model import OpenSeesObject
class AnalysisBase(OpenSeesObject):
op_base_type = "analysis"
class Static(AnalysisBase):
op_type = "Static"
def __init__(self, osi):
self._parameters = [self.op_type]
self.to_process(osi)
class Transient(AnalysisBase):
op_type = "Transient"
def __init__(self, osi):
self._parameters = [self.op_type]
self.to_process(osi)
class VariableTransient(AnalysisBase):
op_type = "VariableTransient"
def __init__(self, osi):
self._parameters = [self.op_type]
self.to_process(osi)
class PFEM(AnalysisBase):
op_type = "PFEM"
def __init__(self, osi, dt_max, dt_min, gravity, ratio=0.5):
self.dt_max = dt_max
self.dt_min = dt_min
self.gravity = gravity
self.ratio = ratio
self._parameters = [self.op_type, self.dt_max, self.dt_min, self.gravity, self.ratio]
self.to_process(osi)
| 20.934783 | 93 | 0.659398 | from o3seespy.base_model import OpenSeesObject
class AnalysisBase(OpenSeesObject):
op_base_type = "analysis"
class Static(AnalysisBase):
op_type = "Static"
def __init__(self, osi):
self._parameters = [self.op_type]
self.to_process(osi)
class Transient(AnalysisBase):
op_type = "Transient"
def __init__(self, osi):
self._parameters = [self.op_type]
self.to_process(osi)
class VariableTransient(AnalysisBase):
op_type = "VariableTransient"
def __init__(self, osi):
self._parameters = [self.op_type]
self.to_process(osi)
class PFEM(AnalysisBase):
op_type = "PFEM"
def __init__(self, osi, dt_max, dt_min, gravity, ratio=0.5):
self.dt_max = dt_max
self.dt_min = dt_min
self.gravity = gravity
self.ratio = ratio
self._parameters = [self.op_type, self.dt_max, self.dt_min, self.gravity, self.ratio]
self.to_process(osi)
| true | true |
1c319bc32424b49951dda5aa20749cbeca82648c | 15,230 | py | Python | test/functional/docker/test_scenarios.py | LANsible/molecule | c259ef0e920ac12b4e47e60af4e6a33944c41f0b | [
"MIT"
] | 1 | 2019-06-03T01:53:47.000Z | 2019-06-03T01:53:47.000Z | test/functional/docker/test_scenarios.py | LANsible/molecule | c259ef0e920ac12b4e47e60af4e6a33944c41f0b | [
"MIT"
] | 1 | 2018-12-22T09:39:30.000Z | 2018-12-22T09:39:30.000Z | test/functional/docker/test_scenarios.py | LANsible/molecule | c259ef0e920ac12b4e47e60af4e6a33944c41f0b | [
"MIT"
] | 1 | 2019-01-31T20:55:33.000Z | 2019-01-31T20:55:33.000Z | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import re
import pytest
import sh
import shutil
from molecule import util
from ..conftest import (
change_dir_to,
needs_inspec,
needs_rubocop,
skip_unsupported_matrix,
)
@pytest.fixture
def scenario_to_test(request):
return request.param
@pytest.fixture
def scenario_name(request):
try:
return request.param
except AttributeError:
return None
@pytest.fixture
def driver_name(request):
return request.param
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('side_effect', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_side_effect(scenario_to_test, with_scenario, scenario_name):
options = {
'driver_name': 'docker',
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@needs_inspec
@needs_rubocop
@skip_unsupported_matrix
def test_command_init_role_inspec(temp_dir):
role_directory = os.path.join(temp_dir.strpath, 'test-init')
options = {
'role_name': 'test-init',
'verifier_name': 'inspec',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
with change_dir_to(role_directory):
sh.molecule('test')
@skip_unsupported_matrix
def test_command_init_scenario_inspec(temp_dir):
options = {
'role_name': 'test-init',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
role_directory = os.path.join(temp_dir.strpath, 'test-init')
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'test-scenario')
options = {
'scenario_name': 'test-scenario',
'role_name': 'test-init',
'verifier_name': 'inspec',
}
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd)
assert os.path.isdir(scenario_directory)
@skip_unsupported_matrix
def test_command_init_role_goss(temp_dir):
role_directory = os.path.join(temp_dir.strpath, 'test-init')
options = {
'role_name': 'test-init',
'verifier_name': 'goss',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
with change_dir_to(role_directory):
sh.molecule('test')
def test_command_init_scenario_goss(temp_dir):
options = {
'role_name': 'test-init',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
role_directory = os.path.join(temp_dir.strpath, 'test-init')
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'test-scenario')
options = {
'scenario_name': 'test-scenario',
'role_name': 'test-init',
'verifier_name': 'goss',
}
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd)
assert os.path.isdir(scenario_directory)
def test_command_init_scenario_with_invalid_role_raises(temp_dir):
options = {
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
role_directory = os.path.join(temp_dir.strpath, 'test-role')
with change_dir_to(role_directory):
options = {
'scenario_name': 'default',
'role_name': 'invalid-role-name',
}
with pytest.raises(sh.ErrorReturnCode) as e:
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd, log=False)
msg = ("ERROR: The role 'invalid-role-name' not found. "
'Please choose the proper role name.')
assert msg in str(e.value.stderr)
def test_command_init_scenario_as_default_without_default_scenario(temp_dir):
options = {
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
role_directory = os.path.join(temp_dir.strpath, 'test-role')
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'default')
shutil.rmtree(scenario_directory)
options = {
'scenario_name': 'default',
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd)
assert os.path.isdir(scenario_directory)
# NOTE(retr0h): Molecule does not allow the creation of a role without
# a default scenario. This tests roles not created by a newer Molecule.
def test_command_init_scenario_without_default_scenario_raises(temp_dir):
options = {
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
role_directory = os.path.join(temp_dir.strpath, 'test-role')
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'default')
shutil.rmtree(scenario_directory)
options = {
'scenario_name': 'invalid-role-name',
'role_name': 'test-role',
}
with pytest.raises(sh.ErrorReturnCode) as e:
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd, log=False)
msg = ('The default scenario not found. Please create a scenario '
"named 'default' first.")
assert msg in str(e.value.stderr)
@skip_unsupported_matrix
def test_command_init_role_with_template(temp_dir):
role_name = 'test-init'
role_directory = os.path.join(temp_dir.strpath, role_name)
options = {
'url': 'https://github.com/retr0h/cookiecutter-molecule.git',
'no_input': True,
'role_name': role_name,
}
cmd = sh.molecule.bake('init', 'template', **options)
pytest.helpers.run_command(cmd)
with change_dir_to(role_directory):
sh.molecule('test')
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('overrride_driver', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_test_overrides_driver(scenario_to_test, with_scenario,
driver_name, scenario_name):
options = {
'driver_name': driver_name,
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('driver/docker', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_test_builds_local_molecule_image(
scenario_to_test, with_scenario, scenario_name, driver_name):
try:
cmd = sh.docker.bake('rmi', 'molecule_local/centos:latest', '--force')
pytest.helpers.run_command(cmd)
except sh.ErrorReturnCode:
pass
pytest.helpers.test(driver_name, scenario_name)
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('test_destroy_strategy', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_test_destroy_strategy_always(scenario_to_test, with_scenario,
scenario_name, driver_name):
options = {
'destroy': 'always',
}
with pytest.raises(sh.ErrorReturnCode) as e:
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd, log=False)
msg = ("An error occurred during the test sequence action: 'lint'. "
'Cleaning up.')
assert msg in str(e.value.stdout)
assert 'PLAY [Destroy]' in str(e.value.stdout)
assert 0 != e.value.exit_code
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('test_destroy_strategy', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_test_destroy_strategy_never(scenario_to_test, with_scenario,
scenario_name, driver_name):
options = {
'destroy': 'never',
}
with pytest.raises(sh.ErrorReturnCode) as e:
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd, log=False)
msg = ("An error occurred during the test sequence action: 'lint'. "
'Cleaning up.')
assert msg not in str(e.value.stdout)
assert 0 != e.value.exit_code
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('host_group_vars', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_host_group_vars(scenario_to_test, with_scenario, scenario_name):
options = {
'all': True,
}
cmd = sh.molecule.bake('test', **options)
out = pytest.helpers.run_command(cmd, log=False)
out = util.strip_ansi_escape(out.stdout.decode('utf-8'))
assert re.search('\[all\].*?ok: \[instance\]', out, re.DOTALL)
assert re.search('\[example\].*?ok: \[instance\]', out, re.DOTALL)
assert re.search('\[example_1\].*?ok: \[instance\]', out, re.DOTALL)
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('idempotence', 'docker', 'raises'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_idempotence_raises(scenario_to_test, with_scenario, scenario_name):
options = {
'scenario_name': scenario_name,
'all': True,
'destroy': 'never',
}
cmd = sh.molecule.bake('test', **options)
with pytest.raises(sh.ErrorReturnCode_2) as e:
pytest.helpers.run_command(cmd)
assert 2 == e.value.exit_code
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('interpolation', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_interpolation(scenario_to_test, with_scenario, scenario_name):
# Modify global environment so cleanup inherits our environment.
options = {
'all': True,
}
env = os.environ
env.update({
'DRIVER_NAME': 'docker',
'INSTANCE_NAME': 'instance',
})
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd, env=env)
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('verifier', 'docker', 'testinfra'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_verify_testinfra(scenario_to_test, with_scenario,
scenario_name):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('converge', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('verify', **options)
pytest.helpers.run_command(cmd)
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('verifier', 'docker', 'goss'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_verify_goss(scenario_to_test, with_scenario, scenario_name):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('converge', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('verify', **options)
pytest.helpers.run_command(cmd)
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('verifier', 'docker', 'inspec'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_verify_inspec(scenario_to_test, with_scenario, scenario_name):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('converge', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('verify', **options)
pytest.helpers.run_command(cmd)
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('plugins', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_plugins(scenario_to_test, with_scenario, scenario_name):
options = {
'scenario_name': scenario_name,
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
| 29.688109 | 79 | 0.64872 |
import os
import re
import pytest
import sh
import shutil
from molecule import util
from ..conftest import (
change_dir_to,
needs_inspec,
needs_rubocop,
skip_unsupported_matrix,
)
@pytest.fixture
def scenario_to_test(request):
return request.param
@pytest.fixture
def scenario_name(request):
try:
return request.param
except AttributeError:
return None
@pytest.fixture
def driver_name(request):
return request.param
@skip_unsupported_matrix
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('side_effect', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_side_effect(scenario_to_test, with_scenario, scenario_name):
options = {
'driver_name': 'docker',
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@needs_inspec
@needs_rubocop
@skip_unsupported_matrix
def test_command_init_role_inspec(temp_dir):
role_directory = os.path.join(temp_dir.strpath, 'test-init')
options = {
'role_name': 'test-init',
'verifier_name': 'inspec',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
with change_dir_to(role_directory):
sh.molecule('test')
@skip_unsupported_matrix
def test_command_init_scenario_inspec(temp_dir):
options = {
'role_name': 'test-init',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
role_directory = os.path.join(temp_dir.strpath, 'test-init')
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'test-scenario')
options = {
'scenario_name': 'test-scenario',
'role_name': 'test-init',
'verifier_name': 'inspec',
}
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd)
assert os.path.isdir(scenario_directory)
@skip_unsupported_matrix
def test_command_init_role_goss(temp_dir):
role_directory = os.path.join(temp_dir.strpath, 'test-init')
options = {
'role_name': 'test-init',
'verifier_name': 'goss',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
with change_dir_to(role_directory):
sh.molecule('test')
def test_command_init_scenario_goss(temp_dir):
options = {
'role_name': 'test-init',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
role_directory = os.path.join(temp_dir.strpath, 'test-init')
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'test-scenario')
options = {
'scenario_name': 'test-scenario',
'role_name': 'test-init',
'verifier_name': 'goss',
}
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd)
assert os.path.isdir(scenario_directory)
def test_command_init_scenario_with_invalid_role_raises(temp_dir):
options = {
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
role_directory = os.path.join(temp_dir.strpath, 'test-role')
with change_dir_to(role_directory):
options = {
'scenario_name': 'default',
'role_name': 'invalid-role-name',
}
with pytest.raises(sh.ErrorReturnCode) as e:
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd, log=False)
msg = ("ERROR: The role 'invalid-role-name' not found. "
'Please choose the proper role name.')
assert msg in str(e.value.stderr)
def test_command_init_scenario_as_default_without_default_scenario(temp_dir):
options = {
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
role_directory = os.path.join(temp_dir.strpath, 'test-role')
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'default')
shutil.rmtree(scenario_directory)
options = {
'scenario_name': 'default',
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd)
assert os.path.isdir(scenario_directory)
def test_command_init_scenario_without_default_scenario_raises(temp_dir):
options = {
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
role_directory = os.path.join(temp_dir.strpath, 'test-role')
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'default')
shutil.rmtree(scenario_directory)
options = {
'scenario_name': 'invalid-role-name',
'role_name': 'test-role',
}
with pytest.raises(sh.ErrorReturnCode) as e:
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd, log=False)
msg = ('The default scenario not found. Please create a scenario '
"named 'default' first.")
assert msg in str(e.value.stderr)
@skip_unsupported_matrix
def test_command_init_role_with_template(temp_dir):
    """A role created from a remote cookiecutter template passes `molecule test`."""
    role_name = 'test-init'
    role_directory = os.path.join(temp_dir.strpath, role_name)
    pytest.helpers.run_command(
        sh.molecule.bake(
            'init', 'template',
            url='https://github.com/retr0h/cookiecutter-molecule.git',
            no_input=True,
            role_name=role_name))
    with change_dir_to(role_directory):
        sh.molecule('test')
@skip_unsupported_matrix
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('overrride_driver', 'docker', 'default'),
    ],
    indirect=[
        'scenario_to_test',
        'driver_name',
        'scenario_name',
    ])
def test_command_test_overrides_driver(scenario_to_test, with_scenario,
                                       driver_name, scenario_name):
    """`molecule test --driver-name` overrides the scenario's configured driver."""
    pytest.helpers.run_command(
        sh.molecule.bake('test', driver_name=driver_name, all=True))
@skip_unsupported_matrix
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('driver/docker', 'docker', 'default'),
    ],
    indirect=[
        'scenario_to_test',
        'driver_name',
        'scenario_name',
    ])
def test_command_test_builds_local_molecule_image(
        scenario_to_test, with_scenario, scenario_name, driver_name):
    """Removes any cached molecule_local image, then runs the full test sequence."""
    # Best effort: if the image is absent the rmi call fails, which is fine.
    rmi = sh.docker.bake('rmi', 'molecule_local/centos:latest', '--force')
    try:
        pytest.helpers.run_command(rmi)
    except sh.ErrorReturnCode:
        pass
    pytest.helpers.test(driver_name, scenario_name)
@skip_unsupported_matrix
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('test_destroy_strategy', 'docker', 'default'),
    ],
    indirect=[
        'scenario_to_test',
        'driver_name',
        'scenario_name',
    ])
def test_command_test_destroy_strategy_always(scenario_to_test, with_scenario,
                                              scenario_name, driver_name):
    """With --destroy=always a failing sequence still runs the destroy play."""
    with pytest.raises(sh.ErrorReturnCode) as e:
        pytest.helpers.run_command(
            sh.molecule.bake('test', destroy='always'), log=False)
    stdout = str(e.value.stdout)
    msg = ("An error occurred during the test sequence action: 'lint'. "
           'Cleaning up.')
    assert msg in stdout
    assert 'PLAY [Destroy]' in stdout
    assert e.value.exit_code != 0
@skip_unsupported_matrix
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('test_destroy_strategy', 'docker', 'default'),
    ],
    indirect=[
        'scenario_to_test',
        'driver_name',
        'scenario_name',
    ])
def test_command_test_destroy_strategy_never(scenario_to_test, with_scenario,
                                             scenario_name, driver_name):
    """With --destroy=never a failing sequence skips the cleanup message."""
    with pytest.raises(sh.ErrorReturnCode) as e:
        pytest.helpers.run_command(
            sh.molecule.bake('test', destroy='never'), log=False)
    msg = ("An error occurred during the test sequence action: 'lint'. "
           'Cleaning up.')
    assert msg not in str(e.value.stdout)
    assert e.value.exit_code != 0
@skip_unsupported_matrix
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('host_group_vars', 'docker', 'default'),
    ],
    indirect=[
        'scenario_to_test',
        'driver_name',
        'scenario_name',
    ])
def test_host_group_vars(scenario_to_test, with_scenario, scenario_name):
    """host_vars/group_vars are applied per inventory group during the run."""
    options = {
        'all': True,
    }
    cmd = sh.molecule.bake('test', **options)
    out = pytest.helpers.run_command(cmd, log=False)
    out = util.strip_ansi_escape(out.stdout.decode('utf-8'))
    # Raw strings: '\[' in a plain string literal is an invalid escape
    # sequence (DeprecationWarning since Python 3.6, a SyntaxError later).
    # The pattern bytes are unchanged.
    assert re.search(r'\[all\].*?ok: \[instance\]', out, re.DOTALL)
    assert re.search(r'\[example\].*?ok: \[instance\]', out, re.DOTALL)
    assert re.search(r'\[example_1\].*?ok: \[instance\]', out, re.DOTALL)
@skip_unsupported_matrix
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('idempotence', 'docker', 'raises'),
    ],
    indirect=[
        'scenario_to_test',
        'driver_name',
        'scenario_name',
    ])
def test_idempotence_raises(scenario_to_test, with_scenario, scenario_name):
    """A non-idempotent role makes `molecule test` exit with status 2."""
    cmd = sh.molecule.bake(
        'test', scenario_name=scenario_name, all=True, destroy='never')
    with pytest.raises(sh.ErrorReturnCode_2) as e:
        pytest.helpers.run_command(cmd)
    assert e.value.exit_code == 2
@skip_unsupported_matrix
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('interpolation', 'docker', 'default'),
    ],
    indirect=[
        'scenario_to_test',
        'driver_name',
        'scenario_name',
    ])
def test_interpolation(scenario_to_test, with_scenario, scenario_name):
    """Environment variables get interpolated into the molecule config."""
    options = {
        'all': True,
    }
    # Copy the environment instead of mutating os.environ in place: the
    # original `env = os.environ; env.update(...)` permanently altered the
    # test process's environment and leaked into subsequent tests.
    env = dict(os.environ)
    env.update({
        'DRIVER_NAME': 'docker',
        'INSTANCE_NAME': 'instance',
    })
    cmd = sh.molecule.bake('test', **options)
    pytest.helpers.run_command(cmd, env=env)
@skip_unsupported_matrix
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('verifier', 'docker', 'testinfra'),
    ],
    indirect=[
        'scenario_to_test',
        'driver_name',
        'scenario_name',
    ])
def test_command_verify_testinfra(scenario_to_test, with_scenario,
                                  scenario_name):
    """create -> converge -> verify succeeds with the testinfra verifier."""
    for subcommand in ('create', 'converge', 'verify'):
        pytest.helpers.run_command(
            sh.molecule.bake(subcommand, scenario_name=scenario_name))
@skip_unsupported_matrix
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('verifier', 'docker', 'goss'),
    ],
    indirect=[
        'scenario_to_test',
        'driver_name',
        'scenario_name',
    ])
def test_command_verify_goss(scenario_to_test, with_scenario, scenario_name):
    """create -> converge -> verify succeeds with the goss verifier."""
    for subcommand in ('create', 'converge', 'verify'):
        pytest.helpers.run_command(
            sh.molecule.bake(subcommand, scenario_name=scenario_name))
@skip_unsupported_matrix
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('verifier', 'docker', 'inspec'),
    ],
    indirect=[
        'scenario_to_test',
        'driver_name',
        'scenario_name',
    ])
def test_command_verify_inspec(scenario_to_test, with_scenario, scenario_name):
    """create -> converge -> verify succeeds with the inspec verifier."""
    for subcommand in ('create', 'converge', 'verify'):
        pytest.helpers.run_command(
            sh.molecule.bake(subcommand, scenario_name=scenario_name))
@skip_unsupported_matrix
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('plugins', 'docker', 'default'),
    ],
    indirect=[
        'scenario_to_test',
        'driver_name',
        'scenario_name',
    ])
def test_plugins(scenario_to_test, with_scenario, scenario_name):
    """`molecule test --all` succeeds for a scenario that uses custom plugins."""
    pytest.helpers.run_command(
        sh.molecule.bake('test', scenario_name=scenario_name, all=True))
| true | true |
1c319bffd032ab13cd239f57cf3f5a0f434956ba | 784 | py | Python | utils/get_api.py | guillermo-carrasco/tweestream | 5171b8fae27a13395eb38ed90031cf6caf6d444e | [
"MIT"
] | null | null | null | utils/get_api.py | guillermo-carrasco/tweestream | 5171b8fae27a13395eb38ed90031cf6caf6d444e | [
"MIT"
] | 6 | 2018-03-29T14:14:53.000Z | 2020-10-23T09:57:12.000Z | utils/get_api.py | guillermo-carrasco/twistream | 5171b8fae27a13395eb38ed90031cf6caf6d444e | [
"MIT"
] | null | null | null | """
Simple method to quickly get an API object, already authenticated, to work with interactively. It expects a config.yaml
file in the same directory where it is ran. This yaml file should contain twitter application credentials in the same
format expected by twistream. In fact, you can very well use the same configuration file.
"""
import yaml
from tweepy import API
from twistream.twitter import client
def get_api() -> API:
    """Build an authenticated tweepy API client from ./config.yaml.

    The YAML file must contain a ``twitter`` mapping holding the four
    application credentials, in the same format twistream expects.
    """
    with open("config.yaml", "r") as f:
        # Bug fix: yaml.FlowMappingEndToken is a scanner token class, not a
        # Loader, so the original Loader=yaml.FlowMappingEndToken call failed
        # at runtime. safe_load parses plain config mappings safely.
        config = yaml.safe_load(f)
    twitter = config.get("twitter")
    return client.get_api(
        twitter.get("consumer_key"),
        twitter.get("consumer_secret"),
        twitter.get("access_token"),
        twitter.get("access_token_secret"),
    )
| 35.636364 | 119 | 0.714286 | import yaml
from tweepy import API
from twistream.twitter import client
def get_api() -> API:
with open("config.yaml", "r") as f:
config = yaml.load(f, Loader=yaml.FlowMappingEndToken)
return client.get_api(
config.get("twitter").get("consumer_key"),
config.get("twitter").get("consumer_secret"),
config.get("twitter").get("access_token"),
config.get("twitter").get("access_token_secret"),
)
| true | true |
1c319d63f6f6de62c8bef71b24da7d04b6142551 | 142 | py | Python | examples/select_chat_by_number.py | YohananDiamond/whatsappy | 2474839baf32295fea568c4dd30c59edace11e58 | [
"MIT"
] | null | null | null | examples/select_chat_by_number.py | YohananDiamond/whatsappy | 2474839baf32295fea568c4dd30c59edace11e58 | [
"MIT"
] | null | null | null | examples/select_chat_by_number.py | YohananDiamond/whatsappy | 2474839baf32295fea568c4dd30c59edace11e58 | [
"MIT"
] | null | null | null | from whatsappy import whatsapp
whatsapp.login()
whatsapp.select_chat_by_number(14078888888) # <--
whatsapp.send("Hello")
whatsapp.close()
| 15.777778 | 50 | 0.774648 | from whatsappy import whatsapp
whatsapp.login()
whatsapp.select_chat_by_number(14078888888)
whatsapp.send("Hello")
whatsapp.close()
| true | true |
1c319dff802e65ab8ad36d6d7405dd863634b7df | 2,999 | py | Python | Chapter09/chh9-1.nltk.py | tschoi6712/pythonDataAnalysis2nd | 63e366d4dee52f7e4df6cf4d988a85d6de5b00e4 | [
"MIT"
] | null | null | null | Chapter09/chh9-1.nltk.py | tschoi6712/pythonDataAnalysis2nd | 63e366d4dee52f7e4df6cf4d988a85d6de5b00e4 | [
"MIT"
] | null | null | null | Chapter09/chh9-1.nltk.py | tschoi6712/pythonDataAnalysis2nd | 63e366d4dee52f7e4df6cf4d988a85d6de5b00e4 | [
"MIT"
] | null | null | null | """
문자 데이터와 소셜미디어 분석하기(1)
"""
# pip install nltk scikit-learn
# >python
# >>>import nltk
# >>>nltk.download()
## 1. Filtering out stopwords, names, and numbers
import nltk
# Load the English stop-word list
sw = set(nltk.corpus.stopwords.words('english'))
print("Stop words:", list(sw)[:7])
# Load the Gutenberg corpus
gb = nltk.corpus.gutenberg
print("Gutenberg files:\n", gb.fileids()[-5:])
# Extract a few sentences from one file
text_sent = gb.sents("milton-paradise.txt")[:2]
print("Unfiltered:", text_sent)
# Filter stop words out of the extracted sentences
for sent in text_sent:
    filtered = [w for w in sent if w.lower() not in sw]
    print("Filtered:\n", filtered)
    tagged = nltk.pos_tag(filtered)
    print("Tagged:\n", tagged)
    words = []
    for word in tagged:
        # Drop proper nouns (NNP) and cardinal numbers (CD)
        if word[1] != 'NNP' and word[1] != 'CD':
            words.append(word[0])
    print("Words:\n", words)
## 2. Bag of words model
# Bug fix: `import sklearn as sk` does not import submodules, so
# `sk.feature_extraction.text` raised AttributeError; import it explicitly.
from sklearn.feature_extraction.text import CountVectorizer
# Load two documents from the corpus
hamlet = gb.raw("shakespeare-hamlet.txt")
macbeth = gb.raw("shakespeare-macbeth.txt")
# Build the term-count feature vectors, excluding English stop words
cv = CountVectorizer(stop_words='english')
print("Feature vector:\n", cv.fit_transform([hamlet, macbeth]).toarray())
# Print a few of the features (unique words) across the two documents
print("Features:\n", cv.get_feature_names()[:5])
## 3. Analyzing word frequencies
import nltk
import string
gb = nltk.corpus.gutenberg
words = gb.words("shakespeare-caesar.txt")
sw = set(nltk.corpus.stopwords.words('english'))
punctuation = set(string.punctuation)
# Keep lower-cased tokens that are neither stop words nor punctuation
filtered = [w.lower() for w in words if w.lower() not in sw and w.lower() not in punctuation]
fd = nltk.FreqDist(filtered)
print("Words", list(fd.keys())[:5])
print("Counts", list(fd.values())[:5])
print("Max", fd.max())
print("Count", fd['d'])
# Same frequency analysis over adjacent word pairs (bigrams)
fd = nltk.FreqDist(nltk.bigrams(filtered))
print("Bigrams", list(fd.keys())[:5])
print("Counts", list(fd.values())[:5])
print("Bigram Max", fd.max())
print("Bigram count", fd[('let', 'vs')])
## 4. Naive Bayesian classifier
import nltk
import string
import random
# Stop-word and punctuation sets used to label the training data below
sw = set(nltk.corpus.stopwords.words('english'))
punctuation = set(string.punctuation)
def word_features(word):
    """Feature dict for the classifier: the word's length under key 'len'."""
    length = len(word)
    return dict(len=length)
def isStopword(word):
    """True if *word* is an English stop word or a punctuation character."""
    return any(word in bag for bag in (sw, punctuation))
gb = nltk.corpus.gutenberg
words = gb.words("shakespeare-caesar.txt")
# Label each word: True if it is a stop word or punctuation
labeled_words = ([(word.lower(), isStopword(word.lower())) for word in words])
random.seed(42)
random.shuffle(labeled_words)
print(labeled_words[:5])
# Use each word's length as its sole feature
featuresets = [(word_features(n), word) for (n, word) in labeled_words]
# Train on the first 90% of the data, hold out the rest for testing
cutoff = int(.9 * len(featuresets))
train_set, test_set = featuresets[:cutoff], featuresets[cutoff:]
classifier = nltk.NaiveBayesClassifier.train(train_set)
# Check whether the classifier labels example words as stop words
print("'behold' class", classifier.classify(word_features('behold')))
print("'the' class", classifier.classify(word_features('the')))
# Classification accuracy on the held-out set
print("Accuracy", nltk.classify.accuracy(classifier, test_set))
print(classifier.show_most_informative_features(5))
| 22.89313 | 93 | 0.692564 |
t("Stop words:", list(sw)[:7])
gb = nltk.corpus.gutenberg
print("Gutenberg files:\n", gb.fileids()[-5:])
text_sent = gb.sents("milton-paradise.txt")[:2]
print("Unfiltered:", text_sent)
for sent in text_sent:
filtered = [w for w in sent if w.lower() not in sw]
print("Filtered:\n", filtered)
tagged = nltk.pos_tag(filtered)
print("Tagged:\n", tagged)
words = []
for word in tagged:
if word[1] != 'NNP' and word[1] != 'CD':
words.append(word[0])
print("Words:\n", words)
gb.raw("shakespeare-hamlet.txt")
macbeth = gb.raw("shakespeare-macbeth.txt")
cv = sk.feature_extraction.text.CountVectorizer(stop_words='english')
print("Feature vector:\n", cv.fit_transform([hamlet, macbeth]).toarray())
print("Features:\n", cv.get_feature_names()[:5])
us.gutenberg
words = gb.words("shakespeare-caesar.txt")
sw = set(nltk.corpus.stopwords.words('english'))
punctuation = set(string.punctuation)
filtered = [w.lower() for w in words if w.lower() not in sw and w.lower() not in punctuation]
fd = nltk.FreqDist(filtered)
print("Words", list(fd.keys())[:5])
print("Counts", list(fd.values())[:5])
print("Max", fd.max())
print("Count", fd['d'])
fd = nltk.FreqDist(nltk.bigrams(filtered))
print("Bigrams", list(fd.keys())[:5])
print("Counts", list(fd.values())[:5])
print("Bigram Max", fd.max())
print("Bigram count", fd[('let', 'vs')])
t random
sw = set(nltk.corpus.stopwords.words('english'))
punctuation = set(string.punctuation)
def word_features(word):
return {'len': len(word)}
def isStopword(word):
return word in sw or word in punctuation
gb = nltk.corpus.gutenberg
words = gb.words("shakespeare-caesar.txt")
labeled_words = ([(word.lower(), isStopword(word.lower())) for word in words])
random.seed(42)
random.shuffle(labeled_words)
print(labeled_words[:5])
featuresets = [(word_features(n), word) for (n, word) in labeled_words]
cutoff = int(.9 * len(featuresets))
train_set, test_set = featuresets[:cutoff], featuresets[cutoff:]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print("'behold' class", classifier.classify(word_features('behold')))
print("'the' class", classifier.classify(word_features('the')))
print("Accuracy", nltk.classify.accuracy(classifier, test_set))
print(classifier.show_most_informative_features(5))
| true | true |
1c319e88e8a6163f1d250717cf2460bdb0893a46 | 37,716 | py | Python | tests/test_generators/output/kitchen_sink.py | joseporiolayats/linkml | 1d0a736079847764c6a78349dcc8838839009b74 | [
"CC0-1.0"
] | null | null | null | tests/test_generators/output/kitchen_sink.py | joseporiolayats/linkml | 1d0a736079847764c6a78349dcc8838839009b74 | [
"CC0-1.0"
] | null | null | null | tests/test_generators/output/kitchen_sink.py | joseporiolayats/linkml | 1d0a736079847764c6a78349dcc8838839009b74 | [
"CC0-1.0"
] | null | null | null | # Auto generated from kitchen_sink.yaml by pythongen.py version: 0.9.0
# Generation date: 2021-11-13T01:57:24
# Schema: kitchen_sink
#
# id: https://w3id.org/linkml/tests/kitchen_sink
# description: Kitchen Sink Schema This schema does not do anything useful. It exists to test all features of
# linkml. This particular text field exists to demonstrate markdown within a text field: Lists: * a *
# b * c And links, e.g to [Person](Person.md)
# license: https://creativecommons.org/publicdomain/zero/1.0/
import dataclasses
import sys
import re
from jsonasobj2 import JsonObj, as_dict
from typing import Optional, List, Union, Dict, ClassVar, Any
from dataclasses import dataclass
from linkml_runtime.linkml_model.meta import EnumDefinition, PermissibleValue, PvFormulaOptions
from linkml_runtime.utils.slot import Slot
from linkml_runtime.utils.metamodelcore import empty_list, empty_dict, bnode
from linkml_runtime.utils.yamlutils import YAMLRoot, extended_str, extended_float, extended_int
from linkml_runtime.utils.dataclass_extensions_376 import dataclasses_init_fn_with_kwargs
from linkml_runtime.utils.formatutils import camelcase, underscore, sfx
from linkml_runtime.utils.enumerations import EnumDefinitionImpl
from rdflib import Namespace, URIRef
from linkml_runtime.utils.curienamespace import CurieNamespace
from linkml_runtime.linkml_model.types import Boolean, Date, Integer, String
from linkml_runtime.utils.metamodelcore import Bool, XSDDate
# LinkML metamodel version this module was generated against.
metamodel_version = "1.7.0"
# Overwrite dataclasses _init_fn to add **kwargs in __init__
dataclasses._init_fn = dataclasses_init_fn_with_kwargs
# Namespaces
# CURIE prefixes used by the schema; each maps a prefix to its base URI.
A = CurieNamespace('A', 'http://example.org/activities/')
BFO = CurieNamespace('BFO', 'http://purl.obolibrary.org/obo/BFO_')
CODE = CurieNamespace('CODE', 'http://example.org/code/')
P = CurieNamespace('P', 'http://example.org/person/')
RO = CurieNamespace('RO', 'http://purl.obolibrary.org/obo/RO_')
ROR = CurieNamespace('ROR', 'http://example.org/ror/')
BIOLINK = CurieNamespace('biolink', 'https://w3id.org/biolink/')
BIZCODES = CurieNamespace('bizcodes', 'https://example.org/bizcodes/')
CORE = CurieNamespace('core', 'https://w3id.org/linkml/tests/core/')
DCE = CurieNamespace('dce', 'http://purl.org/dc/elements/1.1/')
KS = CurieNamespace('ks', 'https://w3id.org/linkml/tests/kitchen_sink/')
LEGO = CurieNamespace('lego', 'http://geneontology.org/lego/')
LINKML = CurieNamespace('linkml', 'https://w3id.org/linkml/')
PAV = CurieNamespace('pav', 'http://purl.org/pav/')
PROV = CurieNamespace('prov', 'http://www.w3.org/ns/prov#')
SCHEMA = CurieNamespace('schema', 'http://schema.org/')
SKOS = CurieNamespace('skos', 'http://www.w3.org/2004/02/skos/core#')
DEFAULT_ = KS  # default namespace for schema elements without an explicit prefix
# Types
# Class references
# Identifier types: one subclass of extended_str (or of a parent identifier
# type) per identifiable class, so ids stay type-distinct while behaving
# like plain strings.
class PersonId(extended_str):
    pass
class OrganizationId(extended_str):
    pass
class PlaceId(extended_str):
    pass
class ConceptId(extended_str):
    pass
class DiagnosisConceptId(ConceptId):
    pass
class ProcedureConceptId(ConceptId):
    pass
class CompanyId(OrganizationId):
    pass
class CodeSystemId(extended_str):
    pass
class ActivityId(extended_str):
    pass
class AgentId(extended_str):
    pass
@dataclass
class HasAliases(YAMLRoot):
    """Mixin contributing a multivalued string slot ``aliases``."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.HasAliases
    class_class_curie: ClassVar[str] = "ks:HasAliases"
    class_name: ClassVar[str] = "HasAliases"
    class_model_uri: ClassVar[URIRef] = KS.HasAliases
    aliases: Optional[Union[str, List[str]]] = empty_list()
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Normalize: wrap a scalar/None into a list, then coerce elements to str.
        if not isinstance(self.aliases, list):
            self.aliases = [self.aliases] if self.aliases is not None else []
        self.aliases = [v if isinstance(v, str) else str(v) for v in self.aliases]
        super().__post_init__(**kwargs)
@dataclass
class Friend(YAMLRoot):
    """A friend, identified only by an optional ``name``."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.Friend
    class_class_curie: ClassVar[str] = "ks:Friend"
    class_name: ClassVar[str] = "Friend"
    class_model_uri: ClassVar[URIRef] = KS.Friend
    name: Optional[str] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce a non-str name to str; leave None untouched.
        if self.name is not None and not isinstance(self.name, str):
            self.name = str(self.name)
        super().__post_init__(**kwargs)
@dataclass
class Person(YAMLRoot):
    """
    A person, living or dead
    """
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.Person
    class_class_curie: ClassVar[str] = "ks:Person"
    class_name: ClassVar[str] = "Person"
    class_model_uri: ClassVar[URIRef] = KS.Person
    # Required identifier; every other slot below is optional.
    id: Union[str, PersonId] = None
    name: Optional[str] = None
    has_employment_history: Optional[Union[Union[dict, "EmploymentEvent"], List[Union[dict, "EmploymentEvent"]]]] = empty_list()
    has_familial_relationships: Optional[Union[Union[dict, "FamilialRelationship"], List[Union[dict, "FamilialRelationship"]]]] = empty_list()
    has_medical_history: Optional[Union[Union[dict, "MedicalEvent"], List[Union[dict, "MedicalEvent"]]]] = empty_list()
    age_in_years: Optional[int] = None
    addresses: Optional[Union[Union[dict, "Address"], List[Union[dict, "Address"]]]] = empty_list()
    has_birth_event: Optional[Union[dict, "BirthEvent"]] = None
    aliases: Optional[Union[str, List[str]]] = empty_list()
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Enforce the required id, then coerce it to its identifier type.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, PersonId):
            self.id = PersonId(self.id)
        if self.name is not None and not isinstance(self.name, str):
            self.name = str(self.name)
        # Listify multivalued slots and instantiate dicts as the slot's class.
        if not isinstance(self.has_employment_history, list):
            self.has_employment_history = [self.has_employment_history] if self.has_employment_history is not None else []
        self.has_employment_history = [v if isinstance(v, EmploymentEvent) else EmploymentEvent(**as_dict(v)) for v in self.has_employment_history]
        # Inlined-as-list normalization keyed on "type" (helper from YAMLRoot).
        self._normalize_inlined_as_list(slot_name="has_familial_relationships", slot_type=FamilialRelationship, key_name="type", keyed=False)
        if not isinstance(self.has_medical_history, list):
            self.has_medical_history = [self.has_medical_history] if self.has_medical_history is not None else []
        self.has_medical_history = [v if isinstance(v, MedicalEvent) else MedicalEvent(**as_dict(v)) for v in self.has_medical_history]
        if self.age_in_years is not None and not isinstance(self.age_in_years, int):
            self.age_in_years = int(self.age_in_years)
        if not isinstance(self.addresses, list):
            self.addresses = [self.addresses] if self.addresses is not None else []
        self.addresses = [v if isinstance(v, Address) else Address(**as_dict(v)) for v in self.addresses]
        if self.has_birth_event is not None and not isinstance(self.has_birth_event, BirthEvent):
            self.has_birth_event = BirthEvent(**as_dict(self.has_birth_event))
        if not isinstance(self.aliases, list):
            self.aliases = [self.aliases] if self.aliases is not None else []
        self.aliases = [v if isinstance(v, str) else str(v) for v in self.aliases]
        super().__post_init__(**kwargs)
@dataclass
class Organization(YAMLRoot):
    """An organization with a required id, optional name, and aliases."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.Organization
    class_class_curie: ClassVar[str] = "ks:Organization"
    class_name: ClassVar[str] = "Organization"
    class_model_uri: ClassVar[URIRef] = KS.Organization
    id: Union[str, OrganizationId] = None
    name: Optional[str] = None
    aliases: Optional[Union[str, List[str]]] = empty_list()
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Required id, coerced to OrganizationId.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, OrganizationId):
            self.id = OrganizationId(self.id)
        if self.name is not None and not isinstance(self.name, str):
            self.name = str(self.name)
        # Listify aliases and coerce elements to str.
        if not isinstance(self.aliases, list):
            self.aliases = [self.aliases] if self.aliases is not None else []
        self.aliases = [v if isinstance(v, str) else str(v) for v in self.aliases]
        super().__post_init__(**kwargs)
@dataclass
class Place(YAMLRoot):
    """A place with a required id, optional name, and aliases."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.Place
    class_class_curie: ClassVar[str] = "ks:Place"
    class_name: ClassVar[str] = "Place"
    class_model_uri: ClassVar[URIRef] = KS.Place
    id: Union[str, PlaceId] = None
    name: Optional[str] = None
    aliases: Optional[Union[str, List[str]]] = empty_list()
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Required id, coerced to PlaceId.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, PlaceId):
            self.id = PlaceId(self.id)
        if self.name is not None and not isinstance(self.name, str):
            self.name = str(self.name)
        # Listify aliases and coerce elements to str.
        if not isinstance(self.aliases, list):
            self.aliases = [self.aliases] if self.aliases is not None else []
        self.aliases = [v if isinstance(v, str) else str(v) for v in self.aliases]
        super().__post_init__(**kwargs)
@dataclass
class Address(YAMLRoot):
    """A postal address: optional street and city."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.Address
    class_class_curie: ClassVar[str] = "ks:Address"
    class_name: ClassVar[str] = "Address"
    class_model_uri: ClassVar[URIRef] = KS.Address
    street: Optional[str] = None
    city: Optional[str] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce non-str values to str; leave None untouched.
        if self.street is not None and not isinstance(self.street, str):
            self.street = str(self.street)
        if self.city is not None and not isinstance(self.city, str):
            self.city = str(self.city)
        super().__post_init__(**kwargs)
@dataclass
class Concept(YAMLRoot):
    """A terminology concept: required id, optional name and code system."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.Concept
    class_class_curie: ClassVar[str] = "ks:Concept"
    class_name: ClassVar[str] = "Concept"
    class_model_uri: ClassVar[URIRef] = KS.Concept
    id: Union[str, ConceptId] = None
    name: Optional[str] = None
    # Reference (by id) to the CodeSystem this concept belongs to.
    in_code_system: Optional[Union[str, CodeSystemId]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Required id, coerced to ConceptId.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, ConceptId):
            self.id = ConceptId(self.id)
        if self.name is not None and not isinstance(self.name, str):
            self.name = str(self.name)
        if self.in_code_system is not None and not isinstance(self.in_code_system, CodeSystemId):
            self.in_code_system = CodeSystemId(self.in_code_system)
        super().__post_init__(**kwargs)
@dataclass
class DiagnosisConcept(Concept):
    """A Concept subtype used as a diagnosis; narrows id to DiagnosisConceptId."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.DiagnosisConcept
    class_class_curie: ClassVar[str] = "ks:DiagnosisConcept"
    class_name: ClassVar[str] = "DiagnosisConcept"
    class_model_uri: ClassVar[URIRef] = KS.DiagnosisConcept
    id: Union[str, DiagnosisConceptId] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Required id, coerced to the subtype's identifier class.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, DiagnosisConceptId):
            self.id = DiagnosisConceptId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class ProcedureConcept(Concept):
    """A Concept subtype used as a procedure; narrows id to ProcedureConceptId."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.ProcedureConcept
    class_class_curie: ClassVar[str] = "ks:ProcedureConcept"
    class_name: ClassVar[str] = "ProcedureConcept"
    class_model_uri: ClassVar[URIRef] = KS.ProcedureConcept
    id: Union[str, ProcedureConceptId] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Required id, coerced to the subtype's identifier class.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, ProcedureConceptId):
            self.id = ProcedureConceptId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class Event(YAMLRoot):
    """Base class for events with optional start/end dates and a current flag."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.Event
    class_class_curie: ClassVar[str] = "ks:Event"
    class_name: ClassVar[str] = "Event"
    class_model_uri: ClassVar[URIRef] = KS.Event
    started_at_time: Optional[Union[str, XSDDate]] = None
    ended_at_time: Optional[Union[str, XSDDate]] = None
    is_current: Optional[Union[bool, Bool]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce date strings to XSDDate and the flag to the Bool wrapper.
        if self.started_at_time is not None and not isinstance(self.started_at_time, XSDDate):
            self.started_at_time = XSDDate(self.started_at_time)
        if self.ended_at_time is not None and not isinstance(self.ended_at_time, XSDDate):
            self.ended_at_time = XSDDate(self.ended_at_time)
        if self.is_current is not None and not isinstance(self.is_current, Bool):
            self.is_current = Bool(self.is_current)
        super().__post_init__(**kwargs)
@dataclass
class Relationship(YAMLRoot):
    """Base class for typed relationships with an optional time span."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.Relationship
    class_class_curie: ClassVar[str] = "ks:Relationship"
    class_name: ClassVar[str] = "Relationship"
    class_model_uri: ClassVar[URIRef] = KS.Relationship
    started_at_time: Optional[Union[str, XSDDate]] = None
    ended_at_time: Optional[Union[str, XSDDate]] = None
    # Free-form here; subclasses narrow these (see FamilialRelationship).
    related_to: Optional[str] = None
    type: Optional[str] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce dates and stringify the relationship endpoints/type.
        if self.started_at_time is not None and not isinstance(self.started_at_time, XSDDate):
            self.started_at_time = XSDDate(self.started_at_time)
        if self.ended_at_time is not None and not isinstance(self.ended_at_time, XSDDate):
            self.ended_at_time = XSDDate(self.ended_at_time)
        if self.related_to is not None and not isinstance(self.related_to, str):
            self.related_to = str(self.related_to)
        if self.type is not None and not isinstance(self.type, str):
            self.type = str(self.type)
        super().__post_init__(**kwargs)
@dataclass
class FamilialRelationship(Relationship):
    """A Relationship narrowed to family ties between Persons."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.FamilialRelationship
    class_class_curie: ClassVar[str] = "ks:FamilialRelationship"
    class_name: ClassVar[str] = "FamilialRelationship"
    class_model_uri: ClassVar[URIRef] = KS.FamilialRelationship
    # Declared with a None default but enforced as required in __post_init__.
    type: Union[str, "FamilialRelationshipType"] = None
    related_to: Union[str, PersonId] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Both slots are required and coerced to their narrowed types.
        if self._is_empty(self.type):
            self.MissingRequiredField("type")
        if not isinstance(self.type, FamilialRelationshipType):
            self.type = FamilialRelationshipType(self.type)
        if self._is_empty(self.related_to):
            self.MissingRequiredField("related_to")
        if not isinstance(self.related_to, PersonId):
            self.related_to = PersonId(self.related_to)
        super().__post_init__(**kwargs)
@dataclass
class BirthEvent(Event):
    """An Event marking a birth, optionally located at a Place."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.BirthEvent
    class_class_curie: ClassVar[str] = "ks:BirthEvent"
    class_name: ClassVar[str] = "BirthEvent"
    class_model_uri: ClassVar[URIRef] = KS.BirthEvent
    # Reference (by id) to the Place of the event.
    in_location: Optional[Union[str, PlaceId]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self.in_location is not None and not isinstance(self.in_location, PlaceId):
            self.in_location = PlaceId(self.in_location)
        super().__post_init__(**kwargs)
@dataclass
class EmploymentEvent(Event):
    """An Event representing employment at a Company."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.EmploymentEvent
    class_class_curie: ClassVar[str] = "ks:EmploymentEvent"
    class_name: ClassVar[str] = "EmploymentEvent"
    class_model_uri: ClassVar[URIRef] = KS.EmploymentEvent
    # Reference (by id) to the employing Company.
    employed_at: Optional[Union[str, CompanyId]] = None
    type: Optional[Union[str, "EmploymentEventType"]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self.employed_at is not None and not isinstance(self.employed_at, CompanyId):
            self.employed_at = CompanyId(self.employed_at)
        if self.type is not None and not isinstance(self.type, EmploymentEventType):
            self.type = EmploymentEventType(self.type)
        super().__post_init__(**kwargs)
@dataclass
class MedicalEvent(Event):
    """An Event with an optional location, diagnosis, and procedure."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.MedicalEvent
    class_class_curie: ClassVar[str] = "ks:MedicalEvent"
    class_name: ClassVar[str] = "MedicalEvent"
    class_model_uri: ClassVar[URIRef] = KS.MedicalEvent
    in_location: Optional[Union[str, PlaceId]] = None
    # Inlined concept objects (not references).
    diagnosis: Optional[Union[dict, DiagnosisConcept]] = None
    procedure: Optional[Union[dict, ProcedureConcept]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self.in_location is not None and not isinstance(self.in_location, PlaceId):
            self.in_location = PlaceId(self.in_location)
        # Instantiate dicts as their concept classes.
        if self.diagnosis is not None and not isinstance(self.diagnosis, DiagnosisConcept):
            self.diagnosis = DiagnosisConcept(**as_dict(self.diagnosis))
        if self.procedure is not None and not isinstance(self.procedure, ProcedureConcept):
            self.procedure = ProcedureConcept(**as_dict(self.procedure))
        super().__post_init__(**kwargs)
@dataclass
class WithLocation(YAMLRoot):
    """Mixin contributing an optional ``in_location`` Place reference."""
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = KS.WithLocation
    class_class_curie: ClassVar[str] = "ks:WithLocation"
    class_name: ClassVar[str] = "WithLocation"
    class_model_uri: ClassVar[URIRef] = KS.WithLocation
    in_location: Optional[Union[str, PlaceId]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self.in_location is not None and not isinstance(self.in_location, PlaceId):
            self.in_location = PlaceId(self.in_location)
        super().__post_init__(**kwargs)
@dataclass
class MarriageEvent(Event):
    """An Event recording a marriage: the spouse (by PersonId) and an optional location."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.MarriageEvent
    class_class_curie: ClassVar[str] = "ks:MarriageEvent"
    class_name: ClassVar[str] = "MarriageEvent"
    class_model_uri: ClassVar[URIRef] = KS.MarriageEvent

    married_to: Optional[Union[str, PersonId]] = None
    in_location: Optional[Union[str, PlaceId]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Promote plain identifiers to their typed identifier wrappers.
        if not (self.married_to is None or isinstance(self.married_to, PersonId)):
            self.married_to = PersonId(self.married_to)
        if not (self.in_location is None or isinstance(self.in_location, PlaceId)):
            self.in_location = PlaceId(self.in_location)
        super().__post_init__(**kwargs)
@dataclass
class Company(Organization):
    """An Organization subtype with a required CompanyId and an optional CEO reference."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Company
    class_class_curie: ClassVar[str] = "ks:Company"
    class_name: ClassVar[str] = "Company"
    class_model_uri: ClassVar[URIRef] = KS.Company

    id: Union[str, CompanyId] = None
    ceo: Optional[Union[str, PersonId]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # "id" is required: flag a missing value, then coerce to CompanyId.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        self.id = self.id if isinstance(self.id, CompanyId) else CompanyId(self.id)
        if self.ceo is not None:
            if not isinstance(self.ceo, PersonId):
                self.ceo = PersonId(self.ceo)
        super().__post_init__(**kwargs)
@dataclass
class CodeSystem(YAMLRoot):
    """A code system, identified by a required CodeSystemId, with an optional name."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.CodeSystem
    class_class_curie: ClassVar[str] = "ks:CodeSystem"
    class_name: ClassVar[str] = "CodeSystem"
    class_model_uri: ClassVar[URIRef] = KS.CodeSystem

    id: Union[str, CodeSystemId] = None
    name: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Required identifier: report absence, then normalize the type.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        self.id = self.id if isinstance(self.id, CodeSystemId) else CodeSystemId(self.id)
        if not (self.name is None or isinstance(self.name, str)):
            self.name = str(self.name)
        super().__post_init__(**kwargs)
@dataclass
class Dataset(YAMLRoot):
    """Top-level container holding keyed collections of persons, companies, activities and code systems."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Dataset
    class_class_curie: ClassVar[str] = "ks:Dataset"
    class_name: ClassVar[str] = "Dataset"
    class_model_uri: ClassVar[URIRef] = KS.Dataset

    # Each collection may arrive as a dict keyed by "id" or as a list of objects.
    persons: Optional[Union[Dict[Union[str, PersonId], Union[dict, Person]], List[Union[dict, Person]]]] = empty_dict()
    companies: Optional[Union[Dict[Union[str, CompanyId], Union[dict, Company]], List[Union[dict, Company]]]] = empty_dict()
    activities: Optional[Union[Dict[Union[str, ActivityId], Union[dict, "Activity"]], List[Union[dict, "Activity"]]]] = empty_dict()
    code_systems: Optional[Union[Dict[Union[str, CodeSystemId], Union[dict, CodeSystem]], List[Union[dict, CodeSystem]]]] = empty_dict()

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Normalize each collection, keyed on the "id" slot of its element class.
        self._normalize_inlined_as_list(slot_name="persons", slot_type=Person, key_name="id", keyed=True)
        self._normalize_inlined_as_list(slot_name="companies", slot_type=Company, key_name="id", keyed=True)
        self._normalize_inlined_as_list(slot_name="activities", slot_type=Activity, key_name="id", keyed=True)
        # NOTE(review): unlike the other collections this one is normalized *as a dict* — confirm intended.
        self._normalize_inlined_as_dict(slot_name="code_systems", slot_type=CodeSystem, key_name="id", keyed=True)
        super().__post_init__(**kwargs)
@dataclass
class FakeClass(YAMLRoot):
    """Placeholder class carrying a single optional string attribute."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.FakeClass
    class_class_curie: ClassVar[str] = "ks:FakeClass"
    class_name: ClassVar[str] = "FakeClass"
    class_model_uri: ClassVar[URIRef] = KS.FakeClass

    test_attribute: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Stringify any populated non-str value.
        if not (self.test_attribute is None or isinstance(self.test_attribute, str)):
            self.test_attribute = str(self.test_attribute)
        super().__post_init__(**kwargs)
@dataclass
class ClassWithSpaces(YAMLRoot):
    """Class whose schema name ("class with spaces") contains spaces; one optional slot."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.ClassWithSpaces
    class_class_curie: ClassVar[str] = "ks:ClassWithSpaces"
    class_name: ClassVar[str] = "class with spaces"
    class_model_uri: ClassVar[URIRef] = KS.ClassWithSpaces

    slot_with_space_1: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self.slot_with_space_1 is not None:
            if not isinstance(self.slot_with_space_1, str):
                self.slot_with_space_1 = str(self.slot_with_space_1)
        super().__post_init__(**kwargs)
@dataclass
class SubclassTest(ClassWithSpaces):
    """Subclass of ClassWithSpaces (schema name "subclass test") adding an inlined object slot."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.SubclassTest
    class_class_curie: ClassVar[str] = "ks:SubclassTest"
    class_name: ClassVar[str] = "subclass test"
    class_model_uri: ClassVar[URIRef] = KS.SubclassTest

    slot_with_space_2: Optional[Union[dict, ClassWithSpaces]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Inflate an inlined dict into a ClassWithSpaces instance.
        if not (self.slot_with_space_2 is None or isinstance(self.slot_with_space_2, ClassWithSpaces)):
            self.slot_with_space_2 = ClassWithSpaces(**as_dict(self.slot_with_space_2))
        super().__post_init__(**kwargs)
@dataclass
class Activity(YAMLRoot):
    """
    A provenance-generating activity (maps to core:Activity).
    """
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = CORE.Activity
    class_class_curie: ClassVar[str] = "core:Activity"
    class_name: ClassVar[str] = "activity"
    class_model_uri: ClassVar[URIRef] = KS.Activity

    id: Union[str, ActivityId] = None
    started_at_time: Optional[Union[str, XSDDate]] = None
    ended_at_time: Optional[Union[str, XSDDate]] = None
    was_informed_by: Optional[Union[str, ActivityId]] = None
    was_associated_with: Optional[Union[str, AgentId]] = None
    used: Optional[str] = None
    description: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Required identifier.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        self.id = self.id if isinstance(self.id, ActivityId) else ActivityId(self.id)
        # Optional slots: coerce populated values to their range types.
        if not (self.started_at_time is None or isinstance(self.started_at_time, XSDDate)):
            self.started_at_time = XSDDate(self.started_at_time)
        if not (self.ended_at_time is None or isinstance(self.ended_at_time, XSDDate)):
            self.ended_at_time = XSDDate(self.ended_at_time)
        if not (self.was_informed_by is None or isinstance(self.was_informed_by, ActivityId)):
            self.was_informed_by = ActivityId(self.was_informed_by)
        if not (self.was_associated_with is None or isinstance(self.was_associated_with, AgentId)):
            self.was_associated_with = AgentId(self.was_associated_with)
        if not (self.used is None or isinstance(self.used, str)):
            self.used = str(self.used)
        if not (self.description is None or isinstance(self.description, str)):
            self.description = str(self.description)
        super().__post_init__(**kwargs)
@dataclass
class Agent(YAMLRoot):
    """
    A provenance-generating agent (maps to prov:Agent).
    """
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = PROV.Agent
    class_class_curie: ClassVar[str] = "prov:Agent"
    class_name: ClassVar[str] = "agent"
    class_model_uri: ClassVar[URIRef] = KS.Agent

    id: Union[str, AgentId] = None
    acted_on_behalf_of: Optional[Union[str, AgentId]] = None
    was_informed_by: Optional[Union[str, ActivityId]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Required identifier.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        self.id = self.id if isinstance(self.id, AgentId) else AgentId(self.id)
        if not (self.acted_on_behalf_of is None or isinstance(self.acted_on_behalf_of, AgentId)):
            self.acted_on_behalf_of = AgentId(self.acted_on_behalf_of)
        if not (self.was_informed_by is None or isinstance(self.was_informed_by, ActivityId)):
            self.was_informed_by = ActivityId(self.was_informed_by)
        super().__post_init__(**kwargs)
# Enumerations
class FamilialRelationshipType(EnumDefinitionImpl):
    # Permissible values for the kind of familial relationship.
    SIBLING_OF = PermissibleValue(text="SIBLING_OF")
    PARENT_OF = PermissibleValue(text="PARENT_OF")
    CHILD_OF = PermissibleValue(text="CHILD_OF")

    _defn = EnumDefinition(
        name="FamilialRelationshipType",
    )

class DiagnosisType(EnumDefinitionImpl):
    # Placeholder enumeration: a single "TODO" value.
    TODO = PermissibleValue(text="TODO")

    _defn = EnumDefinition(
        name="DiagnosisType",
    )

class EmploymentEventType(EnumDefinitionImpl):
    # Employment lifecycle events; each value carries a bizcodes CURIE meaning.
    HIRE = PermissibleValue(text="HIRE",
                            meaning=BIZCODES["001"])
    FIRE = PermissibleValue(text="FIRE",
                            meaning=BIZCODES["002"])
    PROMOTION = PermissibleValue(text="PROMOTION",
                                 meaning=BIZCODES["003"])
    TRANSFER = PermissibleValue(text="TRANSFER",
                                meaning=BIZCODES["004"])

    _defn = EnumDefinition(
        name="EmploymentEventType",
    )
# Slots
class slots:
    """Namespace holder: all generated Slot instances are attached to this class below."""
    pass

# Schema-level slots.
slots.employed_at = Slot(uri=KS.employed_at, name="employed at", curie=KS.curie('employed_at'),
                         model_uri=KS.employed_at, domain=None, range=Optional[Union[str, CompanyId]])

slots.is_current = Slot(uri=KS.is_current, name="is current", curie=KS.curie('is_current'),
                        model_uri=KS.is_current, domain=None, range=Optional[Union[bool, Bool]])

slots.has_employment_history = Slot(uri=KS.has_employment_history, name="has employment history", curie=KS.curie('has_employment_history'),
                                    model_uri=KS.has_employment_history, domain=None, range=Optional[Union[Union[dict, EmploymentEvent], List[Union[dict, EmploymentEvent]]]])

slots.has_marriage_history = Slot(uri=KS.has_marriage_history, name="has marriage history", curie=KS.curie('has_marriage_history'),
                                  model_uri=KS.has_marriage_history, domain=None, range=Optional[Union[Union[dict, MarriageEvent], List[Union[dict, MarriageEvent]]]])

slots.has_medical_history = Slot(uri=KS.has_medical_history, name="has medical history", curie=KS.curie('has_medical_history'),
                                 model_uri=KS.has_medical_history, domain=None, range=Optional[Union[Union[dict, MedicalEvent], List[Union[dict, MedicalEvent]]]])

slots.has_familial_relationships = Slot(uri=KS.has_familial_relationships, name="has familial relationships", curie=KS.curie('has_familial_relationships'),
                                        model_uri=KS.has_familial_relationships, domain=None, range=Optional[Union[Union[dict, FamilialRelationship], List[Union[dict, FamilialRelationship]]]])

slots.married_to = Slot(uri=KS.married_to, name="married to", curie=KS.curie('married_to'),
                        model_uri=KS.married_to, domain=None, range=Optional[Union[str, PersonId]])

slots.in_location = Slot(uri=KS.in_location, name="in location", curie=KS.curie('in_location'),
                         model_uri=KS.in_location, domain=None, range=Optional[Union[str, PlaceId]])

slots.diagnosis = Slot(uri=KS.diagnosis, name="diagnosis", curie=KS.curie('diagnosis'),
                       model_uri=KS.diagnosis, domain=None, range=Optional[Union[dict, DiagnosisConcept]])

slots.procedure = Slot(uri=KS.procedure, name="procedure", curie=KS.curie('procedure'),
                       model_uri=KS.procedure, domain=None, range=Optional[Union[dict, ProcedureConcept]])

slots.addresses = Slot(uri=KS.addresses, name="addresses", curie=KS.curie('addresses'),
                       model_uri=KS.addresses, domain=None, range=Optional[Union[Union[dict, Address], List[Union[dict, Address]]]])

slots.age_in_years = Slot(uri=KS.age_in_years, name="age in years", curie=KS.curie('age_in_years'),
                          model_uri=KS.age_in_years, domain=None, range=Optional[int])

slots.related_to = Slot(uri=KS.related_to, name="related to", curie=KS.curie('related_to'),
                        model_uri=KS.related_to, domain=None, range=Optional[str])

slots.type = Slot(uri=KS.type, name="type", curie=KS.curie('type'),
                  model_uri=KS.type, domain=None, range=Optional[str])

slots.street = Slot(uri=KS.street, name="street", curie=KS.curie('street'),
                    model_uri=KS.street, domain=None, range=Optional[str])

slots.city = Slot(uri=KS.city, name="city", curie=KS.curie('city'),
                  model_uri=KS.city, domain=None, range=Optional[str])

slots.has_birth_event = Slot(uri=KS.has_birth_event, name="has birth event", curie=KS.curie('has_birth_event'),
                             model_uri=KS.has_birth_event, domain=None, range=Optional[Union[dict, BirthEvent]])

slots.in_code_system = Slot(uri=KS.in_code_system, name="in code system", curie=KS.curie('in_code_system'),
                            model_uri=KS.in_code_system, domain=None, range=Optional[Union[str, CodeSystemId]])

# Slots drawn from the core/prov namespaces.
slots.id = Slot(uri=CORE.id, name="id", curie=CORE.curie('id'),
                model_uri=KS.id, domain=None, range=URIRef)

slots.name = Slot(uri=CORE.name, name="name", curie=CORE.curie('name'),
                  model_uri=KS.name, domain=None, range=Optional[str])

slots.description = Slot(uri=CORE.description, name="description", curie=CORE.curie('description'),
                         model_uri=KS.description, domain=None, range=Optional[str])

slots.started_at_time = Slot(uri=PROV.startedAtTime, name="started at time", curie=PROV.curie('startedAtTime'),
                             model_uri=KS.started_at_time, domain=None, range=Optional[Union[str, XSDDate]])

slots.ended_at_time = Slot(uri=PROV.endedAtTime, name="ended at time", curie=PROV.curie('endedAtTime'),
                           model_uri=KS.ended_at_time, domain=None, range=Optional[Union[str, XSDDate]])

slots.was_informed_by = Slot(uri=PROV.wasInformedBy, name="was informed by", curie=PROV.curie('wasInformedBy'),
                             model_uri=KS.was_informed_by, domain=None, range=Optional[Union[str, ActivityId]])

slots.was_associated_with = Slot(uri=PROV.wasAssociatedWith, name="was associated with", curie=PROV.curie('wasAssociatedWith'),
                                 model_uri=KS.was_associated_with, domain=None, range=Optional[Union[str, AgentId]])

slots.acted_on_behalf_of = Slot(uri=PROV.actedOnBehalfOf, name="acted on behalf of", curie=PROV.curie('actedOnBehalfOf'),
                                model_uri=KS.acted_on_behalf_of, domain=None, range=Optional[Union[str, AgentId]])

slots.was_generated_by = Slot(uri=PROV.wasGeneratedBy, name="was generated by", curie=PROV.curie('wasGeneratedBy'),
                              model_uri=KS.was_generated_by, domain=None, range=Optional[Union[str, ActivityId]])

slots.used = Slot(uri=PROV.used, name="used", curie=PROV.curie('used'),
                  model_uri=KS.used, domain=Activity, range=Optional[str])

slots.activity_set = Slot(uri=CORE.activity_set, name="activity set", curie=CORE.curie('activity_set'),
                          model_uri=KS.activity_set, domain=None, range=Optional[Union[Dict[Union[str, ActivityId], Union[dict, Activity]], List[Union[dict, Activity]]]])

slots.agent_set = Slot(uri=CORE.agent_set, name="agent set", curie=CORE.curie('agent_set'),
                       model_uri=KS.agent_set, domain=None, range=Optional[Union[Dict[Union[str, AgentId], Union[dict, Agent]], List[Union[dict, Agent]]]])

# Attribute-induced slots (class__attribute naming).
slots.hasAliases__aliases = Slot(uri=SKOS.altLabel, name="hasAliases__aliases", curie=SKOS.curie('altLabel'),
                                 model_uri=KS.hasAliases__aliases, domain=None, range=Optional[Union[str, List[str]]])

slots.company__ceo = Slot(uri=SCHEMA.ceo, name="company__ceo", curie=SCHEMA.curie('ceo'),
                          model_uri=KS.company__ceo, domain=None, range=Optional[Union[str, PersonId]])

slots.dataset__persons = Slot(uri=KS.persons, name="dataset__persons", curie=KS.curie('persons'),
                              model_uri=KS.dataset__persons, domain=None, range=Optional[Union[Dict[Union[str, PersonId], Union[dict, Person]], List[Union[dict, Person]]]])

slots.dataset__companies = Slot(uri=KS.companies, name="dataset__companies", curie=KS.curie('companies'),
                                model_uri=KS.dataset__companies, domain=None, range=Optional[Union[Dict[Union[str, CompanyId], Union[dict, Company]], List[Union[dict, Company]]]])

slots.dataset__activities = Slot(uri=KS.activities, name="dataset__activities", curie=KS.curie('activities'),
                                 model_uri=KS.dataset__activities, domain=None, range=Optional[Union[Dict[Union[str, ActivityId], Union[dict, Activity]], List[Union[dict, Activity]]]])

slots.dataset__code_systems = Slot(uri=KS.code_systems, name="dataset__code_systems", curie=KS.curie('code_systems'),
                                   model_uri=KS.dataset__code_systems, domain=None, range=Optional[Union[Dict[Union[str, CodeSystemId], Union[dict, CodeSystem]], List[Union[dict, CodeSystem]]]])

slots.fakeClass__test_attribute = Slot(uri=KS.test_attribute, name="fakeClass__test_attribute", curie=KS.curie('test_attribute'),
                                       model_uri=KS.fakeClass__test_attribute, domain=None, range=Optional[str])

slots.classWithSpaces__slot_with_space_1 = Slot(uri=KS.slot_with_space_1, name="classWithSpaces__slot_with_space_1", curie=KS.curie('slot_with_space_1'),
                                                model_uri=KS.classWithSpaces__slot_with_space_1, domain=None, range=Optional[str])

slots.subclassTest__slot_with_space_2 = Slot(uri=KS.slot_with_space_2, name="subclassTest__slot_with_space_2", curie=KS.curie('slot_with_space_2'),
                                             model_uri=KS.subclassTest__slot_with_space_2, domain=None, range=Optional[Union[dict, ClassWithSpaces]])

# Slot-usage overrides: per-class refinements of shared slots (domain, range, pattern).
slots.Person_name = Slot(uri=CORE.name, name="Person_name", curie=CORE.curie('name'),
                         model_uri=KS.Person_name, domain=Person, range=Optional[str],
                         pattern=re.compile(r'^\S+ \S+'))

slots.FamilialRelationship_type = Slot(uri=KS.type, name="FamilialRelationship_type", curie=KS.curie('type'),
                                       model_uri=KS.FamilialRelationship_type, domain=FamilialRelationship, range=Union[str, "FamilialRelationshipType"])

slots.FamilialRelationship_related_to = Slot(uri=KS.related_to, name="FamilialRelationship_related to", curie=KS.curie('related_to'),
                                             model_uri=KS.FamilialRelationship_related_to, domain=FamilialRelationship, range=Union[str, PersonId])

slots.EmploymentEvent_type = Slot(uri=KS.type, name="EmploymentEvent_type", curie=KS.curie('type'),
                                  model_uri=KS.EmploymentEvent_type, domain=EmploymentEvent, range=Optional[Union[str, "EmploymentEventType"]])
import dataclasses
import sys
import re
from jsonasobj2 import JsonObj, as_dict
from typing import Optional, List, Union, Dict, ClassVar, Any
from dataclasses import dataclass
from linkml_runtime.linkml_model.meta import EnumDefinition, PermissibleValue, PvFormulaOptions
from linkml_runtime.utils.slot import Slot
from linkml_runtime.utils.metamodelcore import empty_list, empty_dict, bnode
from linkml_runtime.utils.yamlutils import YAMLRoot, extended_str, extended_float, extended_int
from linkml_runtime.utils.dataclass_extensions_376 import dataclasses_init_fn_with_kwargs
from linkml_runtime.utils.formatutils import camelcase, underscore, sfx
from linkml_runtime.utils.enumerations import EnumDefinitionImpl
from rdflib import Namespace, URIRef
from linkml_runtime.utils.curienamespace import CurieNamespace
from linkml_runtime.linkml_model.types import Boolean, Date, Integer, String
from linkml_runtime.utils.metamodelcore import Bool, XSDDate
# Version of the LinkML metamodel this module was generated against.
metamodel_version = "1.7.0"

# NOTE(review): swaps the stock dataclasses __init__ generator for linkml's
# kwargs-aware variant (dataclasses_init_fn_with_kwargs) — presumably so the
# generated dataclasses tolerate extra keyword arguments; confirm against the
# linkml_runtime docs.
dataclasses._init_fn = dataclasses_init_fn_with_kwargs

# Namespaces used by this schema, as CURIE prefixes.
A = CurieNamespace('A', 'http://example.org/activities/')
BFO = CurieNamespace('BFO', 'http://purl.obolibrary.org/obo/BFO_')
CODE = CurieNamespace('CODE', 'http://example.org/code/')
P = CurieNamespace('P', 'http://example.org/person/')
RO = CurieNamespace('RO', 'http://purl.obolibrary.org/obo/RO_')
ROR = CurieNamespace('ROR', 'http://example.org/ror/')
BIOLINK = CurieNamespace('biolink', 'https://w3id.org/biolink/')
BIZCODES = CurieNamespace('bizcodes', 'https://example.org/bizcodes/')
CORE = CurieNamespace('core', 'https://w3id.org/linkml/tests/core/')
DCE = CurieNamespace('dce', 'http://purl.org/dc/elements/1.1/')
KS = CurieNamespace('ks', 'https://w3id.org/linkml/tests/kitchen_sink/')
LEGO = CurieNamespace('lego', 'http://geneontology.org/lego/')
LINKML = CurieNamespace('linkml', 'https://w3id.org/linkml/')
PAV = CurieNamespace('pav', 'http://purl.org/pav/')
PROV = CurieNamespace('prov', 'http://www.w3.org/ns/prov#')
SCHEMA = CurieNamespace('schema', 'http://schema.org/')
SKOS = CurieNamespace('skos', 'http://www.w3.org/2004/02/skos/core#')
# Default namespace for names without an explicit prefix.
DEFAULT_ = KS
# Identifier types: thin extended_str wrappers that tag a plain string with
# the class of thing it identifies.
class PersonId(extended_str):
    pass


class OrganizationId(extended_str):
    pass


class PlaceId(extended_str):
    pass


class ConceptId(extended_str):
    pass


class DiagnosisConceptId(ConceptId):
    pass


class ProcedureConceptId(ConceptId):
    pass


class CompanyId(OrganizationId):
    pass


class CodeSystemId(extended_str):
    pass


class ActivityId(extended_str):
    pass


class AgentId(extended_str):
    pass
@dataclass
class HasAliases(YAMLRoot):
    """Mixin providing a multivalued string ``aliases`` slot."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.HasAliases
    class_class_curie: ClassVar[str] = "ks:HasAliases"
    class_name: ClassVar[str] = "HasAliases"
    class_model_uri: ClassVar[URIRef] = KS.HasAliases

    aliases: Optional[Union[str, List[str]]] = empty_list()

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Accept a scalar or None, then stringify every entry.
        if not isinstance(self.aliases, list):
            self.aliases = [] if self.aliases is None else [self.aliases]
        self.aliases = [a if isinstance(a, str) else str(a) for a in self.aliases]
        super().__post_init__(**kwargs)
@dataclass
class Friend(YAMLRoot):
    """A friend, described only by an optional name."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Friend
    class_class_curie: ClassVar[str] = "ks:Friend"
    class_name: ClassVar[str] = "Friend"
    class_model_uri: ClassVar[URIRef] = KS.Friend

    name: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if not (self.name is None or isinstance(self.name, str)):
            self.name = str(self.name)
        super().__post_init__(**kwargs)
@dataclass
class Person(YAMLRoot):
    """A person, identified by a required PersonId, with histories, addresses and aliases."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Person
    class_class_curie: ClassVar[str] = "ks:Person"
    class_name: ClassVar[str] = "Person"
    class_model_uri: ClassVar[URIRef] = KS.Person

    id: Union[str, PersonId] = None
    name: Optional[str] = None
    has_employment_history: Optional[Union[Union[dict, "EmploymentEvent"], List[Union[dict, "EmploymentEvent"]]]] = empty_list()
    has_familial_relationships: Optional[Union[Union[dict, "FamilialRelationship"], List[Union[dict, "FamilialRelationship"]]]] = empty_list()
    has_medical_history: Optional[Union[Union[dict, "MedicalEvent"], List[Union[dict, "MedicalEvent"]]]] = empty_list()
    age_in_years: Optional[int] = None
    addresses: Optional[Union[Union[dict, "Address"], List[Union[dict, "Address"]]]] = empty_list()
    has_birth_event: Optional[Union[dict, "BirthEvent"]] = None
    aliases: Optional[Union[str, List[str]]] = empty_list()

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Required identifier: flag absence, then coerce.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        self.id = self.id if isinstance(self.id, PersonId) else PersonId(self.id)
        if not (self.name is None or isinstance(self.name, str)):
            self.name = str(self.name)
        # Multivalued slots: wrap scalars into lists, then inflate each entry.
        if not isinstance(self.has_employment_history, list):
            self.has_employment_history = [] if self.has_employment_history is None else [self.has_employment_history]
        self.has_employment_history = [e if isinstance(e, EmploymentEvent) else EmploymentEvent(**as_dict(e)) for e in self.has_employment_history]
        self._normalize_inlined_as_list(slot_name="has_familial_relationships", slot_type=FamilialRelationship, key_name="type", keyed=False)
        if not isinstance(self.has_medical_history, list):
            self.has_medical_history = [] if self.has_medical_history is None else [self.has_medical_history]
        self.has_medical_history = [m if isinstance(m, MedicalEvent) else MedicalEvent(**as_dict(m)) for m in self.has_medical_history]
        if not (self.age_in_years is None or isinstance(self.age_in_years, int)):
            self.age_in_years = int(self.age_in_years)
        if not isinstance(self.addresses, list):
            self.addresses = [] if self.addresses is None else [self.addresses]
        self.addresses = [a if isinstance(a, Address) else Address(**as_dict(a)) for a in self.addresses]
        if not (self.has_birth_event is None or isinstance(self.has_birth_event, BirthEvent)):
            self.has_birth_event = BirthEvent(**as_dict(self.has_birth_event))
        if not isinstance(self.aliases, list):
            self.aliases = [] if self.aliases is None else [self.aliases]
        self.aliases = [a if isinstance(a, str) else str(a) for a in self.aliases]
        super().__post_init__(**kwargs)
@dataclass
class Organization(YAMLRoot):
    """An organization, identified by a required OrganizationId, with a name and aliases."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Organization
    class_class_curie: ClassVar[str] = "ks:Organization"
    class_name: ClassVar[str] = "Organization"
    class_model_uri: ClassVar[URIRef] = KS.Organization

    id: Union[str, OrganizationId] = None
    name: Optional[str] = None
    aliases: Optional[Union[str, List[str]]] = empty_list()

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        self.id = self.id if isinstance(self.id, OrganizationId) else OrganizationId(self.id)
        if not (self.name is None or isinstance(self.name, str)):
            self.name = str(self.name)
        if not isinstance(self.aliases, list):
            self.aliases = [] if self.aliases is None else [self.aliases]
        self.aliases = [a if isinstance(a, str) else str(a) for a in self.aliases]
        super().__post_init__(**kwargs)
@dataclass
class Place(YAMLRoot):
    """A place, identified by a required PlaceId, with a name and aliases."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Place
    class_class_curie: ClassVar[str] = "ks:Place"
    class_name: ClassVar[str] = "Place"
    class_model_uri: ClassVar[URIRef] = KS.Place

    id: Union[str, PlaceId] = None
    name: Optional[str] = None
    aliases: Optional[Union[str, List[str]]] = empty_list()

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        self.id = self.id if isinstance(self.id, PlaceId) else PlaceId(self.id)
        if not (self.name is None or isinstance(self.name, str)):
            self.name = str(self.name)
        if not isinstance(self.aliases, list):
            self.aliases = [] if self.aliases is None else [self.aliases]
        self.aliases = [a if isinstance(a, str) else str(a) for a in self.aliases]
        super().__post_init__(**kwargs)
@dataclass
class Address(YAMLRoot):
    """A postal address with optional street and city strings."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Address
    class_class_curie: ClassVar[str] = "ks:Address"
    class_name: ClassVar[str] = "Address"
    class_model_uri: ClassVar[URIRef] = KS.Address

    street: Optional[str] = None
    city: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if not (self.street is None or isinstance(self.street, str)):
            self.street = str(self.street)
        if not (self.city is None or isinstance(self.city, str)):
            self.city = str(self.city)
        super().__post_init__(**kwargs)
@dataclass
class Concept(YAMLRoot):
    """A concept, identified by a required ConceptId, optionally named and bound to a code system."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Concept
    class_class_curie: ClassVar[str] = "ks:Concept"
    class_name: ClassVar[str] = "Concept"
    class_model_uri: ClassVar[URIRef] = KS.Concept

    id: Union[str, ConceptId] = None
    name: Optional[str] = None
    in_code_system: Optional[Union[str, CodeSystemId]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        self.id = self.id if isinstance(self.id, ConceptId) else ConceptId(self.id)
        if not (self.name is None or isinstance(self.name, str)):
            self.name = str(self.name)
        if not (self.in_code_system is None or isinstance(self.in_code_system, CodeSystemId)):
            self.in_code_system = CodeSystemId(self.in_code_system)
        super().__post_init__(**kwargs)
@dataclass
class DiagnosisConcept(Concept):
    """A Concept narrowed to diagnoses; the id range is DiagnosisConceptId."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.DiagnosisConcept
    class_class_curie: ClassVar[str] = "ks:DiagnosisConcept"
    class_name: ClassVar[str] = "DiagnosisConcept"
    class_model_uri: ClassVar[URIRef] = KS.DiagnosisConcept

    id: Union[str, DiagnosisConceptId] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        self.id = self.id if isinstance(self.id, DiagnosisConceptId) else DiagnosisConceptId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class ProcedureConcept(Concept):
    """A Concept narrowed to procedures; the id range is ProcedureConceptId."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.ProcedureConcept
    class_class_curie: ClassVar[str] = "ks:ProcedureConcept"
    class_name: ClassVar[str] = "ProcedureConcept"
    class_model_uri: ClassVar[URIRef] = KS.ProcedureConcept

    id: Union[str, ProcedureConceptId] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        self.id = self.id if isinstance(self.id, ProcedureConceptId) else ProcedureConceptId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class Event(YAMLRoot):
    """Base class for events: optional start/end dates and a current-status flag."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Event
    class_class_curie: ClassVar[str] = "ks:Event"
    class_name: ClassVar[str] = "Event"
    class_model_uri: ClassVar[URIRef] = KS.Event

    started_at_time: Optional[Union[str, XSDDate]] = None
    ended_at_time: Optional[Union[str, XSDDate]] = None
    is_current: Optional[Union[bool, Bool]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce populated values to their range types.
        if not (self.started_at_time is None or isinstance(self.started_at_time, XSDDate)):
            self.started_at_time = XSDDate(self.started_at_time)
        if not (self.ended_at_time is None or isinstance(self.ended_at_time, XSDDate)):
            self.ended_at_time = XSDDate(self.ended_at_time)
        if not (self.is_current is None or isinstance(self.is_current, Bool)):
            self.is_current = Bool(self.is_current)
        super().__post_init__(**kwargs)
@dataclass
class Relationship(YAMLRoot):
    """Base class for relationships: optional start/end dates, target and type strings."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Relationship
    class_class_curie: ClassVar[str] = "ks:Relationship"
    class_name: ClassVar[str] = "Relationship"
    class_model_uri: ClassVar[URIRef] = KS.Relationship

    started_at_time: Optional[Union[str, XSDDate]] = None
    ended_at_time: Optional[Union[str, XSDDate]] = None
    related_to: Optional[str] = None
    type: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if not (self.started_at_time is None or isinstance(self.started_at_time, XSDDate)):
            self.started_at_time = XSDDate(self.started_at_time)
        if not (self.ended_at_time is None or isinstance(self.ended_at_time, XSDDate)):
            self.ended_at_time = XSDDate(self.ended_at_time)
        if not (self.related_to is None or isinstance(self.related_to, str)):
            self.related_to = str(self.related_to)
        if not (self.type is None or isinstance(self.type, str)):
            self.type = str(self.type)
        super().__post_init__(**kwargs)
@dataclass
class FamilialRelationship(Relationship):
    """A Relationship whose ``type`` and ``related_to`` slots are required and typed."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.FamilialRelationship
    class_class_curie: ClassVar[str] = "ks:FamilialRelationship"
    class_name: ClassVar[str] = "FamilialRelationship"
    class_model_uri: ClassVar[URIRef] = KS.FamilialRelationship

    type: Union[str, "FamilialRelationshipType"] = None
    related_to: Union[str, PersonId] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Both slots are required in this subclass.
        if self._is_empty(self.type):
            self.MissingRequiredField("type")
        self.type = self.type if isinstance(self.type, FamilialRelationshipType) else FamilialRelationshipType(self.type)
        if self._is_empty(self.related_to):
            self.MissingRequiredField("related_to")
        self.related_to = self.related_to if isinstance(self.related_to, PersonId) else PersonId(self.related_to)
        super().__post_init__(**kwargs)
@dataclass
class BirthEvent(Event):
    """An Event marking a birth, with an optional place reference."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.BirthEvent
    class_class_curie: ClassVar[str] = "ks:BirthEvent"
    class_name: ClassVar[str] = "BirthEvent"
    class_model_uri: ClassVar[URIRef] = KS.BirthEvent

    in_location: Optional[Union[str, PlaceId]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self.in_location is not None:
            if not isinstance(self.in_location, PlaceId):
                self.in_location = PlaceId(self.in_location)
        super().__post_init__(**kwargs)
@dataclass
class EmploymentEvent(Event):
    """An Event describing employment at a company, with an event-type code."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.EmploymentEvent
    class_class_curie: ClassVar[str] = "ks:EmploymentEvent"
    class_name: ClassVar[str] = "EmploymentEvent"
    class_model_uri: ClassVar[URIRef] = KS.EmploymentEvent

    employed_at: Optional[Union[str, CompanyId]] = None
    type: Optional[Union[str, "EmploymentEventType"]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if not (self.employed_at is None or isinstance(self.employed_at, CompanyId)):
            self.employed_at = CompanyId(self.employed_at)
        if not (self.type is None or isinstance(self.type, EmploymentEventType)):
            self.type = EmploymentEventType(self.type)
        super().__post_init__(**kwargs)
@dataclass
class MedicalEvent(Event):
    """An Event with an optional location, diagnosis concept and procedure concept."""
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.MedicalEvent
    class_class_curie: ClassVar[str] = "ks:MedicalEvent"
    class_name: ClassVar[str] = "MedicalEvent"
    class_model_uri: ClassVar[URIRef] = KS.MedicalEvent

    in_location: Optional[Union[str, PlaceId]] = None
    diagnosis: Optional[Union[dict, DiagnosisConcept]] = None
    procedure: Optional[Union[dict, ProcedureConcept]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Normalize each populated slot to its declared range class.
        if not (self.in_location is None or isinstance(self.in_location, PlaceId)):
            self.in_location = PlaceId(self.in_location)
        if not (self.diagnosis is None or isinstance(self.diagnosis, DiagnosisConcept)):
            self.diagnosis = DiagnosisConcept(**as_dict(self.diagnosis))
        if not (self.procedure is None or isinstance(self.procedure, ProcedureConcept)):
            self.procedure = ProcedureConcept(**as_dict(self.procedure))
        super().__post_init__(**kwargs)
@dataclass
class WithLocation(YAMLRoot):
    """Mixin contributing an optional ``in_location`` slot."""

    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.WithLocation
    class_class_curie: ClassVar[str] = "ks:WithLocation"
    class_name: ClassVar[str] = "WithLocation"
    class_model_uri: ClassVar[URIRef] = KS.WithLocation

    in_location: Optional[Union[str, PlaceId]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Promote a raw place identifier to the PlaceId wrapper.
        if self.in_location is not None:
            if not isinstance(self.in_location, PlaceId):
                self.in_location = PlaceId(self.in_location)
        super().__post_init__(**kwargs)
@dataclass
class MarriageEvent(Event):
    """Event recording a marriage: spouse and optional location."""

    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.MarriageEvent
    class_class_curie: ClassVar[str] = "ks:MarriageEvent"
    class_name: ClassVar[str] = "MarriageEvent"
    class_model_uri: ClassVar[URIRef] = KS.MarriageEvent

    married_to: Optional[Union[str, PersonId]] = None
    in_location: Optional[Union[str, PlaceId]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Promote raw identifiers to typed id classes.
        if self.married_to is not None:
            if not isinstance(self.married_to, PersonId):
                self.married_to = PersonId(self.married_to)
        if self.in_location is not None:
            if not isinstance(self.in_location, PlaceId):
                self.in_location = PlaceId(self.in_location)
        super().__post_init__(**kwargs)
@dataclass
class Company(Organization):
    """An organization of kind company, with an optional CEO reference."""

    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Company
    class_class_curie: ClassVar[str] = "ks:Company"
    class_name: ClassVar[str] = "Company"
    class_model_uri: ClassVar[URIRef] = KS.Company

    id: Union[str, CompanyId] = None
    ceo: Optional[Union[str, PersonId]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # id is required: report it if missing, then normalize its type.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, CompanyId):
            self.id = CompanyId(self.id)
        # ceo is optional: coerce only when present and untyped.
        if self.ceo is not None:
            if not isinstance(self.ceo, PersonId):
                self.ceo = PersonId(self.ceo)
        super().__post_init__(**kwargs)
@dataclass
class CodeSystem(YAMLRoot):
    """A terminology / code system, identified by id with an optional name."""

    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.CodeSystem
    class_class_curie: ClassVar[str] = "ks:CodeSystem"
    class_name: ClassVar[str] = "CodeSystem"
    class_model_uri: ClassVar[URIRef] = KS.CodeSystem

    id: Union[str, CodeSystemId] = None
    name: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # id is required: report it if missing, then normalize its type.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, CodeSystemId):
            self.id = CodeSystemId(self.id)
        # name is optional: force to str when supplied as something else.
        if self.name is not None:
            if not isinstance(self.name, str):
                self.name = str(self.name)
        super().__post_init__(**kwargs)
@dataclass
class Dataset(YAMLRoot):
    """Top-level container holding persons, companies, activities and code systems."""

    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.Dataset
    class_class_curie: ClassVar[str] = "ks:Dataset"
    class_name: ClassVar[str] = "Dataset"
    class_model_uri: ClassVar[URIRef] = KS.Dataset

    persons: Optional[Union[Dict[Union[str, PersonId], Union[dict, Person]], List[Union[dict, Person]]]] = empty_dict()
    companies: Optional[Union[Dict[Union[str, CompanyId], Union[dict, Company]], List[Union[dict, Company]]]] = empty_dict()
    activities: Optional[Union[Dict[Union[str, ActivityId], Union[dict, "Activity"]], List[Union[dict, "Activity"]]]] = empty_dict()
    code_systems: Optional[Union[Dict[Union[str, CodeSystemId], Union[dict, CodeSystem]], List[Union[dict, CodeSystem]]]] = empty_dict()

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # These collections are inlined as lists of keyed objects.
        for collection, cls in (
            ("persons", Person),
            ("companies", Company),
            ("activities", Activity),
        ):
            self._normalize_inlined_as_list(
                slot_name=collection, slot_type=cls, key_name="id", keyed=True
            )
        # code_systems is inlined as a dict keyed by id.
        self._normalize_inlined_as_dict(
            slot_name="code_systems", slot_type=CodeSystem, key_name="id", keyed=True
        )
        super().__post_init__(**kwargs)
@dataclass
class FakeClass(YAMLRoot):
    """Placeholder class exercising a single plain string attribute."""

    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.FakeClass
    class_class_curie: ClassVar[str] = "ks:FakeClass"
    class_name: ClassVar[str] = "FakeClass"
    class_model_uri: ClassVar[URIRef] = KS.FakeClass

    test_attribute: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Force the attribute to str when a non-string value was supplied.
        if self.test_attribute is not None:
            if not isinstance(self.test_attribute, str):
                self.test_attribute = str(self.test_attribute)
        super().__post_init__(**kwargs)
@dataclass
class ClassWithSpaces(YAMLRoot):
    """Tests handling of schema names containing spaces ("class with spaces")."""

    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.ClassWithSpaces
    class_class_curie: ClassVar[str] = "ks:ClassWithSpaces"
    class_name: ClassVar[str] = "class with spaces"
    class_model_uri: ClassVar[URIRef] = KS.ClassWithSpaces

    slot_with_space_1: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Force the slot to str when a non-string value was supplied.
        if self.slot_with_space_1 is not None:
            if not isinstance(self.slot_with_space_1, str):
                self.slot_with_space_1 = str(self.slot_with_space_1)
        super().__post_init__(**kwargs)
@dataclass
class SubclassTest(ClassWithSpaces):
    """Subclass of ClassWithSpaces ("subclass test") adding an object-valued slot."""

    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = KS.SubclassTest
    class_class_curie: ClassVar[str] = "ks:SubclassTest"
    class_name: ClassVar[str] = "subclass test"
    class_model_uri: ClassVar[URIRef] = KS.SubclassTest

    slot_with_space_2: Optional[Union[dict, ClassWithSpaces]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Rebuild the nested object from a plain dict when necessary.
        if self.slot_with_space_2 is not None:
            if not isinstance(self.slot_with_space_2, ClassWithSpaces):
                self.slot_with_space_2 = ClassWithSpaces(**as_dict(self.slot_with_space_2))
        super().__post_init__(**kwargs)
@dataclass
class Activity(YAMLRoot):
    """A provenance-style activity (prov-inspired), identified by id."""

    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = CORE.Activity
    class_class_curie: ClassVar[str] = "core:Activity"
    class_name: ClassVar[str] = "activity"
    class_model_uri: ClassVar[URIRef] = KS.Activity

    id: Union[str, ActivityId] = None
    started_at_time: Optional[Union[str, XSDDate]] = None
    ended_at_time: Optional[Union[str, XSDDate]] = None
    was_informed_by: Optional[Union[str, ActivityId]] = None
    was_associated_with: Optional[Union[str, AgentId]] = None
    used: Optional[str] = None
    description: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # id is required: report it if missing, then normalize its type.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, ActivityId):
            self.id = ActivityId(self.id)

        def _wrap(value, target):
            # Leave None alone; otherwise coerce to the declared wrapper type.
            if value is not None and not isinstance(value, target):
                return target(value)
            return value

        self.started_at_time = _wrap(self.started_at_time, XSDDate)
        self.ended_at_time = _wrap(self.ended_at_time, XSDDate)
        self.was_informed_by = _wrap(self.was_informed_by, ActivityId)
        self.was_associated_with = _wrap(self.was_associated_with, AgentId)
        self.used = _wrap(self.used, str)
        self.description = _wrap(self.description, str)

        super().__post_init__(**kwargs)
@dataclass
class Agent(YAMLRoot):
    """A provenance agent (prov:Agent), identified by id."""

    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = PROV.Agent
    class_class_curie: ClassVar[str] = "prov:Agent"
    class_name: ClassVar[str] = "agent"
    class_model_uri: ClassVar[URIRef] = KS.Agent

    id: Union[str, AgentId] = None
    acted_on_behalf_of: Optional[Union[str, AgentId]] = None
    was_informed_by: Optional[Union[str, ActivityId]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # id is required: report it if missing, then normalize its type.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, AgentId):
            self.id = AgentId(self.id)
        # Optional references: coerce only when present and untyped.
        if self.acted_on_behalf_of is not None:
            if not isinstance(self.acted_on_behalf_of, AgentId):
                self.acted_on_behalf_of = AgentId(self.acted_on_behalf_of)
        if self.was_informed_by is not None:
            if not isinstance(self.was_informed_by, ActivityId):
                self.was_informed_by = ActivityId(self.was_informed_by)
        super().__post_init__(**kwargs)
class FamilialRelationshipType(EnumDefinitionImpl):
    # Enumeration of family-relation kinds used by FamilialRelationship.type.
    SIBLING_OF = PermissibleValue(text="SIBLING_OF")
    PARENT_OF = PermissibleValue(text="PARENT_OF")
    CHILD_OF = PermissibleValue(text="CHILD_OF")

    _defn = EnumDefinition(
        name="FamilialRelationshipType",
    )
class DiagnosisType(EnumDefinitionImpl):
    # Placeholder enum: only a single TODO value is defined in the schema.
    TODO = PermissibleValue(text="TODO")

    _defn = EnumDefinition(
        name="DiagnosisType",
    )
class EmploymentEventType(EnumDefinitionImpl):
    # Employment lifecycle events; each value is mapped to a BIZCODES meaning URI.
    HIRE = PermissibleValue(text="HIRE",
                            meaning=BIZCODES["001"])
    FIRE = PermissibleValue(text="FIRE",
                            meaning=BIZCODES["002"])
    PROMOTION = PermissibleValue(text="PROMOTION",
                                 meaning=BIZCODES["003"])
    TRANSFER = PermissibleValue(text="TRANSFER",
                                meaning=BIZCODES["004"])

    _defn = EnumDefinition(
        name="EmploymentEventType",
    )
# Slots
# Namespace object: generated Slot metadata is attached as attributes below.
class slots:
    pass

slots.employed_at = Slot(uri=KS.employed_at, name="employed at", curie=KS.curie('employed_at'),
                         model_uri=KS.employed_at, domain=None, range=Optional[Union[str, CompanyId]])

slots.is_current = Slot(uri=KS.is_current, name="is current", curie=KS.curie('is_current'),
                        model_uri=KS.is_current, domain=None, range=Optional[Union[bool, Bool]])

slots.has_employment_history = Slot(uri=KS.has_employment_history, name="has employment history", curie=KS.curie('has_employment_history'),
                                    model_uri=KS.has_employment_history, domain=None, range=Optional[Union[Union[dict, EmploymentEvent], List[Union[dict, EmploymentEvent]]]])

slots.has_marriage_history = Slot(uri=KS.has_marriage_history, name="has marriage history", curie=KS.curie('has_marriage_history'),
                                  model_uri=KS.has_marriage_history, domain=None, range=Optional[Union[Union[dict, MarriageEvent], List[Union[dict, MarriageEvent]]]])

slots.has_medical_history = Slot(uri=KS.has_medical_history, name="has medical history", curie=KS.curie('has_medical_history'),
                                 model_uri=KS.has_medical_history, domain=None, range=Optional[Union[Union[dict, MedicalEvent], List[Union[dict, MedicalEvent]]]])

slots.has_familial_relationships = Slot(uri=KS.has_familial_relationships, name="has familial relationships", curie=KS.curie('has_familial_relationships'),
                                        model_uri=KS.has_familial_relationships, domain=None, range=Optional[Union[Union[dict, FamilialRelationship], List[Union[dict, FamilialRelationship]]]])

slots.married_to = Slot(uri=KS.married_to, name="married to", curie=KS.curie('married_to'),
                        model_uri=KS.married_to, domain=None, range=Optional[Union[str, PersonId]])

slots.in_location = Slot(uri=KS.in_location, name="in location", curie=KS.curie('in_location'),
                         model_uri=KS.in_location, domain=None, range=Optional[Union[str, PlaceId]])

slots.diagnosis = Slot(uri=KS.diagnosis, name="diagnosis", curie=KS.curie('diagnosis'),
                       model_uri=KS.diagnosis, domain=None, range=Optional[Union[dict, DiagnosisConcept]])

slots.procedure = Slot(uri=KS.procedure, name="procedure", curie=KS.curie('procedure'),
                       model_uri=KS.procedure, domain=None, range=Optional[Union[dict, ProcedureConcept]])

slots.addresses = Slot(uri=KS.addresses, name="addresses", curie=KS.curie('addresses'),
                       model_uri=KS.addresses, domain=None, range=Optional[Union[Union[dict, Address], List[Union[dict, Address]]]])

slots.age_in_years = Slot(uri=KS.age_in_years, name="age in years", curie=KS.curie('age_in_years'),
                          model_uri=KS.age_in_years, domain=None, range=Optional[int])

slots.related_to = Slot(uri=KS.related_to, name="related to", curie=KS.curie('related_to'),
                        model_uri=KS.related_to, domain=None, range=Optional[str])

slots.type = Slot(uri=KS.type, name="type", curie=KS.curie('type'),
                  model_uri=KS.type, domain=None, range=Optional[str])

slots.street = Slot(uri=KS.street, name="street", curie=KS.curie('street'),
                    model_uri=KS.street, domain=None, range=Optional[str])

slots.city = Slot(uri=KS.city, name="city", curie=KS.curie('city'),
                  model_uri=KS.city, domain=None, range=Optional[str])

slots.has_birth_event = Slot(uri=KS.has_birth_event, name="has birth event", curie=KS.curie('has_birth_event'),
                             model_uri=KS.has_birth_event, domain=None, range=Optional[Union[dict, BirthEvent]])

slots.in_code_system = Slot(uri=KS.in_code_system, name="in code system", curie=KS.curie('in_code_system'),
                            model_uri=KS.in_code_system, domain=None, range=Optional[Union[str, CodeSystemId]])

# Slots mapped to the core / prov namespaces.
slots.id = Slot(uri=CORE.id, name="id", curie=CORE.curie('id'),
                model_uri=KS.id, domain=None, range=URIRef)

slots.name = Slot(uri=CORE.name, name="name", curie=CORE.curie('name'),
                  model_uri=KS.name, domain=None, range=Optional[str])

slots.description = Slot(uri=CORE.description, name="description", curie=CORE.curie('description'),
                         model_uri=KS.description, domain=None, range=Optional[str])

slots.started_at_time = Slot(uri=PROV.startedAtTime, name="started at time", curie=PROV.curie('startedAtTime'),
                             model_uri=KS.started_at_time, domain=None, range=Optional[Union[str, XSDDate]])

slots.ended_at_time = Slot(uri=PROV.endedAtTime, name="ended at time", curie=PROV.curie('endedAtTime'),
                           model_uri=KS.ended_at_time, domain=None, range=Optional[Union[str, XSDDate]])

slots.was_informed_by = Slot(uri=PROV.wasInformedBy, name="was informed by", curie=PROV.curie('wasInformedBy'),
                             model_uri=KS.was_informed_by, domain=None, range=Optional[Union[str, ActivityId]])

slots.was_associated_with = Slot(uri=PROV.wasAssociatedWith, name="was associated with", curie=PROV.curie('wasAssociatedWith'),
                                 model_uri=KS.was_associated_with, domain=None, range=Optional[Union[str, AgentId]])

slots.acted_on_behalf_of = Slot(uri=PROV.actedOnBehalfOf, name="acted on behalf of", curie=PROV.curie('actedOnBehalfOf'),
                                model_uri=KS.acted_on_behalf_of, domain=None, range=Optional[Union[str, AgentId]])

slots.was_generated_by = Slot(uri=PROV.wasGeneratedBy, name="was generated by", curie=PROV.curie('wasGeneratedBy'),
                              model_uri=KS.was_generated_by, domain=None, range=Optional[Union[str, ActivityId]])

slots.used = Slot(uri=PROV.used, name="used", curie=PROV.curie('used'),
                  model_uri=KS.used, domain=Activity, range=Optional[str])

slots.activity_set = Slot(uri=CORE.activity_set, name="activity set", curie=CORE.curie('activity_set'),
                          model_uri=KS.activity_set, domain=None, range=Optional[Union[Dict[Union[str, ActivityId], Union[dict, Activity]], List[Union[dict, Activity]]]])

slots.agent_set = Slot(uri=CORE.agent_set, name="agent set", curie=CORE.curie('agent_set'),
                       model_uri=KS.agent_set, domain=None, range=Optional[Union[Dict[Union[str, AgentId], Union[dict, Agent]], List[Union[dict, Agent]]]])

# Attribute-level (class-scoped) slots; names are prefixed with the owning class.
slots.hasAliases__aliases = Slot(uri=SKOS.altLabel, name="hasAliases__aliases", curie=SKOS.curie('altLabel'),
                                 model_uri=KS.hasAliases__aliases, domain=None, range=Optional[Union[str, List[str]]])

slots.company__ceo = Slot(uri=SCHEMA.ceo, name="company__ceo", curie=SCHEMA.curie('ceo'),
                          model_uri=KS.company__ceo, domain=None, range=Optional[Union[str, PersonId]])

slots.dataset__persons = Slot(uri=KS.persons, name="dataset__persons", curie=KS.curie('persons'),
                              model_uri=KS.dataset__persons, domain=None, range=Optional[Union[Dict[Union[str, PersonId], Union[dict, Person]], List[Union[dict, Person]]]])

slots.dataset__companies = Slot(uri=KS.companies, name="dataset__companies", curie=KS.curie('companies'),
                                model_uri=KS.dataset__companies, domain=None, range=Optional[Union[Dict[Union[str, CompanyId], Union[dict, Company]], List[Union[dict, Company]]]])

slots.dataset__activities = Slot(uri=KS.activities, name="dataset__activities", curie=KS.curie('activities'),
                                 model_uri=KS.dataset__activities, domain=None, range=Optional[Union[Dict[Union[str, ActivityId], Union[dict, Activity]], List[Union[dict, Activity]]]])

slots.dataset__code_systems = Slot(uri=KS.code_systems, name="dataset__code_systems", curie=KS.curie('code_systems'),
                                   model_uri=KS.dataset__code_systems, domain=None, range=Optional[Union[Dict[Union[str, CodeSystemId], Union[dict, CodeSystem]], List[Union[dict, CodeSystem]]]])

slots.fakeClass__test_attribute = Slot(uri=KS.test_attribute, name="fakeClass__test_attribute", curie=KS.curie('test_attribute'),
                                       model_uri=KS.fakeClass__test_attribute, domain=None, range=Optional[str])

slots.classWithSpaces__slot_with_space_1 = Slot(uri=KS.slot_with_space_1, name="classWithSpaces__slot_with_space_1", curie=KS.curie('slot_with_space_1'),
                                                model_uri=KS.classWithSpaces__slot_with_space_1, domain=None, range=Optional[str])

slots.subclassTest__slot_with_space_2 = Slot(uri=KS.slot_with_space_2, name="subclassTest__slot_with_space_2", curie=KS.curie('slot_with_space_2'),
                                             model_uri=KS.subclassTest__slot_with_space_2, domain=None, range=Optional[Union[dict, ClassWithSpaces]])

# Slot-usage overrides: per-class refinements of shared slots.
slots.Person_name = Slot(uri=CORE.name, name="Person_name", curie=CORE.curie('name'),
                         model_uri=KS.Person_name, domain=Person, range=Optional[str],
                         pattern=re.compile(r'^\S+ \S+'))

slots.FamilialRelationship_type = Slot(uri=KS.type, name="FamilialRelationship_type", curie=KS.curie('type'),
                                       model_uri=KS.FamilialRelationship_type, domain=FamilialRelationship, range=Union[str, "FamilialRelationshipType"])

slots.FamilialRelationship_related_to = Slot(uri=KS.related_to, name="FamilialRelationship_related to", curie=KS.curie('related_to'),
                                             model_uri=KS.FamilialRelationship_related_to, domain=FamilialRelationship, range=Union[str, PersonId])

slots.EmploymentEvent_type = Slot(uri=KS.type, name="EmploymentEvent_type", curie=KS.curie('type'),
                                  model_uri=KS.EmploymentEvent_type, domain=EmploymentEvent, range=Optional[Union[str, "EmploymentEventType"]])
1c319f1a9bda9948051b92df81c9a19df60a9bfe | 83,806 | py | Python | test/distributed/test_c10d_gloo.py | photoszzt/pytorch | 179249084b66b8fbacbd95d39b7cbce0cd9eb972 | [
"Intel"
] | 1 | 2021-07-18T18:58:07.000Z | 2021-07-18T18:58:07.000Z | test/distributed/test_c10d_gloo.py | JINAY08/pytorch | f2857883c4c148ced4f920431b38532fe8081b73 | [
"Intel"
] | null | null | null | test/distributed/test_c10d_gloo.py | JINAY08/pytorch | f2857883c4c148ced4f920431b38532fe8081b73 | [
"Intel"
] | null | null | null | import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
import unittest
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
skip_if_lt_x_gpu,
simple_sparse_reduce_tests,
skip_if_win32,
create_device,
verify_ddp_error_logged,
skip_if_rocm,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_TSAN,
)
import test_c10d_common
from test_c10d_common import (
LOOPBACK,
gpus_for_rank,
Task,
ModuleForDdpCommHook,
SparseGradientModule,
)
def simple_reduce_tests(rank, world_size):
    """Build (op, input, expected) triples for single-tensor reductions.

    Each triple holds the reduce op, this rank's contribution, and the value
    every rank should observe after the collective completes.
    """
    tests = [
        (
            c10d.ReduceOp.SUM,
            torch.tensor([rank + 1.0]),
            # sum over ranks of (rank + 1) == world_size * (world_size + 1) / 2
            torch.tensor([float(world_size * (world_size + 1) / 2)]),
        ),
        (
            c10d.ReduceOp.PRODUCT,
            torch.tensor([rank + 1.0]),
            torch.tensor([float(math.factorial(world_size))]),
        ),
        (
            c10d.ReduceOp.MIN,
            torch.tensor([rank + 1.0]),
            torch.tensor([1.0]),
        ),
        (
            c10d.ReduceOp.MAX,
            torch.tensor([rank + 1.0]),
            torch.tensor([world_size]),
        ),
    ]

    # BAND: a different bit is forced on in every iteration, so the output
    # must track which bit was set.
    tests.extend(
        (
            c10d.ReduceOp.BAND,
            torch.tensor([rank | (1 << bit)], dtype=torch.int32),
            torch.tensor([1 << bit], dtype=torch.int32),
        )
        for bit in range(4)
    )

    # BOR: emulate a larger world size by pre-OR'ing several values per rank.
    for width in range(1, 5):
        vin = reduce(operator.or_, (rank * width + j for j in range(width)))
        vout = reduce(operator.or_, range(world_size * width))
        tests.append(
            (
                c10d.ReduceOp.BOR,
                torch.tensor([vin], dtype=torch.int32),
                torch.tensor([vout], dtype=torch.int32),
            )
        )

    # BXOR: same idea with pre-XOR'ed contributions.
    for width in range(1, 5):
        vin = reduce(operator.xor, (rank * width + j for j in range(width)))
        vout = reduce(operator.xor, range(world_size * width))
        tests.append(
            (
                c10d.ReduceOp.BXOR,
                torch.tensor([vin], dtype=torch.int32),
                torch.tensor([vout], dtype=torch.int32),
            )
        )

    return tests
def simple_coalesced_reduce_tests(rank, world_size):
    """Build (op, inputs, expected_outputs) for coalesced two-tensor reductions.

    Each rank contributes a pair of tensors; the expected list gives the
    per-tensor results after the collective.
    """
    n = world_size
    # Closed forms: sum of 1..n and sum of 1^2..n^2.
    linear_sum = torch.tensor([float(n * (n + 1) / 2)])
    square_sum = torch.tensor([float(n * (n + 1) * (2 * n + 1) / 6)])
    return [
        (
            c10d.ReduceOp.SUM,
            [torch.tensor([rank + 1]), torch.tensor([(rank + 1) ** 2])],
            [linear_sum, square_sum],
        ),
        (
            c10d.ReduceOp.PRODUCT,
            [torch.tensor([rank + 1.0]), torch.tensor([rank + 2.0])],
            [
                torch.tensor([float(math.factorial(n))]),
                torch.tensor([float(math.factorial(n + 1))]),
            ],
        ),
        (
            c10d.ReduceOp.MIN,
            [torch.tensor([rank + 0.0]), torch.tensor([rank + 1.0])],
            [torch.tensor([0.0]), torch.tensor([1.0])],
        ),
        (
            c10d.ReduceOp.MAX,
            [torch.tensor([rank + 1.0]), torch.tensor([rank + 2.0])],
            [torch.tensor([n]), torch.tensor([n + 1.0])],
        ),
    ]
def simple_multi_input_reduce_tests(rank, world_size):
    """Build (op, inputs, expected) where each rank contributes two tensors.

    Rank r contributes two consecutive values starting at 2*r (offset by one
    for PRODUCT/MIN/MAX); the expected tensor is the single reduced result.
    """
    base = 2 * rank  # first of this rank's two consecutive contributions
    return [
        (
            c10d.ReduceOp.SUM,
            [torch.tensor([base + 0.0]), torch.tensor([base + 1.0])],
            # sum of 0 .. 2*world_size-1
            torch.tensor([float(world_size * (2 * world_size - 1))]),
        ),
        (
            c10d.ReduceOp.PRODUCT,
            [torch.tensor([base + 1.0]), torch.tensor([base + 2.0])],
            torch.tensor([float(math.factorial(2 * world_size))]),
        ),
        (
            c10d.ReduceOp.MIN,
            [torch.tensor([base + 1.0]), torch.tensor([base + 2.0])],
            torch.tensor([1.0]),
        ),
        (
            c10d.ReduceOp.MAX,
            [torch.tensor([base + 1.0]), torch.tensor([base + 2.0])],
            torch.tensor([2 * world_size]),
        ),
    ]
class RendezvousEnvTest(TestCase):
    @requires_gloo()
    @retry_on_connect_failures
    def test_logging_init(self):
        """init_process_group via env:// must not add/remove root log handlers."""
        # Single-rank rendezvous through environment variables.
        os.environ["WORLD_SIZE"] = "1"
        os.environ["MASTER_ADDR"] = "127.0.0.1"
        os.environ["MASTER_PORT"] = str(common.find_free_port())
        os.environ["RANK"] = "0"

        previous_handlers = logging.root.handlers

        c10d.init_process_group(backend="gloo", init_method="env://")

        current_handlers = logging.root.handlers
        # Same handler objects, in the same order, as before initialization.
        self.assertEqual(len(previous_handlers), len(current_handlers))
        for current, previous in zip(current_handlers, previous_handlers):
            self.assertEqual(current, previous)

        c10d.destroy_process_group()
class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
    # Thin gloo-specific instantiation of the shared timeout test.
    @requires_gloo()
    @retry_on_connect_failures
    def test_default_store_timeout_gloo(self):
        self._test_default_store_timeout("gloo")
@requires_gloo()
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class ProcessGroupGlooTest(MultiProcessTestCase):
def setUp(self):
    """Start worker processes; Windows lacks fork(), so spawn there."""
    super(ProcessGroupGlooTest, self).setUp()
    launcher = (
        self._spawn_processes if sys.platform == "win32" else self._fork_processes
    )
    launcher()
def opts(self, threads=2):
    """Build gloo options pinned to loopback with a short (5s) timeout."""
    options = c10d.ProcessGroupGloo._Options()
    options._timeout = 5.0
    options._devices = [create_device(interface=LOOPBACK)]
    options._threads = threads
    return options
def test_multi_device_constructor(self):
    """A process group constructed with two devices must service work on both."""
    store = c10d.FileStore(self.file_name, self.world_size)
    options = c10d.ProcessGroupGloo._Options()
    options._timeout = 5.0
    options._devices = [
        create_device(interface=LOOPBACK),
        create_device(interface=LOOPBACK),
    ]
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, options)

    # Issue twice as many ops as devices so every device gets exercised.
    futures = [pg.allreduce(torch.ones(i + 1)).get_future() for i in range(4)]
    for future in futures:
        future.wait()
def test_empty_tensors(self):
    """Broadcasting an empty tensor must complete and yield an empty tensor."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())

    inputs = [torch.FloatTensor([])]
    future = pg.broadcast(inputs).get_future()
    future.wait()

    result = future.value()
    self.assertEqual(0, result[0].numel())
    self.assertEqualIgnoreType(inputs[0], result[0])
def test_broadcast_checks(self):
    """Invalid broadcast arguments must raise ValueError with a clear message."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())

    t1 = torch.zeros([1], dtype=torch.float32)
    t2 = torch.zeros([1], dtype=torch.float64)  # dtype mismatch vs t1
    t3 = torch.zeros([2], dtype=torch.float32)  # size mismatch vs t1

    # Root rank out of range: negative.
    with self.assertRaisesRegex(ValueError, "invalid root rank"):
        opts = c10d.BroadcastOptions()
        opts.rootRank = -1
        opts.rootTensor = 0
        pg.broadcast([t1], opts)

    # Root rank out of range: >= world size.
    with self.assertRaisesRegex(ValueError, "invalid root rank"):
        opts = c10d.BroadcastOptions()
        opts.rootRank = self.world_size
        opts.rootTensor = 0
        pg.broadcast([t1], opts)

    # Root tensor index out of range: negative.
    with self.assertRaisesRegex(ValueError, "invalid root tensor"):
        opts = c10d.BroadcastOptions()
        opts.rootRank = self.rank
        opts.rootTensor = -1
        pg.broadcast([t1], opts)

    # Root tensor index out of range: >= number of tensors.
    with self.assertRaisesRegex(ValueError, "invalid root tensor"):
        opts = c10d.BroadcastOptions()
        opts.rootRank = self.rank
        opts.rootTensor = 1
        pg.broadcast([t1], opts)

    # Empty tensor list.
    with self.assertRaisesRegex(ValueError, "invalid root tensor"):
        opts = c10d.BroadcastOptions()
        opts.rootRank = self.rank
        opts.rootTensor = 0
        pg.broadcast([], opts)

    # Mixed dtypes in the tensor list.
    with self.assertRaisesRegex(ValueError, "invalid tensor type"):
        opts = c10d.BroadcastOptions()
        opts.rootRank = self.rank
        opts.rootTensor = 0
        pg.broadcast([t1, t2], opts)

    # Mixed sizes in the tensor list.
    with self.assertRaisesRegex(ValueError, "invalid tensor size"):
        opts = c10d.BroadcastOptions()
        opts.rootRank = self.rank
        opts.rootTensor = 0
        pg.broadcast([t1, t3], opts)
def _test_broadcast_basics(self, fn):
    """Broadcast correctness with every rank as root; `fn` maps tensors to the target device."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())

    def broadcast(xs, rootRank, rootTensor):
        # Helper: broadcast and return the resulting tensor list.
        opts = c10d.BroadcastOptions()
        opts.rootRank = rootRank
        opts.rootTensor = rootTensor
        fut = pg.broadcast(xs, opts).get_future()
        fut.wait()
        return fut.value()

    # Every rank is root once
    for i in range(self.world_size):
        # Run with 1 input tensor
        x = fn(torch.tensor([self.rank]))
        output = broadcast([x], i, 0)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(torch.tensor([i]), output[0])

        # Run with 2 input tensors
        num = 2
        for j in range(num):
            xs = [
                fn(torch.tensor([self.rank * num + 0.0])),
                fn(torch.tensor([self.rank * num + 1.0])),
            ]

            output = broadcast(xs, i, j)
            # Both output tensors must carry root rank i's j-th value.
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(torch.tensor([i * num + j]), output[0])
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(torch.tensor([i * num + j]), output[1])

    # Test overloaded convenience function
    x = torch.tensor([self.rank + 1.0])
    fut = pg.broadcast(x, root=0).get_future()
    fut.wait()
    result = fut.value()
    self.assertEqual(torch.tensor([1.0]), result[0])
def test_broadcast_basics(self):
    """Broadcast basics on CPU tensors."""
    self._test_broadcast_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_broadcast_basics_cuda(self):
    """Broadcast basics with inputs moved to CUDA."""
    self._test_broadcast_basics(lambda t: t.clone().cuda())
def _test_broadcast_stress(self, inputs):
    """Issue many concurrent broadcasts (8 worker threads) and verify each result."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(
        store, self.rank, self.world_size, self.opts(threads=8)
    )
    # Rotate the root across iterations.
    work_handles = [
        pg.broadcast(inputs[i], root=(i % self.world_size))
        for i in range(len(inputs))
    ]
    for i, work_handle in enumerate(work_handles):
        work_handle.wait()
        # Each input must now hold the value contributed by iteration i's root.
        self.assertEqual(
            torch.tensor([(i * self.world_size) + (i % self.world_size)]),
            inputs[i],
            msg=("Mismatch in iteration %d" % i),
        )
def test_broadcast_stress(self):
    """Broadcast stress test on CPU (1000 iterations)."""
    inputs = [torch.tensor([i * self.world_size + self.rank]) for i in range(1000)]
    self._test_broadcast_stress(inputs)
@skip_if_lt_x_gpu(2)
def test_broadcast_stress_cuda(self):
    """Broadcast stress test with CUDA inputs (1000 iterations)."""
    inputs = [
        torch.tensor([i * self.world_size + self.rank]).cuda() for i in range(1000)
    ]
    self._test_broadcast_stress(inputs)
def test_allreduce_checks(self):
    """Invalid allreduce arguments must raise ValueError with a clear message."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())

    t1 = torch.zeros([1], dtype=torch.float32)
    t2 = torch.zeros([1], dtype=torch.float64)  # dtype mismatch vs t1
    t3 = torch.zeros([2], dtype=torch.float32)  # size mismatch vs t1

    # Empty tensor list.
    with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
        opts = c10d.AllreduceOptions()
        pg.allreduce([], opts)

    # Mixed dtypes.
    with self.assertRaisesRegex(ValueError, "invalid tensor type"):
        opts = c10d.AllreduceOptions()
        pg.allreduce([t1, t2], opts)

    # Mixed sizes.
    with self.assertRaisesRegex(ValueError, "invalid tensor size"):
        opts = c10d.AllreduceOptions()
        pg.allreduce([t1, t3], opts)
def _test_allreduce_basics(self, fn):
    """Allreduce correctness via the future API; `fn` maps tensors to the target device."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())

    # Single input tests
    tests = simple_reduce_tests(self.rank, self.world_size)
    for (op, input, expected) in tests:
        opts = c10d.AllreduceOptions()
        opts.reduceOp = op
        tensor = fn(input)
        fut = pg.allreduce([tensor], opts).get_future()
        fut.wait()
        result = fut.value()
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(expected, result[0])

    # Multi input tests
    tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
    for (op, inputs, output) in tests:
        opts = c10d.AllreduceOptions()
        opts.reduceOp = op
        tensors = [fn(input) for input in inputs]
        fut = pg.allreduce(tensors, opts).get_future()
        fut.wait()
        result = fut.value()
        # Every tensor in the list must hold the reduced value.
        for tensor in result:
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(output, tensor)

    # Test overloaded convenience function (defaults to using sum)
    x = fn(torch.tensor([self.rank + 1.0]))
    fut = pg.allreduce(x).get_future()
    fut.wait()
    result = fut.value()
    self.assertEqual(
        torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]), result[0]
    )
def test_allreduce_basics(self):
    """Allreduce basics on CPU tensors."""
    self._test_allreduce_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_allreduce_basics_cuda(self):
    """Allreduce basics with inputs moved to CUDA."""
    self._test_allreduce_basics(lambda t: t.clone().cuda())
# _using_work_api tests are to make sure we still properly support work API.
# This should go away as we deprecate it.
def _test_allreduce_basics_using_work_api(self, fn):
    """Same as _test_allreduce_basics, but through the (deprecated) Work API."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())

    # Single input tests
    tests = simple_reduce_tests(self.rank, self.world_size)
    for (op, input, expected) in tests:
        opts = c10d.AllreduceOptions()
        opts.reduceOp = op
        tensor = fn(input)
        work = pg.allreduce([tensor], opts)
        work.wait()
        result = work.result()
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(expected, result[0])

    # Multi input tests
    tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
    for (op, inputs, output) in tests:
        opts = c10d.AllreduceOptions()
        opts.reduceOp = op
        tensors = [fn(input) for input in inputs]
        work = pg.allreduce(tensors, opts)
        work.wait()
        result = work.result()
        # Every tensor in the list must hold the reduced value.
        for tensor in result:
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(output, tensor)

    # Test overloaded convenience function (defaults to using sum)
    x = fn(torch.tensor([self.rank + 1.0]))
    work = pg.allreduce(x)
    work.wait()
    result = work.result()
    self.assertEqual(
        torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]), result[0]
    )
def test_allreduce_basics_using_work_api(self):
    """Legacy Work API allreduce with CPU tensors."""
    def copy_cpu(t):
        return t.clone()
    self._test_allreduce_basics_using_work_api(copy_cpu)
@skip_if_lt_x_gpu(2)
def test_allreduce_basics_cuda_using_work_api(self):
    """Legacy Work API allreduce with CUDA tensors."""
    def copy_cuda(t):
        return t.clone().cuda()
    self._test_allreduce_basics_using_work_api(copy_cuda)
def _test_allreduce_stress(self, inputs):
    """Issue one allreduce per element of *inputs* and verify all results.

    Input ``i`` holds ``i + rank``, so after summation it must equal
    ``i * world_size + sum(range(world_size))``.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    # Extra worker threads so many operations are genuinely in flight at once.
    pg = c10d.ProcessGroupGloo(
        store, self.rank, self.world_size, self.opts(threads=8)
    )
    future_handles = [pg.allreduce(inputs[i]).get_future() for i in range(len(inputs))]
    for i, future_handle in enumerate(future_handles):
        future_handle.wait()
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(
            torch.tensor(
                [
                    (i * self.world_size)
                    + (self.world_size * (self.world_size - 1) / 2)
                ]
            ),
            future_handle.value()[0],
            msg=("Mismatch in iteration %d" % i),
        )
def test_allreduce_stress(self):
    """Stress allreduce with 1000 CPU tensors."""
    stress_inputs = [torch.tensor([self.rank + i]) for i in range(1000)]
    self._test_allreduce_stress(stress_inputs)
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_allreduce_stress_cuda(self):
    """Stress allreduce with 1000 CUDA tensors."""
    stress_inputs = [torch.tensor([self.rank + i]).cuda() for i in range(1000)]
    self._test_allreduce_stress(stress_inputs)
def test_allreduce_coalesced_checks(self):
    """allreduce_coalesced must validate its tensor list up front."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    t1 = torch.zeros(1, dtype=torch.float32)
    t2 = torch.zeros(1, dtype=torch.float64)
    t3 = torch.sparse_coo_tensor([[0]], [1], size=(1,))
    # Empty list is rejected.
    with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
        opts = c10d.AllreduceCoalescedOptions()
        pg.allreduce_coalesced([], opts)
    # Mixed dtypes are rejected.
    with self.assertRaisesRegex(ValueError, "tensors must all have the same type"):
        opts = c10d.AllreduceCoalescedOptions()
        pg.allreduce_coalesced([t1, t2], opts)
    # Mixed layouts (dense + sparse) are rejected.
    with self.assertRaisesRegex(ValueError, "invalid tensor layout at index"):
        opts = c10d.AllreduceCoalescedOptions()
        pg.allreduce_coalesced([t1, t3], opts)
    # All-sparse input is also unsupported by the coalesced variant.
    with self.assertRaisesRegex(ValueError, "unsupported layout"):
        opts = c10d.AllreduceCoalescedOptions()
        pg.allreduce_coalesced([t3, t3.clone()], opts)
@skip_if_lt_x_gpu(1)
def test_allreduce_coalesced_checks_cuda(self):
    """allreduce_coalesced must reject CUDA tensors (unsupported device)."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    cpu_tensor = torch.zeros(1, dtype=torch.float32)
    with self.assertRaisesRegex(ValueError, "unsupported device type"):
        opts = c10d.AllreduceCoalescedOptions()
        pg.allreduce_coalesced([cpu_tensor.cuda(), cpu_tensor.cuda()], opts)
def _test_allreduce_coalesced_basics(self, fn):
    """Run coalesced allreduce for each simple coalesced case and verify results."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    test_cases = simple_coalesced_reduce_tests(self.rank, self.world_size)
    for op, inputs, outputs in test_cases:
        opts = c10d.AllreduceCoalescedOptions()
        opts.reduceOp = op
        tensors = [fn(x) for x in inputs]
        fut = pg.allreduce_coalesced(tensors, opts).get_future()
        fut.wait()
        result = fut.value()
        # The future returns one tensor per input; compare pairwise.
        for result_tensor, expected in zip(result, outputs):
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(result_tensor, expected)
def test_allreduce_coalesced_basics(self):
    """Coalesced allreduce with CPU tensors."""
    def copy_cpu(t):
        return t.clone()
    self._test_allreduce_coalesced_basics(copy_cpu)
def _test_allreduce_coalesced_stress(self, inputs):
    """Stress allreduce_coalesced with many outstanding operations.

    Element ``i`` of *inputs* is a two-tensor batch holding ``i + rank``;
    after summation both tensors must equal
    ``i * world_size + sum(range(world_size))``.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    # Extra worker threads so many operations are genuinely in flight at once.
    pg = c10d.ProcessGroupGloo(
        store, self.rank, self.world_size, self.opts(threads=8)
    )
    future_handles = [pg.allreduce_coalesced(input).get_future() for input in inputs]
    for i, future_handle in enumerate(future_handles):
        future_handle.wait()
        result = future_handle.value()
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(
            2
            * [
                torch.tensor(
                    [
                        (i * self.world_size)
                        + (self.world_size * (self.world_size - 1) / 2)
                    ]
                )
            ],
            result,
            # Fixed typo in failure message: "interation" -> "iteration".
            msg="Mismatch in iteration {}".format(i),
        )
def test_allreduce_coalesced_stress(self):
    """Stress coalesced allreduce with 1000 two-tensor batches."""
    batches = [2 * [torch.tensor([self.rank + i])] for i in range(1000)]
    self._test_allreduce_coalesced_stress(batches)
def test_sparse_allreduce_checks(self):
    """Sparse allreduce must validate layouts, sizes, and reduce ops."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    t1 = torch.zeros([1])
    t2 = torch.sparse_coo_tensor([[0]], [1], size=(2,))
    t3 = torch.sparse_coo_tensor([[0]], [1], size=(4,))
    with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
        opts = c10d.AllreduceOptions()
        pg.allreduce([], opts)
    # Dense and sparse tensors cannot be mixed in one call.
    with self.assertRaisesRegex(ValueError, "invalid tensor layout"):
        opts = c10d.AllreduceOptions()
        pg.allreduce([t1, t2], opts)
    # All sparse tensors must share the same dense shape.
    with self.assertRaisesRegex(ValueError, "invalid tensor size"):
        opts = c10d.AllreduceOptions()
        pg.allreduce([t2, t3], opts)
    # Sparse allreduce only works with c10d.ReduceOp.SUM.
    for op in [c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX]:
        with self.assertRaisesRegex(ValueError, "unsupported reduction operation"):
            opts = c10d.AllreduceOptions()
            opts.reduceOp = op
            pg.allreduce([t3], opts)
def _test_sparse_allreduce_basics(self, fn):
    """Sparse allreduce: result must appear both in-place and in the future value."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    for num_inputs_per_rank in [1, 2]:
        tests = simple_sparse_reduce_tests(
            self.rank, self.world_size, num_inputs=num_inputs_per_rank
        )
        for (inputs, outputs) in tests:
            tensors = [fn(input) for input in inputs]
            fut = pg.allreduce(tensors).get_future()
            fut.wait()
            result = fut.value()
            # The collective writes in place *and* returns the result.
            self.assertEqual(tensors, outputs)
            self.assertEqual(result, outputs)
@unittest.skip("intermittent failures on Windows, in CI")
def test_sparse_allreduce_basics(self):
    """Sparse allreduce with CPU tensors (no copy)."""
    def identity(t):
        return t
    self._test_sparse_allreduce_basics(identity)
@skip_if_lt_x_gpu(2)
def test_sparse_allreduce_basics_cuda(self):
    """Sparse allreduce with CUDA tensors."""
    def copy_cuda(t):
        return t.clone().cuda()
    self._test_sparse_allreduce_basics(copy_cuda)
def test_scatter_checks(self):
    """Scatter argument validation: root rank, list shapes, dtypes, sizes."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    t1 = torch.zeros([1], dtype=torch.float32)
    t2 = torch.zeros([1], dtype=torch.float64)
    t3 = torch.zeros([2], dtype=torch.float32)
    with self.assertRaisesRegex(ValueError, "invalid root rank"):
        opts = c10d.ScatterOptions()
        opts.rootRank = -1
        pg.scatter([t1], [], opts)
    with self.assertRaisesRegex(ValueError, "invalid root rank"):
        opts = c10d.ScatterOptions()
        opts.rootRank = self.world_size
        pg.scatter([t1], [], opts)
    # Every rank receives into exactly one output tensor.
    with self.assertRaisesRegex(
        ValueError, "requires a single-element output tensor list"
    ):
        opts = c10d.ScatterOptions()
        opts.rootRank = 0
        pg.scatter([], [], opts)
    with self.assertRaisesRegex(
        ValueError, "requires a single-element output tensor list"
    ):
        opts = c10d.ScatterOptions()
        opts.rootRank = 0
        pg.scatter([t1, t1], [], opts)
    # As root, the input must be exactly one list of world_size tensors.
    with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
        opts = c10d.ScatterOptions()
        opts.rootRank = self.rank
        pg.scatter([t1], [], opts)
    with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
        opts = c10d.ScatterOptions()
        opts.rootRank = self.rank
        pg.scatter([t1], [[t1] * self.world_size, [t1] * self.world_size], opts)
    desired_list_size = self.world_size
    incorrect_list_size = self.world_size - 1
    err_str = "Incorrect input list size {}. Input list size should be {}"
    with self.assertRaisesRegex(
        ValueError, err_str.format(incorrect_list_size, desired_list_size)
    ):
        opts = c10d.ScatterOptions()
        opts.rootRank = self.rank
        pg.scatter([t1], [[t1] * incorrect_list_size], opts)
    incorrect_list_size = self.world_size + 1
    with self.assertRaisesRegex(
        ValueError, err_str.format(incorrect_list_size, desired_list_size)
    ):
        opts = c10d.ScatterOptions()
        opts.rootRank = self.rank
        pg.scatter([t1], [[t1] * incorrect_list_size], opts)
    # Input tensors must match the output tensor's dtype and size.
    with self.assertRaisesRegex(ValueError, "invalid tensor type"):
        opts = c10d.ScatterOptions()
        opts.rootRank = self.rank
        pg.scatter([t1], [[t2] * self.world_size], opts)
    with self.assertRaisesRegex(ValueError, "invalid tensor size"):
        opts = c10d.ScatterOptions()
        opts.rootRank = self.rank
        pg.scatter([t1], [[t3] * self.world_size], opts)
    # Non-root ranks must not supply an input list.
    with self.assertRaisesRegex(ValueError, "requires empty input on non-root"):
        opts = c10d.ScatterOptions()
        opts.rootRank = (self.rank + 1) % self.world_size
        pg.scatter([t1], [[t1] * self.world_size], opts)
def _test_scatter_basics(self, fn):
    """Scatter with every rank acting as root once.

    Rank ``r`` scatters tensors all holding ``r``, so every rank's output
    slot for root ``r`` must end up as ``[r]``.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    # Preallocated input (only supplied when we are root) and per-root outputs.
    scatter_input = [fn(torch.tensor([self.rank])) for _ in range(self.world_size)]
    received = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
    pending = []
    for root in range(self.world_size):
        opts = c10d.ScatterOptions()
        opts.rootRank = root
        # Only the root passes the input list; non-roots pass [].
        root_inputs = [scatter_input] if root == self.rank else []
        pending.append(pg.scatter([received[root]], root_inputs, opts).get_future())
    # Wait for all scatters and verify each root's contribution arrived.
    for root, fut in enumerate(pending):
        fut.wait()
        result = fut.value()
        self.assertEqual(torch.tensor([root]), result[0])
def test_scatter_basics(self):
    """Scatter with CPU tensors."""
    def copy_cpu(t):
        return t.clone()
    self._test_scatter_basics(copy_cpu)
@skip_if_lt_x_gpu(2)
def test_scatter_basics_cuda(self):
    """Scatter with CUDA tensors."""
    def copy_cuda(t):
        return t.clone().cuda()
    self._test_scatter_basics(copy_cuda)
def _test_scatter_stress(self, inputs, fn):
    """Stress scatter: every iteration scatters once per root rank.

    In iteration ``i`` the root's input tensors hold ``i + root``, so every
    rank's output for that (iteration, root) pair must equal ``i + root``.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(
        store, self.rank, self.world_size, self.opts(threads=8)
    )
    outputs = [
        [fn(torch.tensor([-1])) for _ in range(self.world_size)]
        for _ in range(len(inputs))
    ]
    future_handles = []
    for i in range(len(inputs)):
        for root in range(self.world_size):
            opts = c10d.ScatterOptions()
            opts.rootRank = root
            if root == self.rank:
                fut = pg.scatter(
                    [outputs[i][root]], [[fn(e) for e in inputs[i]]], opts
                ).get_future()
            else:
                fut = pg.scatter([outputs[i][root]], [], opts).get_future()
            future_handles.append(fut)
    for i, future_handle in enumerate(future_handles):
        future_handle.wait()
        # Handles were appended iteration-major, root-minor.
        iter = i // self.world_size
        root = i % self.world_size
        result = future_handle.value()
        self.assertEqual(
            torch.tensor([iter + root]),
            result[0],
            msg=("Mismatch in iteration %d for rank %d" % (iter, root)),
        )
def test_scatter_stress(self):
    """Stress scatter with 1000 iterations of CPU tensors."""
    def copy_cpu(t):
        return t.clone()
    stress_inputs = [
        [torch.tensor([self.rank + i]) for _ in range(self.world_size)]
        for i in range(1000)
    ]
    self._test_scatter_stress(stress_inputs, copy_cpu)
@unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/15963")
@skip_if_lt_x_gpu(2)
def test_scatter_stress_cuda(self):
    """Stress scatter with 1000 iterations of CUDA tensors."""
    def copy_cuda(t):
        return t.clone().cuda()
    stress_inputs = [
        [torch.tensor([self.rank + i]) for _ in range(self.world_size)]
        for i in range(1000)
    ]
    self._test_scatter_stress(stress_inputs, copy_cuda)
def test_gather_checks(self):
    """Gather argument validation: root rank, list shapes, dtypes, sizes."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    t1 = torch.zeros([1], dtype=torch.float32)
    t2 = torch.zeros([1], dtype=torch.float64)
    t3 = torch.zeros([2], dtype=torch.float32)
    with self.assertRaisesRegex(ValueError, "invalid root rank"):
        opts = c10d.GatherOptions()
        opts.rootRank = -1
        pg.gather([], [t1], opts)
    with self.assertRaisesRegex(ValueError, "invalid root rank"):
        opts = c10d.GatherOptions()
        opts.rootRank = self.world_size
        pg.gather([], [t1], opts)
    # Every rank contributes exactly one input tensor.
    with self.assertRaisesRegex(
        ValueError, "requires a single-element input tensor list"
    ):
        opts = c10d.GatherOptions()
        opts.rootRank = 0
        pg.gather([], [], opts)
    with self.assertRaisesRegex(
        ValueError, "requires a single-element input tensor list"
    ):
        opts = c10d.GatherOptions()
        opts.rootRank = 0
        pg.gather([], [t1, t1], opts)
    # As root, the output must be exactly one list of world_size tensors.
    with self.assertRaisesRegex(
        ValueError, "requires a single-element output list"
    ):
        opts = c10d.GatherOptions()
        opts.rootRank = self.rank
        pg.gather([], [t1], opts)
    with self.assertRaisesRegex(
        ValueError, "requires a single-element output list"
    ):
        opts = c10d.GatherOptions()
        opts.rootRank = self.rank
        pg.gather([[t1] * self.world_size, [t1] * self.world_size], [t1], opts)
    desired_list_size = self.world_size
    incorrect_list_size = self.world_size - 1
    err_str = "Incorrect output list size {}. Output list size should be {}"
    with self.assertRaisesRegex(
        ValueError, err_str.format(incorrect_list_size, desired_list_size)
    ):
        opts = c10d.GatherOptions()
        opts.rootRank = self.rank
        pg.gather([[t1] * incorrect_list_size], [t1], opts)
    incorrect_list_size = self.world_size + 1
    with self.assertRaisesRegex(
        ValueError, err_str.format(incorrect_list_size, desired_list_size)
    ):
        opts = c10d.GatherOptions()
        opts.rootRank = self.rank
        pg.gather([[t1] * incorrect_list_size], [t1], opts)
    # Output tensors must match the input tensor's dtype and size.
    with self.assertRaisesRegex(ValueError, "invalid tensor type"):
        opts = c10d.GatherOptions()
        opts.rootRank = self.rank
        pg.gather([[t2] * self.world_size], [t1], opts)
    with self.assertRaisesRegex(ValueError, "invalid tensor size"):
        opts = c10d.GatherOptions()
        opts.rootRank = self.rank
        pg.gather([[t3] * self.world_size], [t1], opts)
    # Non-root ranks must not supply an output list.
    with self.assertRaisesRegex(ValueError, "requires empty output on non-root"):
        opts = c10d.GatherOptions()
        opts.rootRank = (self.rank + 1) % self.world_size
        pg.gather([[t1] * self.world_size], [t1], opts)
def _test_gather_basics(self, fn):
    """Gather with every rank acting as root once.

    Each rank contributes ``[rank]``; the root must receive
    ``[[0], [1], ..., [world_size - 1]]``.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    # Preallocate tensors for input/output
    input = [fn(torch.tensor([self.rank]))]
    outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
    # Take turns being the gather root and accumulate work items
    futures = []
    for i in range(self.world_size):
        opts = c10d.GatherOptions()
        opts.rootRank = i
        if i == self.rank:
            futures.append(pg.gather([outputs], input, opts).get_future())
        else:
            futures.append(pg.gather([], input, opts).get_future())
    # Wait for work to complete
    expected = [torch.tensor([rank]) for rank in range(self.world_size)]
    for i in range(self.world_size):
        futures[i].wait()
        result = futures[i].value()
        # Only the root holds gathered data; non-roots have nothing to check.
        if i == self.rank:
            self.assertEqual(expected, result)
def test_gather_basics(self):
    """Gather with CPU tensors."""
    def copy_cpu(t):
        return t.clone()
    self._test_gather_basics(copy_cpu)
@skip_if_lt_x_gpu(2)
def test_gather_basics_cuda(self):
    """Gather with CUDA tensors."""
    def copy_cuda(t):
        return t.clone().cuda()
    self._test_gather_basics(copy_cuda)
def _test_gather_stress(self, inputs, fn):
    """Stress gather: every iteration gathers once per root rank.

    In iteration ``i`` each rank contributes ``i + rank``, so the root's
    gathered list must be ``[i, i + 1, ..., i + world_size - 1]``.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(
        store, self.rank, self.world_size, self.opts(threads=8)
    )
    future_handles = []
    outputs = [
        [[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
        for _ in range(len(inputs))
    ]
    expected_outputs = [
        [[torch.tensor([i + j]) for j in range(self.world_size)]]
        for i in range(len(inputs))
    ]
    for i in range(len(inputs)):
        for root in range(self.world_size):
            opts = c10d.GatherOptions()
            opts.rootRank = root
            if root == self.rank:
                fut = pg.gather(outputs[i], [fn(inputs[i])], opts).get_future()
            else:
                fut = pg.gather([], [fn(inputs[i])], opts).get_future()
            future_handles.append(fut)
    for i, future_handle in enumerate(future_handles):
        future_handle.wait()
        # Handles were appended iteration-major, root-minor.
        iter = i // self.world_size
        root = i % self.world_size
        # Only the root holds gathered data to verify.
        if root == self.rank:
            result = future_handle.value()
            self.assertEqual(
                expected_outputs[iter],
                [result],
                msg=("Mismatch in iteration %d for root %d" % (iter, root)),
            )
def test_gather_stress(self):
    """Stress gather with 1000 CPU tensors."""
    def copy_cpu(t):
        return t.clone()
    stress_inputs = [torch.tensor([self.rank + i]) for i in range(1000)]
    self._test_gather_stress(stress_inputs, copy_cpu)
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_gather_stress_cuda(self):
    """Stress gather with 1000 CUDA tensors."""
    def copy_cuda(t):
        return t.clone().cuda()
    stress_inputs = [torch.tensor([self.rank + i]).cuda() for i in range(1000)]
    self._test_gather_stress(stress_inputs, copy_cuda)
def test_allgather_checks(self):
    """Allgather argument validation: list lengths, dtypes, and sizes."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    t1 = torch.zeros([1], dtype=torch.float32)
    t2 = torch.zeros([1], dtype=torch.float64)
    t3 = torch.zeros([2], dtype=torch.float32)
    with self.assertRaisesRegex(ValueError, "requires non-empty input tensor list"):
        pg.allgather([], [])
    with self.assertRaisesRegex(
        ValueError, "requires input/output tensor lists to have the same length"
    ):
        pg.allgather([], [t1])
    with self.assertRaisesRegex(
        ValueError, "requires input/output tensor lists to have the same length"
    ):
        pg.allgather([[t1] * self.world_size, [t1] * self.world_size], [t1])
    # Each output list must hold exactly world_size tensors.
    with self.assertRaisesRegex(ValueError, "invalid output tensor list"):
        pg.allgather([[t1] * (self.world_size - 1)], [t1])
    with self.assertRaisesRegex(ValueError, "invalid output tensor list"):
        pg.allgather([[t1] * (self.world_size + 1)], [t1])
    # Mixed dtype / size across the input tensors is rejected.
    with self.assertRaisesRegex(ValueError, "invalid tensor type"):
        pg.allgather(
            [[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t2]
        )
    with self.assertRaisesRegex(ValueError, "invalid tensor size"):
        pg.allgather(
            [[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t3]
        )
    # Mixed dtype / size within an output list is rejected too.
    with self.assertRaisesRegex(ValueError, "invalid tensor type"):
        pg.allgather([([t1, t2] * (self.world_size))[: self.world_size]], [t1])
    with self.assertRaisesRegex(ValueError, "invalid tensor size"):
        pg.allgather([([t1, t3] * (self.world_size))[: self.world_size]], [t1])
def _test_allgather_basics(self, fn):
    """Allgather with 1, 2, and 3 input tensors per rank.

    With ``n`` inputs per rank the contributions across ranks cover
    ``0 .. n * world_size - 1``, so every output list must equal that
    sequence.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    # Run with N input tensor per rank
    for n in [1, 2, 3]:
        input = [fn(torch.tensor([n * self.rank + i])) for i in range(n)]
        output = [
            [fn(torch.tensor([-1])) for _ in range(n * self.world_size)]
            for _ in range(n)
        ]
        expected_output = [
            [torch.tensor([i]) for i in range(n * self.world_size)]
            for _ in range(n)
        ]
        fut = pg.allgather(output, input).get_future()
        fut.wait()
        result = fut.value()
        # With a single input the future returns a flat list; wrap it so it
        # compares against the nested expected_output.
        if n == 1:
            result = [result]
        self.assertEqual(expected_output, result)
def test_allgather_basics(self):
    """Allgather with CPU tensors."""
    def copy_cpu(t):
        return t.clone()
    self._test_allgather_basics(copy_cpu)
@skip_if_lt_x_gpu(2)
def test_allgather_basics_cuda(self):
    """Allgather with CUDA tensors."""
    def copy_cuda(t):
        return t.clone().cuda()
    self._test_allgather_basics(copy_cuda)
def _test_allgather_stress(self, inputs, fn):
    """Stress allgather with one operation per element of *inputs*.

    In iteration ``i`` each rank contributes ``i + rank``, so every rank's
    gathered list must be ``[i, i + 1, ..., i + world_size - 1]``.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(
        store, self.rank, self.world_size, self.opts(threads=8)
    )
    future_handles = []
    outputs = [
        [[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
        for _ in range(len(inputs))
    ]
    expected_outputs = [
        [[torch.tensor([i + j]) for j in range(self.world_size)]]
        for i in range(len(inputs))
    ]
    for i in range(len(inputs)):
        fut = pg.allgather(outputs[i], [fn(inputs[i])]).get_future()
        future_handles.append(fut)
    for i, future_handle in enumerate(future_handles):
        future_handle.wait()
        result = future_handle.value()
        self.assertEqual(
            expected_outputs[i],
            [result],
            msg=("Mismatch in iteration %d" % i),
        )
def test_allgather_stress(self):
    """Stress allgather with 1000 CPU tensors."""
    def copy_cpu(t):
        return t.clone()
    stress_inputs = [torch.tensor([self.rank + i]) for i in range(1000)]
    self._test_allgather_stress(stress_inputs, copy_cpu)
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_allgather_stress_cuda(self):
    """Stress allgather with 1000 CUDA tensors."""
    def copy_cuda(t):
        return t.clone().cuda()
    stress_inputs = [torch.tensor([self.rank + i]).cuda() for i in range(1000)]
    self._test_allgather_stress(stress_inputs, copy_cuda)
def test_allgather_coalesced_checks(self):
    """all_gather_coalesced must validate output lists against the input."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    dummy_input = [torch.zeros([1], dtype=torch.float32)]
    dummy_output_lists = [
        [torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size)
    ]
    # One of output tensors does not match input list.
    dummy_output_lists[0] = [torch.zeros([0], dtype=torch.float32)]
    with self.assertRaisesRegex(
        ValueError, "invalid size of output tensor at index 0"
    ):
        c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
    # One of output tensors does not match input list.
    dummy_output_lists[0] = [torch.zeros([1], dtype=torch.float64)]
    with self.assertRaisesRegex(ValueError, "invalid tensor type at index 0"):
        c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
    # Output lists have too many elements
    dummy_output_lists = [
        [torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size + 1)
    ]
    with self.assertRaisesRegex(
        ValueError, "output lists should be equal to world size"
    ):
        c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
    # Output is not a list of lists.
    dummy_output_lists = [torch.zeros([0], dtype=torch.float32)]
    with self.assertRaisesRegex(
        RuntimeError, "Invalid function argument.*output_tensor_lists"
    ):
        c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
def test_reduce_checks(self):
    """Reduce argument validation: root rank, root tensor, list length."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    t1 = torch.zeros([1], dtype=torch.float32)
    with self.assertRaisesRegex(ValueError, "invalid root rank"):
        opts = c10d.ReduceOptions()
        opts.rootRank = -1
        opts.rootTensor = 0
        pg.reduce([t1], opts)
    with self.assertRaisesRegex(ValueError, "invalid root rank"):
        opts = c10d.ReduceOptions()
        opts.rootRank = self.world_size
        opts.rootTensor = 0
        pg.reduce([t1], opts)
    # rootTensor must index into the (single-element) tensor list.
    with self.assertRaisesRegex(ValueError, "invalid root tensor"):
        opts = c10d.ReduceOptions()
        opts.rootRank = self.rank
        opts.rootTensor = 1
        pg.reduce([t1], opts)
    with self.assertRaisesRegex(
        ValueError, "requires a single-element tensor list"
    ):
        opts = c10d.ReduceOptions()
        opts.rootRank = self.rank
        opts.rootTensor = 0
        pg.reduce([t1, t1], opts)
def _test_reduce_basics(self, fn):
    """Reduce each simple test case to every possible root in turn."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    for (op, input, output) in simple_reduce_tests(self.rank, self.world_size):
        for root in range(self.world_size):
            opts = c10d.ReduceOptions()
            opts.reduceOp = op
            opts.rootRank = root
            tmp = fn(input)
            fut = pg.reduce([tmp], opts).get_future()
            fut.wait()
            result = fut.value()
            # Only the root holds the reduced value.
            if root == self.rank:
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                self.assertEqualIgnoreType(output, result[0])
def test_reduce_basics(self):
    """Reduce with CPU tensors."""
    def copy_cpu(t):
        return t.clone()
    self._test_reduce_basics(copy_cpu)
@skip_if_lt_x_gpu(2)
def test_reduce_basics_cuda(self):
    """Reduce with CUDA tensors."""
    def copy_cuda(t):
        return t.clone().cuda()
    self._test_reduce_basics(copy_cuda)
def _test_reduce_stress(self, inputs):
    """Stress reduce: every iteration reduces once per root rank.

    Input ``i`` holds ``i + rank``; the root of each operation must end up
    with ``i * world_size + sum(range(world_size))``.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(
        store, self.rank, self.world_size, self.opts(threads=8)
    )
    future_handles = []
    outputs = []
    for i in range(len(inputs)):
        for root in range(self.world_size):
            opts = c10d.ReduceOptions()
            opts.rootRank = root
            # Clone so each operation reduces a fresh copy of the input.
            tmp = inputs[i].clone()
            outputs.append(tmp)
            fut = pg.reduce([tmp], opts).get_future()
            future_handles.append(fut)
    for i, future_handle in enumerate(future_handles):
        future_handle.wait()
        result = future_handle.value()
        # Handles were appended iteration-major, root-minor.
        iter = i // self.world_size
        root = i % self.world_size
        # Only the root holds the reduced value.
        if root == self.rank:
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(
                torch.tensor(
                    [
                        (iter * self.world_size)
                        + (self.world_size * (self.world_size - 1) / 2)
                    ]
                ),
                result[0],
                msg=("Mismatch in iteration %d with root rank %d" % (iter, root)),
            )
def test_reduce_stress(self):
    """Stress reduce with 1000 CPU tensors."""
    stress_inputs = [torch.tensor([self.rank + i]) for i in range(1000)]
    self._test_reduce_stress(stress_inputs)
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_reduce_stress_cuda(self):
    """Stress reduce with 1000 CUDA tensors."""
    stress_inputs = [torch.tensor([self.rank + i]).cuda() for i in range(1000)]
    self._test_reduce_stress(stress_inputs)
def test_send_recv_all_to_all(self):
    """Point-to-point all-to-all: every rank sends its rank id to every peer."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    # Preallocate tensors for input/output
    inputs = [torch.tensor([self.rank]) for _ in range(self.world_size)]
    outputs = [torch.tensor([-1]) for _ in range(self.world_size)]
    # Issue sends
    send_work = []
    for i in range(self.world_size):
        if i == self.rank:
            continue
        send_work.append(pg.send([inputs[i]], i, 0))
    # Issue recvs
    recv_work = []
    for i in range(self.world_size):
        if i == self.rank:
            continue
        recv_work.append(pg.recv([outputs[i]], i, 0))
    # Wait for sends to complete
    for work in send_work:
        work.wait()
        self.assertTrue(work.is_completed())
    # Wait for recvs to complete
    for work in recv_work:
        work.wait()
        self.assertTrue(work.is_completed())
    # Test that every output other than our own contains the respective rank
    for i in range(self.world_size):
        if i == self.rank:
            continue
        self.assertEqual(torch.tensor([i]), outputs[i])
def test_barrier_implies_wait(self):
    """A barrier must not return until all previously issued collectives finish."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
    shape = (100, 100)
    count = 16
    tensors = [torch.full(shape, float(idx)) for idx in range(count)]
    # Deliberately drop the returned work handles; the barrier must flush them.
    for tensor in tensors:
        pg.allreduce(tensor)
    pg.barrier().get_future().wait()
    for idx, tensor in enumerate(tensors):
        self.assertEqual(torch.full(shape, float(idx * self.world_size)), tensor)
@skip_if_win32()
def test_round_robin(self):
    """Collectives on a round-robin wrapper cycle through its process groups."""
    num_process_groups = 2
    store = c10d.FileStore(self.file_name, self.world_size)
    # Each underlying group gets a distinct store prefix to avoid key clashes.
    pg = c10d._round_robin_process_groups(
        [
            c10d.ProcessGroupGloo(
                c10d.PrefixStore(str(i), store), self.rank, self.world_size, self.opts()
            )
            for i in range(num_process_groups)
        ]
    )
    # Run a few collectives so that we have called each process group
    for _ in range(num_process_groups + 1):
        tensor = torch.full([100, 100], float(self.rank))
        pg.broadcast(tensor, root=0).wait()
        self.assertEqual(torch.full([100, 100], 0.0), tensor)
@skip_if_win32()
def test_round_robin_create_destroy(self):
    """Round-robin groups can be created, used, destroyed, and re-created."""
    store = c10d.FileStore(self.file_name, self.world_size)
    def create(num, prefix):
        # Unique store prefixes keep the two generations of groups apart.
        return c10d._round_robin_process_groups(
            [
                c10d.ProcessGroupGloo(
                    c10d.PrefixStore("%s/%d" % (prefix, i), store),
                    self.rank,
                    self.world_size,
                    self.opts()
                )
                for i in range(num)
            ]
        )
    # Run create/use/destroy twice
    for i in range(2):
        num_process_groups = 2
        pg = create(num=num_process_groups, prefix=i)
        for _ in range(3):
            tensor = torch.ones([10, 10])
            pg.allreduce(tensor).wait()
            self.assertEqual(torch.full([10, 10], float(self.world_size)), tensor)
        del pg
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class DistributedDataParallelTest(test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase):
def setUp(self):
    """Launch the worker processes for each test."""
    super(DistributedDataParallelTest, self).setUp()
    # Windows has no fork(); fall back to spawn there.
    if sys.platform != "win32":
        self._fork_processes()
    else:
        self._spawn_processes()
def _test_gloo_backend(
    self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
    """Build a Gloo process group on loopback and run the shared DDP test."""
    store = c10d.FileStore(self.file_name, self.world_size)
    options = c10d.ProcessGroupGloo._Options()
    # Pin communication to the loopback interface so the test is
    # independent of the host's network configuration.
    options._devices = [create_device(interface=LOOPBACK)]
    process_group = c10d.ProcessGroupGloo(
        store, self.rank, self.world_size, options
    )
    self._test_ddp_with_process_group(
        process_group, devices, device_ids, multi_device, gradient_as_bucket_view
    )
@requires_gloo()
def test_gloo_backend_cpu_module(self):
    """DDP over the Gloo backend with a CPU module."""
    cpu_devices = [torch.device("cpu")]
    self._test_gloo_backend(cpu_devices, None)
@requires_gloo()
def test_gloo_backend_cpu_module_grad_is_view(self):
    """DDP over Gloo on CPU with gradient_as_bucket_view enabled."""
    cpu_devices = [torch.device("cpu")]
    self._test_gloo_backend(cpu_devices, None, gradient_as_bucket_view=True)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_integer_list(self):
    """DDP on a single GPU, passing device_ids as plain integers."""
    int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
    devices = [torch.device("cuda:%d" % d) for d in int_devices]
    self._test_gloo_backend(devices, int_devices)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_torch_device_list(self):
    """DDP on a single GPU, passing device_ids as torch.device objects."""
    int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
    devices = [torch.device("cuda:%d" % d) for d in int_devices]
    self._test_gloo_backend(devices, devices)
@requires_gloo()
@skip_if_lt_x_gpu(4)
def test_gloo_backend_2gpu_module(self):
    """DDP with a module replicated across two GPUs within one process."""
    int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
    devices = [torch.device("cuda:%d" % d) for d in int_devices]
    self._test_gloo_backend(devices, None, multi_device=True)
@requires_gloo()
@skip_if_lt_x_gpu(8)
def test_gloo_backend_4gpu_module(self):
    """DDP with a module replicated across four GPUs within one process."""
    int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
    devices = [torch.device("cuda:%d" % d) for d in int_devices]
    self._test_gloo_backend(devices, None, multi_device=True)
def _test_global_local_unused_params_grad(self, gradient_as_bucket_view=False, static_graph=False):
    """
    By simulating a multi-task training, this test is to make sure:
    1) DDP does not touch the grad of globally unused parameters.
    2) DDP does update the grad of locally unused parameters.
    """
    class GlobalLocalUnusedParamModule(nn.Module):
        # t0 runs only on rank 0, t1 only on the other ranks;
        # task_unused never runs on any rank (globally unused).
        def __init__(self):
            super(GlobalLocalUnusedParamModule, self).__init__()
            self.t0 = Task()
            self.t1 = Task()
            self.task_unused = Task()
        def task_parameters(self):
            return (self.t0.p, self.t1.p, self.task_unused.p)
        def forward(self, x, rank):
            return self.t0(x) if rank == 0 else self.t1(x)
    def run_and_verify_grad(model):
        # Run forward
        output = model(8, self.rank)
        # The grads of all parameters should be None at this point.
        t0_p, t1_p, task_unused_p = model.module.task_parameters()
        self.assertIsNone(t0_p.grad)
        self.assertIsNone(t1_p.grad)
        self.assertIsNone(task_unused_p.grad)
        # Run backward
        output.mean().backward()
        # Now locally unused parameter should have grad updated on all ranks.
        # However the globally unused parameter should still have None grad.
        self.assertIsNotNone(t0_p.grad)
        self.assertIsNotNone(t1_p.grad)
        self.assertIsNone(task_unused_p.grad)
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
    # Test on CPU
    cpu_model = DistributedDataParallel(
        GlobalLocalUnusedParamModule().cpu(),
        process_group=process_group,
        find_unused_parameters=True,
        gradient_as_bucket_view=gradient_as_bucket_view,
    )
    if static_graph:
        cpu_model._set_static_graph()
    run_and_verify_grad(cpu_model)
    # Test on GPU
    device_id = gpus_for_rank(self.world_size)[self.rank][0]
    gpu_model = DistributedDataParallel(
        GlobalLocalUnusedParamModule().to(device_id),
        device_ids=[device_id],
        process_group=process_group,
        find_unused_parameters=True,
        gradient_as_bucket_view=gradient_as_bucket_view,
    )
    if static_graph:
        gpu_model._set_static_graph()
    run_and_verify_grad(gpu_model)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_global_local_unused_params_grad(self):
    """Default configuration: no gradient views, no static graph."""
    self._test_global_local_unused_params_grad(
        gradient_as_bucket_view=False, static_graph=False
    )
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_global_local_unused_params_grad_with_grad_is_view(self):
    """Same check with gradient_as_bucket_view enabled."""
    self._test_global_local_unused_params_grad(
        gradient_as_bucket_view=True, static_graph=False
    )
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_global_local_unused_params_grad_with_static_graph(self):
    """Same check with static graph optimization enabled."""
    self._test_global_local_unused_params_grad(
        gradient_as_bucket_view=False, static_graph=True
    )
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_find_unused_parameters_when_unused_parameters_empty(self):
    """
    An empty unused_parameters array does not imply find_unused_parameters =
    false. This test makes sure that DDP allreduces unused parameters
    accordingly where the forward pass in some process uses all parameters.
    This unit test creates a module that uses all parameters in rank = 0, and
    has unused parameters in other ranks.
    """
    class FindUnusedParamModule(nn.Module):
        # Rank 0 uses both t0 and t1; every other rank uses only t1.
        def __init__(self):
            super(FindUnusedParamModule, self).__init__()
            self.t0 = Task()
            self.t1 = Task()
        def task_parameters(self):
            return (self.t0.p, self.t1.p)
        def forward(self, x, rank):
            return self.t1(self.t0(x)) if rank == 0 else self.t1(x)
    def run_and_verify_grad(model):
        # Run forward
        output = model(8, self.rank)
        # The grads of all parameters should be None at this point.
        [self.assertIsNone(t_p.grad) for t_p in model.module.task_parameters()]
        # Run backward
        output.mean().backward()
        # Now locally unused parameter should have grad updated on all ranks.
        [self.assertIsNotNone(t_p.grad) for t_p in model.module.task_parameters()]
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
    # Test on CPU
    cpu_model = DistributedDataParallel(
        FindUnusedParamModule().cpu(),
        process_group=process_group,
        find_unused_parameters=True,
    )
    run_and_verify_grad(cpu_model)
    # Test on GPU
    device_id = gpus_for_rank(self.world_size)[self.rank][0]
    gpu_model = DistributedDataParallel(
        FindUnusedParamModule().to(device_id),
        device_ids=[device_id],
        process_group=process_group,
        find_unused_parameters=True,
    )
    run_and_verify_grad(gpu_model)
@requires_gloo()
def test_ignored_output(self):
    """
    Test that the output of a model can be ignored and that there is no
    implicit requirement that `backward` gets called.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
    class IgnoredOutput(nn.Module):
        # Small two-layer classifier; all parameters participate in forward.
        def __init__(self):
            super(IgnoredOutput, self).__init__()
            self.fc1 = nn.Linear(2, 10, bias=False)
            self.fc2 = nn.Linear(10, 4, bias=False)
            self.relu = nn.ReLU()
        def forward(self, x):
            x = self.relu(self.fc1(x))
            x = self.relu(self.fc2(x))
            return F.softmax(x, dim=1)
    model = DistributedDataParallel(
        IgnoredOutput().float(),
        process_group=process_group,
    )
    batch_size = 4
    criterion = nn.CrossEntropyLoss()
    input = torch.rand([batch_size, 2], dtype=torch.float)
    target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
    # Run a few iterations where we ignore the output.
    for _ in range(4):
        output = model(input)
        del output
    # Run a few iterations where we use the output.
    for _ in range(4):
        output = model(input)
        loss = criterion(output, target)
        loss.backward()
    @requires_gloo()
    def test_ignored_output_with_unused_parameters(self):
        """
        Test that the output of a model can be ignored and that there is no
        implicit requirement that `backward` gets called, if not all model
        parameters participated in computing the model output.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)

        class IgnoredOutputWithUnusedParameters(nn.Module):
            def __init__(self):
                super(IgnoredOutputWithUnusedParameters, self).__init__()
                self.fc1 = nn.Linear(2, 10, bias=False)
                self.fc2 = nn.Linear(10, 4, bias=False)
                # fc3 deliberately never participates in forward(); it is the
                # unused parameter this test exercises.
                self.fc3 = nn.Linear(4, 4, bias=False)
                self.relu = nn.ReLU()

            def forward(self, x):
                x = self.relu(self.fc1(x))
                x = self.relu(self.fc2(x))
                return F.softmax(x, dim=1)

        model = DistributedDataParallel(
            IgnoredOutputWithUnusedParameters().float(),
            process_group=process_group,
            find_unused_parameters=True,
        )

        batch_size = 4
        criterion = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.float)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])

        # Run a few iterations where we ignore the output.
        for _ in range(4):
            output = model(input)
            del output

        # Run a few iterations where we use the output.
        for _ in range(4):
            output = model(input)
            loss = criterion(output, target)
            loss.backward()
    def _run_and_verify_sparse_gradients(self, vanilla_model, ddp_model):
        """Feed the full batch to vanilla_model and this rank's shard to
        ddp_model, then check the first parameter's (sparse) grads agree."""
        mult = 2
        batch_size = mult * self.world_size
        criterion = nn.CrossEntropyLoss()
        input = torch.randint(0, 10, [batch_size, 2])
        target = torch.randint(0, 10, [batch_size])

        # Run with entire batch against single process version
        criterion(vanilla_model(input), target).backward()

        # Run with partial batch against multi process version
        partial_input = input.split(mult)[self.rank]
        partial_target = target.split(mult)[self.rank]
        criterion(ddp_model(partial_input), partial_target).backward()

        # Check that the gradients are sparse and identical
        vanilla_parameter = next(vanilla_model.parameters())
        ddp_parameter = next(ddp_model.parameters())
        self.assertEqual(vanilla_parameter.grad, ddp_parameter.grad)
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_save_load_checkpoint(self):
        """Train with a mid-run checkpoint save/restore and verify the final
        parameters match an uninterrupted run, both for the DDP-wrapped model
        and for the plain module restored from the DDP state dict."""
        dist.init_process_group(
            "gloo",
            init_method=f"file://{self.file_name}",
            world_size=self.world_size,
            rank=self.rank,
        )

        class TestModel(nn.Module):
            def __init__(self):
                super(TestModel, self).__init__()
                self.fc1 = nn.Linear(2, 10, bias=False)
                self.fc2 = nn.Linear(10, 4, bias=False)
                self.relu = nn.ReLU()

            def forward(self, x):
                x = self.relu(self.fc1(x))
                x = self.relu(self.fc2(x))
                return F.softmax(x, dim=1)

        def train_loop(model, optimizer, iterations):
            for _ in range(iterations):
                optimizer.zero_grad()
                output = model(input)
                loss = criterion(output, target)
                loss.backward()
                optimizer.step()

        device_id = gpus_for_rank(self.world_size)[self.rank][0]

        model_withload = TestModel().float().to(device_id)
        model_withoutload = TestModel().float().to(device_id)

        ddp_withload = DistributedDataParallel(
            model_withload,
            device_ids=[device_id],
        )
        ddp_withoutload = DistributedDataParallel(
            model_withoutload,
            device_ids=[device_id],
        )

        # ensure that all the three models start with the same set of parameters. By default they are randomized on construction
        for p in ddp_withload.parameters():
            with torch.no_grad():
                p.zero_()
        for p in model_withload.parameters():
            with torch.no_grad():
                p.zero_()
        for p in ddp_withoutload.parameters():
            with torch.no_grad():
                p.zero_()

        batch_size = 4
        criterion = nn.CrossEntropyLoss()

        optimizer_withload = torch.optim.SGD(ddp_withload.parameters(), lr=0.001)
        optimizer_non_ddp_withload = torch.optim.SGD(model_withload.parameters(), lr=0.001)
        optimizer_withoutload = torch.optim.SGD(ddp_withoutload.parameters(), lr=0.001)

        input = torch.rand([batch_size, 2], dtype=torch.float).to(device_id)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
            device_id
        )

        # run the model for 6 iterations, with a checkpoint in the middle
        train_loop(ddp_withload, optimizer_withload, 3)

        # zero out parameters of both DDP and non-DDP models and reload them from the DDP state dict
        checkpoint_path = tempfile.gettempdir() + "/model.checkpoint"
        if self.rank == 0:
            torch.save(ddp_withload.state_dict(), checkpoint_path)

        dist.barrier()
        map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank}
        ddp_state_dict = torch.load(checkpoint_path, map_location=map_location)

        for model in [ddp_withload, model_withload]:
            # NOTE(review): the inner loop zeroes ddp_withload's parameters on
            # both passes instead of `model`'s. Harmless here because
            # ddp_withload wraps model_withload (they share parameter storage),
            # but the loop variable is effectively unused — confirm intent.
            for p in ddp_withload.parameters():
                with torch.no_grad():
                    p.zero_()
        ddp_withload.load_state_dict(ddp_state_dict)
        # the non-DDP model needs to first remove the prefix of "module." from the DDP state dict
        torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_state_dict, "module.")
        model_withload.load_state_dict(ddp_state_dict)

        train_loop(ddp_withload, optimizer_withload, 3)
        train_loop(model_withload, optimizer_non_ddp_withload, 3)

        # re-run the model with the same inputs for 6 iterations with no checkpoint
        train_loop(ddp_withoutload, optimizer_withoutload, 6)

        for p_withload, p_withoutload, p_non_ddp_withload in zip(
            ddp_withload.parameters(), ddp_withoutload.parameters(), model_withload.parameters()
        ):
            self.assertEqual(p_withload, p_withoutload)
            self.assertEqual(p_non_ddp_withload, p_withoutload)
    def _test_sparse_gradients(self, gradient_as_bucket_view=False):
        """Compare sparse gradients between a vanilla model and its DDP copy."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)

        # Ensure initialized weights and inputs are identical across processes
        torch.manual_seed(1337)

        vanilla_model = SparseGradientModule()
        ddp_model = DistributedDataParallel(
            copy.deepcopy(vanilla_model),
            process_group=process_group,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )

        self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)
    @requires_gloo()
    def test_sparse_gradients(self):
        """Sparse gradients with default (copied) bucket grads."""
        self._test_sparse_gradients()
    @requires_gloo()
    def test_sparse_gradients_grad_is_view(self):
        """Sparse gradients with gradient_as_bucket_view=True."""
        self._test_sparse_gradients(gradient_as_bucket_view=True)
    @requires_gloo()
    def test_ddp_comm_hook_future_passing_cpu(self):
        """
        This unit test verifies whether the Future object is passed properly.
        The callback function creates a Future object and sets a value to it.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)

        # Test on CPU
        cpu_model = DistributedDataParallel(
            ModuleForDdpCommHook().cpu(), process_group=process_group
        )

        # Register DDP Communication Hook
        cpu_model.register_comm_hook(None, self._simple_hook)

        # check whether the grads are equal to what the then callback returns.
        # without the comm_hook, result would be 0.25 * torch.ones(2, 2).
        self._run_and_verify_hook(cpu_model, 8, 2 * torch.ones(2, 2))
    def _gpu_model_with_ddp_comm_hook(
        self, process_group, hook=None, gradient_as_bucket_view=False, state=None
    ):
        """Build a DDP model on this rank's first GPU, optionally registering
        `hook` with the given `state`."""
        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        gpu_model = DistributedDataParallel(
            ModuleForDdpCommHook().to(device_id),
            device_ids=[device_id],
            process_group=process_group,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )

        # Register a DDP communication hook if any.
        if hook is not None:
            gpu_model.register_comm_hook(state, hook)

        return gpu_model
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_future_passing_gpu_gloo(self):
        """
        This unit test verifies whether the Future object is passed properly using gloo backend.
        The hook callback function creates a Future object and sets a value to it.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)

        # Get GPU model with simple_hook registered.
        gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)

        # check whether the grads are equal to what simple_hook's then callback returns.
        # without the comm_hook, result would be 0.25 * torch.ones(2, 2).
        self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
    @requires_gloo()
    def test_ddp_invalid_comm_hook_init(self):
        """
        This unit test makes sure that register_comm_hook properly checks the format
        of hook defined by user. The Python hook must be callable. This test also
        checks whether bucket annotation checked properly if defined.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)

        model = DistributedDataParallel(
            ModuleForDdpCommHook(), process_group=process_group
        )

        # A non-callable hook must be rejected up front.
        with self.assertRaisesRegex(TypeError, "Communication hook must be callable."):
            model.register_comm_hook(state=None, hook=1)

        # A wrong `bucket` annotation must also be rejected.
        with self.assertRaisesRegex(
            ValueError, "bucket annotation should be dist.GradBucket."
        ):

            def comm_hook(state: object, bucket: int) -> torch.futures.Future:
                return torch.futures.Future()

            model.register_comm_hook(state=None, hook=comm_hook)
    @requires_gloo()
    def test_ddp_invalid_comm_hook_return_type(self):
        """
        This test checks whether return annotation checked properly if defined. It also
        checks whether an internal error is thrown if return type is incorrect and user
        hasn't specified any return type annotation.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)

        model = DistributedDataParallel(
            ModuleForDdpCommHook(), process_group=process_group
        )

        expected_err = "Communication hook: return annotation should be torch.futures.Future or torch._C.Future."
        # Wrong return annotation: rejected at registration time.
        with self.assertRaisesRegex(
            ValueError,
            expected_err,
        ):

            def comm_hook(state: object, bucket: dist.GradBucket) -> int:
                return torch.futures.Future()

            model.register_comm_hook(state=None, hook=comm_hook)

        verify_ddp_error_logged(model, expected_err)

        # No annotation but wrong runtime return type: error surfaces during
        # the backward pass, once the hook's return value is consumed.
        with self.assertRaisesRegex(
            RuntimeError,
            "callback must return a torch.futures.Future or torch._C.Future object, but got",
        ):

            def comm_hook(state: object, bucket: dist.GradBucket):
                return 1

            model.register_comm_hook(state=None, hook=comm_hook)

            # Run forward
            output = model(8, self.rank)

            # Run backward
            output.mean().backward()
    @requires_gloo()
    def test_ddp_comm_hook_register_just_once(self):
        """
        DDP communication hook can only be registered once. This test validates whether
        the error is thrown properly when register_comm_hook is called more than once.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)

        model = DistributedDataParallel(
            ModuleForDdpCommHook(), process_group=process_group
        )

        def dummy_hook(state, bucket):
            fut = torch.futures.Future()
            fut.set_result([bucket.get_tensor()])
            return fut

        model.register_comm_hook(None, dummy_hook)

        # Second registration must fail.
        with self.assertRaisesRegex(
            RuntimeError,
            "register_comm_hook or register_builtin_comm_hook can only be called once.",
        ):
            model.register_comm_hook(None, dummy_hook)
    @requires_gloo()
    def test_ddp_comm_hook_sparse_gradients(self):
        """
        Runs "test_sparse_gradients" unit test with DDP communication hook. We define a
        simple hook that does allreduce and works with gloo backend for this test.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)

        # Ensure initialized weights and inputs are identical across processes
        torch.manual_seed(1337)

        vanilla_model = SparseGradientModule()
        ddp_model = DistributedDataParallel(
            copy.deepcopy(vanilla_model),
            process_group=process_group,
        )

        def allreduce_hook_gloo(
            state: object, bucket: dist.GradBucket
        ) -> torch.futures.Future:
            def div_by_world_size(fut):
                # Average the summed result by dividing by world_size.
                # (Previous comment said "2 * world_size", which did not
                # match the code.)
                return [t / self.world_size for t in fut.wait()]

            # Prepare allreduced grad bucket tensors by running an async work.
            fut = process_group.allreduce([bucket.get_tensor()]).get_future()
            return fut.then(div_by_world_size)

        ddp_model.register_comm_hook(None, allreduce_hook_gloo)

        self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)
class ReducerModule(nn.Module):
    """Small three-layer MLP used by the Reducer unit tests.

    ``fc3`` only participates in the forward pass when ``use_fc3`` is true,
    which lets tests exercise the unused-parameter code path.
    """

    def __init__(self):
        super(ReducerModule, self).__init__()
        self.fc1 = nn.Linear(2, 10, bias=False)
        self.fc2 = nn.Linear(10, 4, bias=False)
        self.fc3 = nn.Linear(4, 4, bias=False)
        self.relu = nn.ReLU()

    def forward(self, x, use_fc3=True):
        # Casts to float after each layer so a mixed-precision variant
        # (fc1 in double) still produces float activations downstream.
        hidden = self.relu(self.fc1(x)).float()
        hidden = self.relu(self.fc2(hidden)).float()
        logits = self.fc3(hidden).float() if use_fc3 else hidden
        return F.softmax(logits, dim=1)
@requires_gloo()
class ReducerTest(TestCase):
    """Direct unit tests for dist.Reducer using a single-process gloo group."""

    def setUp(self):
        self.file = tempfile.NamedTemporaryFile(delete=False)
        self.store = c10d.FileStore(self.file.name, 1)
        # World size 1: the reducer machinery is exercised without real
        # inter-process communication.
        self.process_group = c10d.ProcessGroupGloo(self.store, 0, 1)

    def test_single_dtype_single_bucket(self):
        """All parameters share one dtype and go into a single bucket."""
        model = ReducerModule()
        parameters = list(model.parameters())
        buckets = [list(range(len(parameters)))]
        dist.Reducer([parameters], buckets, self.process_group)

    def _create_mixed_precision_model(self):
        # fc1 in double, the rest in float -> two dtypes in one model.
        model = ReducerModule()
        model.float()
        model.fc1.double()
        return model

    def test_multi_dtype_single_bucket(self):
        model = self._create_mixed_precision_model()

        # Raise if there are multiple types per bucket.
        # In this case we create one bucket for all parameters.
        with self.assertRaises(RuntimeError):
            parameters = [list(model.parameters())]
            buckets = [list(range(len(parameters[0])))]
            dist.Reducer(parameters, buckets, self.process_group)

    def test_multi_dtype_multi_bucket(self):
        """One bucket per dtype is accepted for a mixed-precision model."""
        model = self._create_mixed_precision_model()
        parameters = [list(model.parameters())]
        group_by_dtype = groupby(
            range(len(parameters[0])), key=lambda i: parameters[0][i].dtype
        )
        buckets = [list(indices) for _, indices in group_by_dtype]
        dist.Reducer(parameters, buckets, self.process_group)

    def _create_reducer_for_models(self, models, find_unused_parameters=False):
        """Bucket each model's parameters by dtype and build a Reducer."""
        parameters = [list(model.parameters()) for model in models]
        group_by_dtype = groupby(
            range(len(parameters[0])), key=lambda i: parameters[0][i].dtype
        )
        buckets = [list(indices) for _, indices in group_by_dtype]
        return dist.Reducer(
            parameters,
            buckets,
            self.process_group,
            find_unused_parameters=find_unused_parameters,
        )

    def test_reducer_no_multi_replicas(self):
        """Passing more than one model replica must be rejected."""
        num_replicas = 2
        models = [self._create_mixed_precision_model() for _ in range(num_replicas)]
        with self.assertRaisesRegex(
            RuntimeError,
            "Expected exactly one model replica.",
        ):
            reducer = self._create_reducer_for_models(models)

    def test_forward_backward(self):
        batch_size = 10
        model = self._create_mixed_precision_model()
        reducer = self._create_reducer_for_models([model])
        reducer.prepare_for_forward()
        loss = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.double)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
        output = loss(model(input), target)
        reducer.prepare_for_backward(output)
        output.backward()

    def test_forward_backward_unused_parameters(self):
        batch_size = 10
        model = self._create_mixed_precision_model()
        reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
        reducer.prepare_for_forward()
        loss = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.double)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
        output = loss(model(input, use_fc3=False), target)

        # Check that the grad of fc3 is not set.
        self.assertEqual(None, model.fc3.weight.grad)

        # Compute and accumulate gradients.
        reducer.prepare_for_backward(output)
        output.backward()

        # The reducer will have marked the grad of fc3 as ready, because
        # it doesn't show up in the autograd graph of `output`. Since fc3.weight
        # is considered being globally unused, it will be kept untouched as None.
        self.assertEqual(None, model.fc3.weight.grad)

    def test_forward_backward_optimizer(self):
        batch_size = 10
        model = self._create_mixed_precision_model()
        reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
        reducer.prepare_for_forward()
        loss = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters())
        for i in range(3):
            input = torch.rand([batch_size, 2], dtype=torch.double)
            target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])

            # The `zero_grad` function calls `detach_` and `zero_` on the grad
            # tensors of model parameters. If we tried to set the grad tensors
            # to a view of the reducer's bucket tensors, this would blow up.
            optimizer.zero_grad()

            # Unused parameter only in the first iteration.
            output = loss(model(input, use_fc3=(i > 0)), target)
            reducer.prepare_for_backward(output)
            output.backward()
            optimizer.step()
@unittest.skipIf(
    TEST_WITH_TSAN,
    "TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
    """Gloo-backend tests for c10d utilities: coalesced broadcast, sequence
    numbers, and barrier argument validation."""

    def setUp(self):
        super(CommTest, self).setUp()
        # Windows has no fork; fall back to spawn there.
        if sys.platform == "win32":
            self._spawn_processes()
        else:
            self._fork_processes()

    def tearDown(self):
        super(CommTest, self).tearDown()
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    def _test_broadcast_coalesced(self, process_group, device, root_rank):
        half = torch.float16

        # No support for float16 for CPU tensors
        if device == torch.device("cpu"):
            half = torch.float32

        # Mixed-dtype chunk list so coalescing has to handle dtype changes.
        target = torch.arange(60, dtype=half, device=device).chunk(5)
        target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
        target += torch.arange(60, dtype=half, device=device).chunk(5)
        target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
        target += torch.arange(60, dtype=half, device=device).chunk(5)
        target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)

        # The tensors to pass to broadcast are identical to the target
        # only on the process that is the root of the broadcast.
        if self.rank == root_rank:
            tensors = list(tensor.clone() for tensor in target)
        else:
            tensors = list(torch.zeros_like(tensor) for tensor in target)

        if self.rank != root_rank:
            self.assertNotEqual(tensors, target)

        c10d._broadcast_coalesced(
            process_group, tensors, buffer_size=256, src=root_rank
        )

        if self.rank != root_rank:
            self.assertEqual(tensors, target)

    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_broadcast_coalesced_gloo_cuda(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        options = c10d.ProcessGroupGloo._Options()
        options._devices = [create_device(interface=LOOPBACK)]
        process_group = c10d.ProcessGroupGloo(
            store, self.rank, self.world_size, options
        )
        device = torch.device("cuda:%d" % self.rank)
        ranks = list(range(self.world_size))
        for root_rank in ranks:
            self._test_broadcast_coalesced(process_group, device, root_rank)

    @requires_gloo()
    def test_broadcast_coalesced_gloo_cpu(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        options = c10d.ProcessGroupGloo._Options()
        options._devices = [create_device(interface=LOOPBACK)]
        process_group = c10d.ProcessGroupGloo(
            store, self.rank, self.world_size, options
        )
        device = torch.device("cpu")
        ranks = list(range(self.world_size))
        for root_rank in ranks:
            self._test_broadcast_coalesced(process_group, device, root_rank)

    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_sequence_num_set_default_pg_gloo(self):
        self._test_sequence_num_set_default_pg(backend="gloo")

    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_sequence_num_set_gloo_new_group(self):
        self._test_sequence_num_set_new_group(backend="gloo")

    @skip_if_lt_x_gpu(2)
    @requires_gloo()
    def test_sequence_num_incremented_gloo_default(self):
        self._test_sequence_num_incremented_default_group("gloo")

    @skip_if_lt_x_gpu(4)
    @requires_gloo()
    def test_sequence_num_incremented_gloo_subgroup(self):
        if self.world_size < 4:
            return unittest.skip("Test requires world_size of at least 4")
        self._test_sequence_num_incremented_subgroup("gloo")

    @requires_gloo()
    def test_gloo_barrier_device_ids(self):
        """Gloo does not support device_ids on barrier; it must raise."""
        store = c10d.FileStore(self.file_name, self.world_size)
        c10d.init_process_group(
            backend="gloo", rank=self.rank, world_size=self.world_size, store=store
        )
        with self.assertRaisesRegex(RuntimeError, "device_ids not supported"):
            c10d.barrier(device_ids=[self.rank])
if __name__ == "__main__":
    # Child processes are forked; a CUDA context initialized in the parent
    # would break fork, so refuse to run if one exists.
    assert (
        not torch.cuda._initialized
    ), "test_distributed must not have initialized CUDA context on main process"

    run_tests()
| 38.513787 | 128 | 0.608238 | import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
import unittest
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
skip_if_lt_x_gpu,
simple_sparse_reduce_tests,
skip_if_win32,
create_device,
verify_ddp_error_logged,
skip_if_rocm,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_TSAN,
)
import test_c10d_common
from test_c10d_common import (
LOOPBACK,
gpus_for_rank,
Task,
ModuleForDdpCommHook,
SparseGradientModule,
)
def simple_reduce_tests(rank, world_size):
    """Return (op, input, expected) triples for single-tensor reductions.

    Covers SUM/PRODUCT/MIN/MAX over each rank's value ``rank + 1``, then
    bitwise BAND/BOR/BXOR cases on int32 tensors.
    """
    own = rank + 1.0
    tests = [
        (
            c10d.ReduceOp.SUM,
            torch.tensor([own]),
            # Sum of 1..world_size.
            torch.tensor([float(world_size * (world_size + 1) / 2)]),
        ),
        (
            c10d.ReduceOp.PRODUCT,
            torch.tensor([own]),
            torch.tensor([float(math.factorial(world_size))]),
        ),
        (
            c10d.ReduceOp.MIN,
            torch.tensor([own]),
            torch.tensor([1.0]),
        ),
        (
            c10d.ReduceOp.MAX,
            torch.tensor([own]),
            torch.tensor([world_size]),
        ),
    ]

    # Bitwise AND: every rank ORs its rank bits with bit `bit`, so only that
    # bit is common to all ranks after the reduction.
    for bit in range(4):
        mask = 1 << bit
        tests.append(
            (
                c10d.ReduceOp.BAND,
                torch.tensor([rank | mask], dtype=torch.int32),
                torch.tensor([mask], dtype=torch.int32),
            )
        )

    # Bitwise OR: emulate a larger world size by pre-ORing `width` values
    # per rank.
    for width in range(1, 5):
        contributed = reduce(operator.or_, (rank * width + j for j in range(width)))
        combined = reduce(operator.or_, range(world_size * width))
        tests.append(
            (
                c10d.ReduceOp.BOR,
                torch.tensor([contributed], dtype=torch.int32),
                torch.tensor([combined], dtype=torch.int32),
            )
        )

    # Bitwise XOR: same emulation with pre-XOR'ed values.
    for width in range(1, 5):
        contributed = reduce(operator.xor, (rank * width + j for j in range(width)))
        combined = reduce(operator.xor, range(world_size * width))
        tests.append(
            (
                c10d.ReduceOp.BXOR,
                torch.tensor([contributed], dtype=torch.int32),
                torch.tensor([combined], dtype=torch.int32),
            )
        )

    return tests
def simple_coalesced_reduce_tests(rank, world_size):
    """Return (op, inputs, expected) cases for coalesced reductions, where
    each case reduces a pair of tensors at once."""
    n = world_size
    # Closed-form sums of 1..n and of the squares 1..n**2.
    linear_sum = torch.tensor([float(n * (n + 1) / 2)])
    square_sum = torch.tensor([float(n * (n + 1) * (2 * n + 1) / 6)])
    return [
        (
            c10d.ReduceOp.SUM,
            [torch.tensor([rank + 1]), torch.tensor([(rank + 1) ** 2])],
            [linear_sum, square_sum],
        ),
        (
            c10d.ReduceOp.PRODUCT,
            [torch.tensor([rank + 1.0]), torch.tensor([rank + 2.0])],
            [
                torch.tensor([float(math.factorial(n))]),
                torch.tensor([float(math.factorial(n + 1))]),
            ],
        ),
        (
            c10d.ReduceOp.MIN,
            [torch.tensor([rank + 0.0]), torch.tensor([rank + 1.0])],
            [torch.tensor([0.0]), torch.tensor([1.0])],
        ),
        (
            c10d.ReduceOp.MAX,
            [torch.tensor([rank + 1.0]), torch.tensor([rank + 2.0])],
            [torch.tensor([n]), torch.tensor([n + 1.0])],
        ),
    ]
def simple_multi_input_reduce_tests(rank, world_size):
    """Return (op, inputs, expected) cases where each rank contributes two
    input tensors to a single reduction."""
    even = 2 * rank + 0.0
    odd_lo = 2 * rank + 1.0
    odd_hi = 2 * rank + 2.0
    return [
        (
            c10d.ReduceOp.SUM,
            [torch.tensor([even]), torch.tensor([odd_lo])],
            # Sum of 0..(2*world_size - 1).
            torch.tensor([float(world_size * (2 * world_size - 1))]),
        ),
        (
            c10d.ReduceOp.PRODUCT,
            [torch.tensor([odd_lo]), torch.tensor([odd_hi])],
            # Product of 1..(2*world_size).
            torch.tensor([float(math.factorial(2 * world_size))]),
        ),
        (
            c10d.ReduceOp.MIN,
            [torch.tensor([odd_lo]), torch.tensor([odd_hi])],
            torch.tensor([1.0]),
        ),
        (
            c10d.ReduceOp.MAX,
            [torch.tensor([odd_lo]), torch.tensor([odd_hi])],
            torch.tensor([2 * world_size]),
        ),
    ]
class RendezvousEnvTest(TestCase):
    @requires_gloo()
    @retry_on_connect_failures
    def test_logging_init(self):
        """init_process_group via env:// must not add or remove any root
        logging handlers."""
        os.environ["WORLD_SIZE"] = "1"
        os.environ["MASTER_ADDR"] = "127.0.0.1"
        os.environ["MASTER_PORT"] = str(common.find_free_port())
        os.environ["RANK"] = "0"

        previous_handlers = logging.root.handlers

        c10d.init_process_group(backend="gloo", init_method="env://")

        current_handlers = logging.root.handlers
        self.assertEqual(len(previous_handlers), len(current_handlers))
        for current, previous in zip(current_handlers, previous_handlers):
            self.assertEqual(current, previous)

        c10d.destroy_process_group()
class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
    @requires_gloo()
    @retry_on_connect_failures
    def test_default_store_timeout_gloo(self):
        """Default store timeout behavior for the gloo backend."""
        self._test_default_store_timeout("gloo")
@requires_gloo()
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class ProcessGroupGlooTest(MultiProcessTestCase):
    def setUp(self):
        super(ProcessGroupGlooTest, self).setUp()

        # For Windows platform, Python does not support fork, change it to spawn here.
        if sys.platform == "win32":
            self._spawn_processes()
        else:
            self._fork_processes()
    def opts(self, threads=2):
        """Build ProcessGroupGloo options: 5s timeout, loopback device, and
        the requested worker thread count."""
        opts = c10d.ProcessGroupGloo._Options()
        opts._timeout = 5.0
        opts._devices = [create_device(interface=LOOPBACK)]
        opts._threads = threads
        return opts
    def test_multi_device_constructor(self):
        """A process group constructed with two devices should complete work
        issued across both of them."""
        store = c10d.FileStore(self.file_name, self.world_size)
        opts = c10d.ProcessGroupGloo._Options()
        opts._timeout = 5.0
        opts._devices = [
            create_device(interface=LOOPBACK),
            create_device(interface=LOOPBACK),
        ]
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, opts)

        # Execute 2x the number of operations to ensure we use every device.
        for fut in [pg.allreduce(torch.ones(i + 1)).get_future() for i in range(4)]:
            fut.wait()
    def test_empty_tensors(self):
        """Broadcasting an empty tensor must complete and yield an empty
        result rather than hang or error."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())

        xs = [torch.FloatTensor([])]
        fut = pg.broadcast(xs).get_future()
        fut.wait()
        output = fut.value()
        self.assertEqual(0, output[0].numel())
        self.assertEqualIgnoreType(xs[0], output[0])
    def test_broadcast_checks(self):
        """Broadcast argument validation: bad root rank/tensor index, empty
        tensor list, and mismatched dtypes/sizes must all raise ValueError."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())

        t1 = torch.zeros([1], dtype=torch.float32)
        t2 = torch.zeros([1], dtype=torch.float64)  # dtype mismatch with t1
        t3 = torch.zeros([2], dtype=torch.float32)  # size mismatch with t1

        with self.assertRaisesRegex(ValueError, "invalid root rank"):
            opts = c10d.BroadcastOptions()
            opts.rootRank = -1
            opts.rootTensor = 0
            pg.broadcast([t1], opts)

        with self.assertRaisesRegex(ValueError, "invalid root rank"):
            opts = c10d.BroadcastOptions()
            opts.rootRank = self.world_size
            opts.rootTensor = 0
            pg.broadcast([t1], opts)

        with self.assertRaisesRegex(ValueError, "invalid root tensor"):
            opts = c10d.BroadcastOptions()
            opts.rootRank = self.rank
            opts.rootTensor = -1
            pg.broadcast([t1], opts)

        with self.assertRaisesRegex(ValueError, "invalid root tensor"):
            opts = c10d.BroadcastOptions()
            opts.rootRank = self.rank
            opts.rootTensor = 1
            pg.broadcast([t1], opts)

        with self.assertRaisesRegex(ValueError, "invalid root tensor"):
            opts = c10d.BroadcastOptions()
            opts.rootRank = self.rank
            opts.rootTensor = 0
            pg.broadcast([], opts)

        with self.assertRaisesRegex(ValueError, "invalid tensor type"):
            opts = c10d.BroadcastOptions()
            opts.rootRank = self.rank
            opts.rootTensor = 0
            pg.broadcast([t1, t2], opts)

        with self.assertRaisesRegex(ValueError, "invalid tensor size"):
            opts = c10d.BroadcastOptions()
            opts.rootRank = self.rank
            opts.rootTensor = 0
            pg.broadcast([t1, t3], opts)
    def _test_broadcast_basics(self, fn):
        """Broadcast 1 and 2 tensors with every rank acting as root once;
        `fn` maps a CPU tensor onto the device under test."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())

        def broadcast(xs, rootRank, rootTensor):
            opts = c10d.BroadcastOptions()
            opts.rootRank = rootRank
            opts.rootTensor = rootTensor
            fut = pg.broadcast(xs, opts).get_future()
            fut.wait()
            return fut.value()

        # Every rank is root once
        for i in range(self.world_size):
            # Run with 1 input tensor
            x = fn(torch.tensor([self.rank]))
            output = broadcast([x], i, 0)
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(torch.tensor([i]), output[0])

            # Run with 2 input tensors
            num = 2
            for j in range(num):
                xs = [
                    fn(torch.tensor([self.rank * num + 0.0])),
                    fn(torch.tensor([self.rank * num + 1.0])),
                ]
                output = broadcast(xs, i, j)
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                self.assertEqualIgnoreType(torch.tensor([i * num + j]), output[0])
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                self.assertEqualIgnoreType(torch.tensor([i * num + j]), output[1])

        # Test overloaded convenience function
        x = torch.tensor([self.rank + 1.0])
        fut = pg.broadcast(x, root=0).get_future()
        fut.wait()
        result = fut.value()
        self.assertEqual(torch.tensor([1.0]), result[0])
    def test_broadcast_basics(self):
        """Broadcast basics on CPU tensors."""
        self._test_broadcast_basics(lambda t: t.clone())
    @skip_if_lt_x_gpu(2)
    def test_broadcast_basics_cuda(self):
        """Broadcast basics on CUDA tensors."""
        self._test_broadcast_basics(lambda t: t.clone().cuda())
    def _test_broadcast_stress(self, inputs):
        """Issue many concurrent broadcasts (8 worker threads) and check each
        result matches that iteration's root value."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(
            store, self.rank, self.world_size, self.opts(threads=8)
        )
        # Root rotates with the iteration index.
        work_handles = [
            pg.broadcast(inputs[i], root=(i % self.world_size))
            for i in range(len(inputs))
        ]
        for i, work_handle in enumerate(work_handles):
            work_handle.wait()
            self.assertEqual(
                torch.tensor([(i * self.world_size) + (i % self.world_size)]),
                inputs[i],
                msg=("Mismatch in iteration %d" % i),
            )
    def test_broadcast_stress(self):
        """Stress broadcast with 1000 CPU tensors."""
        inputs = [torch.tensor([i * self.world_size + self.rank]) for i in range(1000)]
        self._test_broadcast_stress(inputs)
    @skip_if_lt_x_gpu(2)
    def test_broadcast_stress_cuda(self):
        """Stress broadcast with 1000 CUDA tensors."""
        inputs = [
            torch.tensor([i * self.world_size + self.rank]).cuda() for i in range(1000)
        ]
        self._test_broadcast_stress(inputs)
    def test_allreduce_checks(self):
        """Allreduce argument validation: empty tensor list and mismatched
        dtypes/sizes must raise ValueError."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())

        t1 = torch.zeros([1], dtype=torch.float32)
        t2 = torch.zeros([1], dtype=torch.float64)  # dtype mismatch with t1
        t3 = torch.zeros([2], dtype=torch.float32)  # size mismatch with t1

        with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
            opts = c10d.AllreduceOptions()
            pg.allreduce([], opts)

        with self.assertRaisesRegex(ValueError, "invalid tensor type"):
            opts = c10d.AllreduceOptions()
            pg.allreduce([t1, t2], opts)

        with self.assertRaisesRegex(ValueError, "invalid tensor size"):
            opts = c10d.AllreduceOptions()
            pg.allreduce([t1, t3], opts)
    def _test_allreduce_basics(self, fn):
        """Run the single- and multi-input reduce cases plus the convenience
        overload; `fn` maps a CPU tensor onto the device under test."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())

        # Single input tests
        tests = simple_reduce_tests(self.rank, self.world_size)
        for (op, input, expected) in tests:
            opts = c10d.AllreduceOptions()
            opts.reduceOp = op
            tensor = fn(input)
            fut = pg.allreduce([tensor], opts).get_future()
            fut.wait()
            result = fut.value()
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(expected, result[0])

        # Multi input tests
        tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
        for (op, inputs, output) in tests:
            opts = c10d.AllreduceOptions()
            opts.reduceOp = op
            tensors = [fn(input) for input in inputs]
            fut = pg.allreduce(tensors, opts).get_future()
            fut.wait()
            result = fut.value()
            for tensor in result:
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                self.assertEqualIgnoreType(output, tensor)

        # Test overloaded convenience function (defaults to using sum)
        x = fn(torch.tensor([self.rank + 1.0]))
        fut = pg.allreduce(x).get_future()
        fut.wait()
        result = fut.value()
        self.assertEqual(
            torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]), result[0]
        )
def test_allreduce_basics(self):
self._test_allreduce_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_allreduce_basics_cuda(self):
self._test_allreduce_basics(lambda t: t.clone().cuda())
    # _using_work_api tests are to make sure we still properly support work API.
    # This should go away as we deprecate it.
    def _test_allreduce_basics_using_work_api(self, fn):
        """Same coverage as ``_test_allreduce_basics`` but via the legacy Work API.

        Uses ``work.wait()`` + ``work.result()`` instead of futures; kept until
        the Work API is removed (see note above).
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        # Single input tests
        tests = simple_reduce_tests(self.rank, self.world_size)
        for (op, input, expected) in tests:
            opts = c10d.AllreduceOptions()
            opts.reduceOp = op
            tensor = fn(input)
            work = pg.allreduce([tensor], opts)
            work.wait()
            result = work.result()
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(expected, result[0])
        # Multi input tests
        tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
        for (op, inputs, output) in tests:
            opts = c10d.AllreduceOptions()
            opts.reduceOp = op
            tensors = [fn(input) for input in inputs]
            work = pg.allreduce(tensors, opts)
            work.wait()
            result = work.result()
            for tensor in result:
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                self.assertEqualIgnoreType(output, tensor)
        # Test overloaded convenience function (defaults to using sum)
        x = fn(torch.tensor([self.rank + 1.0]))
        work = pg.allreduce(x)
        work.wait()
        result = work.result()
        self.assertEqual(
            torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]), result[0]
        )
    def test_allreduce_basics_using_work_api(self):
        """Work-API allreduce basics on CPU tensors."""
        self._test_allreduce_basics_using_work_api(lambda t: t.clone())
    @skip_if_lt_x_gpu(2)
    def test_allreduce_basics_cuda_using_work_api(self):
        """Work-API allreduce basics on CUDA tensors."""
        self._test_allreduce_basics_using_work_api(lambda t: t.clone().cuda())
    def _test_allreduce_stress(self, inputs):
        """Issue many concurrent sum-allreduces (8 worker threads) and verify each.

        Input ``i`` holds ``i + rank``, so the expected sum is
        ``i * world_size + world_size * (world_size - 1) / 2``.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(
            store, self.rank, self.world_size, self.opts(threads=8)
        )
        # Launch all operations before waiting on any, to exercise concurrency.
        future_handles = [pg.allreduce(inputs[i]).get_future() for i in range(len(inputs))]
        for i, future_handle in enumerate(future_handles):
            future_handle.wait()
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(
                torch.tensor(
                    [
                        (i * self.world_size)
                        + (self.world_size * (self.world_size - 1) / 2)
                    ]
                ),
                future_handle.value()[0],
                msg=("Mismatch in iteration %d" % i),
            )
    def test_allreduce_stress(self):
        """Allreduce stress test with 1000 CPU tensors."""
        inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
        self._test_allreduce_stress(inputs)
    @skip_if_lt_x_gpu(2)
    @skip_if_rocm
    def test_allreduce_stress_cuda(self):
        """Allreduce stress test with 1000 CUDA tensors."""
        inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
        self._test_allreduce_stress(inputs)
    def test_allreduce_coalesced_checks(self):
        """Coalesced allreduce validation: empty list, mixed dtype, mixed/sparse layout raise."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        t1 = torch.zeros(1, dtype=torch.float32)
        t2 = torch.zeros(1, dtype=torch.float64)
        # Sparse tensor: coalesced allreduce only supports dense layouts.
        t3 = torch.sparse_coo_tensor([[0]], [1], size=(1,))
        with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
            opts = c10d.AllreduceCoalescedOptions()
            pg.allreduce_coalesced([], opts)
        with self.assertRaisesRegex(ValueError, "tensors must all have the same type"):
            opts = c10d.AllreduceCoalescedOptions()
            pg.allreduce_coalesced([t1, t2], opts)
        with self.assertRaisesRegex(ValueError, "invalid tensor layout at index"):
            opts = c10d.AllreduceCoalescedOptions()
            pg.allreduce_coalesced([t1, t3], opts)
        with self.assertRaisesRegex(ValueError, "unsupported layout"):
            opts = c10d.AllreduceCoalescedOptions()
            pg.allreduce_coalesced([t3, t3.clone()], opts)
    @skip_if_lt_x_gpu(1)
    def test_allreduce_coalesced_checks_cuda(self):
        """Coalesced allreduce rejects CUDA tensors (CPU-only operation)."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        t1 = torch.zeros(1, dtype=torch.float32)
        with self.assertRaisesRegex(ValueError, "unsupported device type"):
            opts = c10d.AllreduceCoalescedOptions()
            pg.allreduce_coalesced([t1.cuda(), t1.cuda()], opts)
    def _test_allreduce_coalesced_basics(self, fn):
        """Run coalesced allreduce over the simple reduce test cases and check results.

        ``fn`` maps each input tensor onto the device/layout under test.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        test_cases = simple_coalesced_reduce_tests(self.rank, self.world_size)
        for op, inputs, outputs in test_cases:
            opts = c10d.AllreduceCoalescedOptions()
            opts.reduceOp = op
            tensors = [fn(x) for x in inputs]
            fut = pg.allreduce_coalesced(tensors, opts).get_future()
            fut.wait()
            result = fut.value()
            for result_tensor, expected in zip(result, outputs):
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                self.assertEqualIgnoreType(result_tensor, expected)
    def test_allreduce_coalesced_basics(self):
        """Coalesced allreduce basics on CPU tensors."""
        self._test_allreduce_coalesced_basics(lambda t: t.clone())
def _test_allreduce_coalesced_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = [pg.allreduce_coalesced(input).get_future() for input in inputs]
for i, future_handle in enumerate(future_handles):
future_handle.wait()
result = future_handle.value()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
2
* [
torch.tensor(
[
(i * self.world_size)
+ (self.world_size * (self.world_size - 1) / 2)
]
)
],
result,
msg="Mismatch in interation {}".format(i),
)
def test_allreduce_coalesced_stress(self):
inputs = [2 * [torch.tensor([i + self.rank])] for i in range(1000)]
self._test_allreduce_coalesced_stress(inputs)
    def test_sparse_allreduce_checks(self):
        """Sparse allreduce validation: layout mix, size mix, and non-SUM ops raise."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        # t1 is dense; t2/t3 are sparse with different dense sizes.
        t1 = torch.zeros([1])
        t2 = torch.sparse_coo_tensor([[0]], [1], size=(2,))
        t3 = torch.sparse_coo_tensor([[0]], [1], size=(4,))
        with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
            opts = c10d.AllreduceOptions()
            pg.allreduce([], opts)
        with self.assertRaisesRegex(ValueError, "invalid tensor layout"):
            opts = c10d.AllreduceOptions()
            pg.allreduce([t1, t2], opts)
        with self.assertRaisesRegex(ValueError, "invalid tensor size"):
            opts = c10d.AllreduceOptions()
            pg.allreduce([t2, t3], opts)
        # Sparse allreduce only works with c10d.ReduceOp.SUM.
        for op in [c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX]:
            with self.assertRaisesRegex(ValueError, "unsupported reduction operation"):
                opts = c10d.AllreduceOptions()
                opts.reduceOp = op
                pg.allreduce([t3], opts)
    def _test_sparse_allreduce_basics(self, fn):
        """Run sparse sum-allreduce for 1 and 2 inputs per rank and verify results.

        Checks both the in-place tensors and the future's returned value.
        ``fn`` maps each input tensor onto the device under test.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        for num_inputs_per_rank in [1, 2]:
            tests = simple_sparse_reduce_tests(
                self.rank, self.world_size, num_inputs=num_inputs_per_rank
            )
            for (inputs, outputs) in tests:
                tensors = [fn(input) for input in inputs]
                fut = pg.allreduce(tensors).get_future()
                fut.wait()
                result = fut.value()
                self.assertEqual(tensors, outputs)
                self.assertEqual(result, outputs)
    @unittest.skip("intermittent failures on Windows, in CI")
    def test_sparse_allreduce_basics(self):
        """Sparse allreduce basics on CPU tensors (currently skipped in CI)."""
        self._test_sparse_allreduce_basics(lambda t: t)
    @skip_if_lt_x_gpu(2)
    def test_sparse_allreduce_basics_cuda(self):
        """Sparse allreduce basics on CUDA tensors."""
        self._test_sparse_allreduce_basics(lambda t: t.clone().cuda())
    def test_scatter_checks(self):
        """Scatter argument validation: bad root rank, list sizes, dtype/size mix, non-root input."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        # t1/t2 differ in dtype; t1/t3 differ in numel.
        t1 = torch.zeros([1], dtype=torch.float32)
        t2 = torch.zeros([1], dtype=torch.float64)
        t3 = torch.zeros([2], dtype=torch.float32)
        with self.assertRaisesRegex(ValueError, "invalid root rank"):
            opts = c10d.ScatterOptions()
            opts.rootRank = -1
            pg.scatter([t1], [], opts)
        with self.assertRaisesRegex(ValueError, "invalid root rank"):
            opts = c10d.ScatterOptions()
            opts.rootRank = self.world_size
            pg.scatter([t1], [], opts)
        with self.assertRaisesRegex(
            ValueError, "requires a single-element output tensor list"
        ):
            opts = c10d.ScatterOptions()
            opts.rootRank = 0
            pg.scatter([], [], opts)
        with self.assertRaisesRegex(
            ValueError, "requires a single-element output tensor list"
        ):
            opts = c10d.ScatterOptions()
            opts.rootRank = 0
            pg.scatter([t1, t1], [], opts)
        with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
            opts = c10d.ScatterOptions()
            opts.rootRank = self.rank
            pg.scatter([t1], [], opts)
        with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
            opts = c10d.ScatterOptions()
            opts.rootRank = self.rank
            pg.scatter([t1], [[t1] * self.world_size, [t1] * self.world_size], opts)
        # The root's inner input list must have exactly world_size elements.
        desired_list_size = self.world_size
        incorrect_list_size = self.world_size - 1
        err_str = "Incorrect input list size {}. Input list size should be {}"
        with self.assertRaisesRegex(
            ValueError, err_str.format(incorrect_list_size, desired_list_size)
        ):
            opts = c10d.ScatterOptions()
            opts.rootRank = self.rank
            pg.scatter([t1], [[t1] * incorrect_list_size], opts)
        incorrect_list_size = self.world_size + 1
        with self.assertRaisesRegex(
            ValueError, err_str.format(incorrect_list_size, desired_list_size)
        ):
            opts = c10d.ScatterOptions()
            opts.rootRank = self.rank
            pg.scatter([t1], [[t1] * incorrect_list_size], opts)
        with self.assertRaisesRegex(ValueError, "invalid tensor type"):
            opts = c10d.ScatterOptions()
            opts.rootRank = self.rank
            pg.scatter([t1], [[t2] * self.world_size], opts)
        with self.assertRaisesRegex(ValueError, "invalid tensor size"):
            opts = c10d.ScatterOptions()
            opts.rootRank = self.rank
            pg.scatter([t1], [[t3] * self.world_size], opts)
        with self.assertRaisesRegex(ValueError, "requires empty input on non-root"):
            opts = c10d.ScatterOptions()
            opts.rootRank = (self.rank + 1) % self.world_size
            pg.scatter([t1], [[t1] * self.world_size], opts)
    def _test_scatter_basics(self, fn):
        """Each rank takes a turn as scatter root; every output must equal the root's rank.

        ``fn`` maps each tensor onto the device under test.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        # Preallocate tensors for input/output
        input = [fn(torch.tensor([self.rank])) for _ in range(self.world_size)]
        outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
        # Take turns being the scatter root and accumulate work items
        futures = []
        for i in range(self.world_size):
            opts = c10d.ScatterOptions()
            opts.rootRank = i
            if i == self.rank:
                futures.append(pg.scatter([outputs[i]], [input], opts).get_future())
            else:
                # Non-root ranks must pass an empty input list.
                futures.append(pg.scatter([outputs[i]], [], opts).get_future())
        # Wait for work to complete
        for i in range(self.world_size):
            futures[i].wait()
            result = futures[i].value()
            self.assertEqual(torch.tensor([i]), result[0])
    def test_scatter_basics(self):
        """Scatter basics on CPU tensors."""
        self._test_scatter_basics(lambda t: t.clone())
    @skip_if_lt_x_gpu(2)
    def test_scatter_basics_cuda(self):
        """Scatter basics on CUDA tensors."""
        self._test_scatter_basics(lambda t: t.clone().cuda())
def _test_scatter_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
outputs = [
[fn(torch.tensor([-1])) for _ in range(self.world_size)]
for _ in range(len(inputs))
]
future_handles = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ScatterOptions()
opts.rootRank = root
if root == self.rank:
fut = pg.scatter(
[outputs[i][root]], [[fn(e) for e in inputs[i]]], opts
).get_future()
else:
fut = pg.scatter([outputs[i][root]], [], opts).get_future()
future_handles.append(fut)
for i, future_handle in enumerate(future_handles):
future_handle.wait()
iter = i // self.world_size
root = i % self.world_size
result = future_handle.value()
self.assertEqual(
torch.tensor([iter + root]),
result[0],
msg=("Mismatch in iteration %d for rank %d" % (iter, root)),
)
def test_scatter_stress(self):
inputs = [
[torch.tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone())
@unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/15963")
@skip_if_lt_x_gpu(2)
def test_scatter_stress_cuda(self):
inputs = [
[torch.tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone().cuda())
    def test_gather_checks(self):
        """Gather argument validation: bad root rank, list sizes, dtype/size mix, non-root output."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        # t1/t2 differ in dtype; t1/t3 differ in numel.
        t1 = torch.zeros([1], dtype=torch.float32)
        t2 = torch.zeros([1], dtype=torch.float64)
        t3 = torch.zeros([2], dtype=torch.float32)
        with self.assertRaisesRegex(ValueError, "invalid root rank"):
            opts = c10d.GatherOptions()
            opts.rootRank = -1
            pg.gather([], [t1], opts)
        with self.assertRaisesRegex(ValueError, "invalid root rank"):
            opts = c10d.GatherOptions()
            opts.rootRank = self.world_size
            pg.gather([], [t1], opts)
        with self.assertRaisesRegex(
            ValueError, "requires a single-element input tensor list"
        ):
            opts = c10d.GatherOptions()
            opts.rootRank = 0
            pg.gather([], [], opts)
        with self.assertRaisesRegex(
            ValueError, "requires a single-element input tensor list"
        ):
            opts = c10d.GatherOptions()
            opts.rootRank = 0
            pg.gather([], [t1, t1], opts)
        with self.assertRaisesRegex(
            ValueError, "requires a single-element output list"
        ):
            opts = c10d.GatherOptions()
            opts.rootRank = self.rank
            pg.gather([], [t1], opts)
        with self.assertRaisesRegex(
            ValueError, "requires a single-element output list"
        ):
            opts = c10d.GatherOptions()
            opts.rootRank = self.rank
            pg.gather([[t1] * self.world_size, [t1] * self.world_size], [t1], opts)
        # The root's inner output list must have exactly world_size elements.
        desired_list_size = self.world_size
        incorrect_list_size = self.world_size - 1
        err_str = "Incorrect output list size {}. Output list size should be {}"
        with self.assertRaisesRegex(
            ValueError, err_str.format(incorrect_list_size, desired_list_size)
        ):
            opts = c10d.GatherOptions()
            opts.rootRank = self.rank
            pg.gather([[t1] * incorrect_list_size], [t1], opts)
        incorrect_list_size = self.world_size + 1
        with self.assertRaisesRegex(
            ValueError, err_str.format(incorrect_list_size, desired_list_size)
        ):
            opts = c10d.GatherOptions()
            opts.rootRank = self.rank
            pg.gather([[t1] * incorrect_list_size], [t1], opts)
        with self.assertRaisesRegex(ValueError, "invalid tensor type"):
            opts = c10d.GatherOptions()
            opts.rootRank = self.rank
            pg.gather([[t2] * self.world_size], [t1], opts)
        with self.assertRaisesRegex(ValueError, "invalid tensor size"):
            opts = c10d.GatherOptions()
            opts.rootRank = self.rank
            pg.gather([[t3] * self.world_size], [t1], opts)
        with self.assertRaisesRegex(ValueError, "requires empty output on non-root"):
            opts = c10d.GatherOptions()
            opts.rootRank = (self.rank + 1) % self.world_size
            pg.gather([[t1] * self.world_size], [t1], opts)
    def _test_gather_basics(self, fn):
        """Each rank takes a turn as gather root; the root must collect [0..world_size).

        ``fn`` maps each tensor onto the device under test.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        # Preallocate tensors for input/output
        input = [fn(torch.tensor([self.rank]))]
        outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
        # Take turns being the gather root and accumulate work items
        futures = []
        for i in range(self.world_size):
            opts = c10d.GatherOptions()
            opts.rootRank = i
            if i == self.rank:
                futures.append(pg.gather([outputs], input, opts).get_future())
            else:
                # Non-root ranks must pass an empty output list.
                futures.append(pg.gather([], input, opts).get_future())
        # Wait for work to complete
        expected = [torch.tensor([rank]) for rank in range(self.world_size)]
        for i in range(self.world_size):
            futures[i].wait()
            result = futures[i].value()
            if i == self.rank:
                self.assertEqual(expected, result)
    def test_gather_basics(self):
        """Gather basics on CPU tensors."""
        self._test_gather_basics(lambda t: t.clone())
    @skip_if_lt_x_gpu(2)
    def test_gather_basics_cuda(self):
        """Gather basics on CUDA tensors."""
        self._test_gather_basics(lambda t: t.clone().cuda())
def _test_gather_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = []
outputs = [
[[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
for _ in range(len(inputs))
]
expected_outputs = [
[[torch.tensor([i + j]) for j in range(self.world_size)]]
for i in range(len(inputs))
]
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.GatherOptions()
opts.rootRank = root
if root == self.rank:
fut = pg.gather(outputs[i], [fn(inputs[i])], opts).get_future()
else:
fut = pg.gather([], [fn(inputs[i])], opts).get_future()
future_handles.append(fut)
for i, future_handle in enumerate(future_handles):
future_handle.wait()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
result = future_handle.value()
self.assertEqual(
expected_outputs[iter],
[result],
msg=("Mismatch in iteration %d for root %d" % (iter, root)),
)
def test_gather_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_gather_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone().cuda())
    def test_allgather_checks(self):
        """Allgather argument validation: empty/mismatched lists and dtype/size mixes raise."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        # t1/t2 differ in dtype; t1/t3 differ in numel.
        t1 = torch.zeros([1], dtype=torch.float32)
        t2 = torch.zeros([1], dtype=torch.float64)
        t3 = torch.zeros([2], dtype=torch.float32)
        with self.assertRaisesRegex(ValueError, "requires non-empty input tensor list"):
            pg.allgather([], [])
        with self.assertRaisesRegex(
            ValueError, "requires input/output tensor lists to have the same length"
        ):
            pg.allgather([], [t1])
        with self.assertRaisesRegex(
            ValueError, "requires input/output tensor lists to have the same length"
        ):
            pg.allgather([[t1] * self.world_size, [t1] * self.world_size], [t1])
        with self.assertRaisesRegex(ValueError, "invalid output tensor list"):
            pg.allgather([[t1] * (self.world_size - 1)], [t1])
        with self.assertRaisesRegex(ValueError, "invalid output tensor list"):
            pg.allgather([[t1] * (self.world_size + 1)], [t1])
        with self.assertRaisesRegex(ValueError, "invalid tensor type"):
            pg.allgather(
                [[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t2]
            )
        with self.assertRaisesRegex(ValueError, "invalid tensor size"):
            pg.allgather(
                [[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t3]
            )
        with self.assertRaisesRegex(ValueError, "invalid tensor type"):
            pg.allgather([([t1, t2] * (self.world_size))[: self.world_size]], [t1])
        with self.assertRaisesRegex(ValueError, "invalid tensor size"):
            pg.allgather([([t1, t3] * (self.world_size))[: self.world_size]], [t1])
    def _test_allgather_basics(self, fn):
        """Run allgather with 1, 2, and 3 input tensors per rank and verify outputs.

        With n inputs per rank, output list j must hold [0 .. n * world_size).
        ``fn`` maps each tensor onto the device under test.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        # Run with N input tensor per rank
        for n in [1, 2, 3]:
            input = [fn(torch.tensor([n * self.rank + i])) for i in range(n)]
            output = [
                [fn(torch.tensor([-1])) for _ in range(n * self.world_size)]
                for _ in range(n)
            ]
            expected_output = [
                [torch.tensor([i]) for i in range(n * self.world_size)]
                for _ in range(n)
            ]
            fut = pg.allgather(output, input).get_future()
            fut.wait()
            result = fut.value()
            if n == 1:
                # For a single input, the future returns a flat list; re-nest it
                # so it compares against the nested expected output.
                result = [result]
            self.assertEqual(expected_output, result)
    def test_allgather_basics(self):
        """Allgather basics on CPU tensors."""
        self._test_allgather_basics(lambda t: t.clone())
    @skip_if_lt_x_gpu(2)
    def test_allgather_basics_cuda(self):
        """Allgather basics on CUDA tensors."""
        self._test_allgather_basics(lambda t: t.clone().cuda())
    def _test_allgather_stress(self, inputs, fn):
        """Issue many concurrent allgathers and verify every result.

        For iteration ``i`` every rank must collect ``[i + 0, ..., i + world_size - 1]``.
        ``fn`` maps each tensor onto the device under test.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(
            store, self.rank, self.world_size, self.opts(threads=8)
        )
        future_handles = []
        outputs = [
            [[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
            for _ in range(len(inputs))
        ]
        expected_outputs = [
            [[torch.tensor([i + j]) for j in range(self.world_size)]]
            for i in range(len(inputs))
        ]
        for i in range(len(inputs)):
            fut = pg.allgather(outputs[i], [fn(inputs[i])]).get_future()
            future_handles.append(fut)
        for i, future_handle in enumerate(future_handles):
            future_handle.wait()
            result = future_handle.value()
            self.assertEqual(
                expected_outputs[i],
                [result],
                msg=("Mismatch in iteration %d" % i),
            )
    def test_allgather_stress(self):
        """Allgather stress test with 1000 CPU tensors."""
        inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
        self._test_allgather_stress(inputs, lambda t: t.clone())
    @skip_if_lt_x_gpu(2)
    @skip_if_rocm
    def test_allgather_stress_cuda(self):
        """Allgather stress test with 1000 CUDA tensors."""
        inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
        self._test_allgather_stress(inputs, lambda t: t.clone().cuda())
    def test_allgather_coalesced_checks(self):
        """Coalesced allgather validation: size/dtype mismatch, wrong list count, bad nesting."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        dummy_input = [torch.zeros([1], dtype=torch.float32)]
        dummy_output_lists = [
            [torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size)
        ]
        # One of output tensors does not match input list.
        dummy_output_lists[0] = [torch.zeros([0], dtype=torch.float32)]
        with self.assertRaisesRegex(
            ValueError, "invalid size of output tensor at index 0"
        ):
            c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
        # One of output tensors does not match input list.
        dummy_output_lists[0] = [torch.zeros([1], dtype=torch.float64)]
        with self.assertRaisesRegex(ValueError, "invalid tensor type at index 0"):
            c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
        # Output lists have too many elements
        dummy_output_lists = [
            [torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size + 1)
        ]
        with self.assertRaisesRegex(
            ValueError, "output lists should be equal to world size"
        ):
            c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
        # Output is not a list of lists.
        dummy_output_lists = [torch.zeros([0], dtype=torch.float32)]
        with self.assertRaisesRegex(
            RuntimeError, "Invalid function argument.*output_tensor_lists"
        ):
            c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
    def test_reduce_checks(self):
        """Reduce argument validation: bad root rank/tensor and multi-element lists raise."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        t1 = torch.zeros([1], dtype=torch.float32)
        with self.assertRaisesRegex(ValueError, "invalid root rank"):
            opts = c10d.ReduceOptions()
            opts.rootRank = -1
            opts.rootTensor = 0
            pg.reduce([t1], opts)
        with self.assertRaisesRegex(ValueError, "invalid root rank"):
            opts = c10d.ReduceOptions()
            opts.rootRank = self.world_size
            opts.rootTensor = 0
            pg.reduce([t1], opts)
        with self.assertRaisesRegex(ValueError, "invalid root tensor"):
            opts = c10d.ReduceOptions()
            opts.rootRank = self.rank
            # rootTensor indexes into the (single-element) tensor list, so 1 is out of range.
            opts.rootTensor = 1
            pg.reduce([t1], opts)
        with self.assertRaisesRegex(
            ValueError, "requires a single-element tensor list"
        ):
            opts = c10d.ReduceOptions()
            opts.rootRank = self.rank
            opts.rootTensor = 0
            pg.reduce([t1, t1], opts)
    def _test_reduce_basics(self, fn):
        """Run reduce over all ops and all roots; only the root checks the result.

        ``fn`` maps each input tensor onto the device under test.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        for (op, input, output) in simple_reduce_tests(self.rank, self.world_size):
            for root in range(self.world_size):
                opts = c10d.ReduceOptions()
                opts.reduceOp = op
                opts.rootRank = root
                tmp = fn(input)
                fut = pg.reduce([tmp], opts).get_future()
                fut.wait()
                result = fut.value()
                if root == self.rank:
                    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                    self.assertEqualIgnoreType(output, result[0])
    def test_reduce_basics(self):
        """Reduce basics on CPU tensors."""
        self._test_reduce_basics(lambda t: t.clone())
    @skip_if_lt_x_gpu(2)
    def test_reduce_basics_cuda(self):
        """Reduce basics on CUDA tensors."""
        self._test_reduce_basics(lambda t: t.clone().cuda())
def _test_reduce_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = []
outputs = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.rootRank = root
tmp = inputs[i].clone()
outputs.append(tmp)
fut = pg.reduce([tmp], opts).get_future()
future_handles.append(fut)
for i, future_handle in enumerate(future_handles):
future_handle.wait()
result = future_handle.value()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor(
[
(iter * self.world_size)
+ (self.world_size * (self.world_size - 1) / 2)
]
),
result[0],
msg=("Mismatch in iteration %d with root rank %d" % (iter, root)),
)
def test_reduce_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_reduce_stress(inputs)
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_reduce_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_reduce_stress(inputs)
    def test_send_recv_all_to_all(self):
        """Point-to-point all-to-all: every rank sends its rank to every other rank."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        # Preallocate tensors for input/output
        inputs = [torch.tensor([self.rank]) for _ in range(self.world_size)]
        outputs = [torch.tensor([-1]) for _ in range(self.world_size)]
        # Issue sends
        send_work = []
        for i in range(self.world_size):
            if i == self.rank:
                continue
            send_work.append(pg.send([inputs[i]], i, 0))
        # Issue recvs
        recv_work = []
        for i in range(self.world_size):
            if i == self.rank:
                continue
            recv_work.append(pg.recv([outputs[i]], i, 0))
        # Wait for sends to complete
        for work in send_work:
            work.wait()
            self.assertTrue(work.is_completed())
        # Wait for recvs to complete
        for work in recv_work:
            work.wait()
            self.assertTrue(work.is_completed())
        # Test that every output other than our own contains the respective rank
        for i in range(self.world_size):
            if i == self.rank:
                continue
            self.assertEqual(torch.tensor([i]), outputs[i])
    def test_barrier_implies_wait(self):
        """A barrier must also flush outstanding allreduce work whose handles were dropped."""
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        # Kick off allreduce operations
        size = (100, 100)
        num = 16
        tensors = [torch.full(size, float(i)) for i in range(num)]
        for tensor in tensors:
            # Note: leak the returned work handle
            pg.allreduce(tensor)
        # Barrier should ensure all previous work has completed
        pg.barrier().get_future().wait()
        for i, tensor in enumerate(tensors):
            self.assertEqual(torch.full(size, float(i * self.world_size)), tensor)
    @skip_if_win32()
    def test_round_robin(self):
        """Round-robin wrapper cycles collectives over its member process groups."""
        num_process_groups = 2
        store = c10d.FileStore(self.file_name, self.world_size)
        pg = c10d._round_robin_process_groups(
            [
                c10d.ProcessGroupGloo(
                    # Distinct store prefixes keep member groups' keys separate.
                    c10d.PrefixStore(str(i), store), self.rank, self.world_size, self.opts()
                )
                for i in range(num_process_groups)
            ]
        )
        # Run a few collectives so that we have called each process group
        for _ in range(num_process_groups + 1):
            tensor = torch.full([100, 100], float(self.rank))
            pg.broadcast(tensor, root=0).wait()
            self.assertEqual(torch.full([100, 100], 0.0), tensor)
    @skip_if_win32()
    def test_round_robin_create_destroy(self):
        """Round-robin groups can be created, used, destroyed, and re-created safely."""
        store = c10d.FileStore(self.file_name, self.world_size)
        def create(num, prefix):
            # `prefix` is formatted via %s, so both ints and strings are accepted.
            return c10d._round_robin_process_groups(
                [
                    c10d.ProcessGroupGloo(
                        c10d.PrefixStore("%s/%d" % (prefix, i), store),
                        self.rank,
                        self.world_size,
                        self.opts()
                    )
                    for i in range(num)
                ]
            )
        # Run create/use/destroy twice
        for i in range(2):
            num_process_groups = 2
            pg = create(num=num_process_groups, prefix=i)
            for _ in range(3):
                tensor = torch.ones([10, 10])
                pg.allreduce(tensor).wait()
                self.assertEqual(torch.full([10, 10], float(self.world_size)), tensor)
            del pg
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class DistributedDataParallelTest(test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase):
    def setUp(self):
        """Start worker processes; Windows lacks fork, so spawn is used there."""
        super(DistributedDataParallelTest, self).setUp()
        if sys.platform == "win32":
            self._spawn_processes()
        else:
            self._fork_processes()
    def _test_gloo_backend(
        self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
    ):
        """Build a loopback-bound Gloo process group and run the shared DDP test.

        Delegates to ``_test_ddp_with_process_group`` with the given device
        configuration and gradient-bucket-view setting.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        options = c10d.ProcessGroupGloo._Options()
        # Bind to the loopback interface to avoid picking an external NIC in CI.
        options._devices = [create_device(interface=LOOPBACK)]
        process_group = c10d.ProcessGroupGloo(
            store, self.rank, self.world_size, options
        )
        self._test_ddp_with_process_group(
            process_group, devices, device_ids, multi_device, gradient_as_bucket_view
        )
    @requires_gloo()
    def test_gloo_backend_cpu_module(self):
        """DDP over Gloo with a CPU module."""
        self._test_gloo_backend([torch.device("cpu")], None)
    @requires_gloo()
    def test_gloo_backend_cpu_module_grad_is_view(self):
        """DDP over Gloo with a CPU module and gradients as bucket views."""
        self._test_gloo_backend([torch.device("cpu")], None, gradient_as_bucket_view=True)
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_gloo_backend_1gpu_module_device_ids_integer_list(self):
        """DDP over Gloo, one GPU per process, device_ids given as ints."""
        int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
        devices = [torch.device("cuda:" + str(i)) for i in int_devices]
        self._test_gloo_backend(devices, int_devices)
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_gloo_backend_1gpu_module_device_ids_torch_device_list(self):
        """DDP over Gloo, one GPU per process, device_ids given as torch.device."""
        int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
        devices = [torch.device("cuda:" + str(i)) for i in int_devices]
        self._test_gloo_backend(devices, devices)
    @requires_gloo()
    @skip_if_lt_x_gpu(4)
    def test_gloo_backend_2gpu_module(self):
        """DDP over Gloo with a multi-device module spanning two GPUs per process."""
        int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
        devices = [torch.device("cuda:" + str(i)) for i in int_devices]
        self._test_gloo_backend(devices, None, multi_device=True)
    @requires_gloo()
    @skip_if_lt_x_gpu(8)
    def test_gloo_backend_4gpu_module(self):
        """DDP over Gloo with a multi-device module spanning four GPUs per process."""
        int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
        devices = [torch.device("cuda:" + str(i)) for i in int_devices]
        self._test_gloo_backend(devices, None, multi_device=True)
    def _test_global_local_unused_params_grad(self, gradient_as_bucket_view=False, static_graph=False):
        """DDP with find_unused_parameters: used params get grads, unused ones stay None.

        The module uses t0 on rank 0 and t1 on other ranks, while task_unused
        is never used by any rank; after backward, only the used tasks'
        parameters may have gradients. Runs the same check on CPU and GPU.
        """
        class GlobalLocalUnusedParamModule(nn.Module):
            def __init__(self):
                super(GlobalLocalUnusedParamModule, self).__init__()
                self.t0 = Task()
                self.t1 = Task()
                self.task_unused = Task()
            def task_parameters(self):
                # Expose the underlying parameters for grad inspection.
                return (self.t0.p, self.t1.p, self.task_unused.p)
            def forward(self, x, rank):
                # Rank-dependent path: t0 on rank 0, t1 elsewhere; task_unused never runs.
                return self.t0(x) if rank == 0 else self.t1(x)
        def run_and_verify_grad(model):
            # Run forward
            output = model(8, self.rank)
            # The grads of all parameters should be None at this point.
            t0_p, t1_p, task_unused_p = model.module.task_parameters()
            self.assertIsNone(t0_p.grad)
            self.assertIsNone(t1_p.grad)
            self.assertIsNone(task_unused_p.grad)
            # Run backward
            output.mean().backward()
            # Now locally used parameter should have grad, and the globally
            # unused parameter should not.
            self.assertIsNotNone(t0_p.grad)
            self.assertIsNotNone(t1_p.grad)
            self.assertIsNone(task_unused_p.grad)
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
        # Test on CPU
        cpu_model = DistributedDataParallel(
            GlobalLocalUnusedParamModule().cpu(),
            process_group=process_group,
            find_unused_parameters=True,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )
        if static_graph:
            cpu_model._set_static_graph()
        run_and_verify_grad(cpu_model)
        # Test on GPU
        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        gpu_model = DistributedDataParallel(
            GlobalLocalUnusedParamModule().to(device_id),
            device_ids=[device_id],
            process_group=process_group,
            find_unused_parameters=True,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )
        if static_graph:
            gpu_model._set_static_graph()
        run_and_verify_grad(gpu_model)
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_global_local_unused_params_grad(self):
        """Unused-parameter gradient handling, default configuration."""
        self._test_global_local_unused_params_grad()
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_global_local_unused_params_grad_with_grad_is_view(self):
        """Unused-parameter gradient handling with gradients as bucket views."""
        self._test_global_local_unused_params_grad(gradient_as_bucket_view=True)
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_global_local_unused_params_grad_with_static_graph(self):
        """Unused-parameter gradient handling with a static graph."""
        self._test_global_local_unused_params_grad(static_graph=True)
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_find_unused_parameters_when_unused_parameters_empty(self):
        """find_unused_parameters=True must still work when every parameter is used.

        Rank 0 runs t1(t0(x)) and other ranks run t1(x), so across ranks all
        parameters participate; after backward every parameter must have a grad.
        """
        class FindUnusedParamModule(nn.Module):
            def __init__(self):
                super(FindUnusedParamModule, self).__init__()
                self.t0 = Task()
                self.t1 = Task()
            def task_parameters(self):
                return (self.t0.p, self.t1.p)
            def forward(self, x, rank):
                return self.t1(self.t0(x)) if rank == 0 else self.t1(x)
        def run_and_verify_grad(model):
            # Run forward
            output = model(8, self.rank)
            # The grads of all parameters should be None at this point.
            [self.assertIsNone(t_p.grad) for t_p in model.module.task_parameters()]
            # Run backward
            output.mean().backward()
            # Now all parameters should have their grads populated.
            [self.assertIsNotNone(t_p.grad) for t_p in model.module.task_parameters()]
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
        # Test on CPU
        cpu_model = DistributedDataParallel(
            FindUnusedParamModule().cpu(),
            process_group=process_group,
            find_unused_parameters=True,
        )
        run_and_verify_grad(cpu_model)
        # Test on GPU
        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        gpu_model = DistributedDataParallel(
            FindUnusedParamModule().to(device_id),
            device_ids=[device_id],
            process_group=process_group,
            find_unused_parameters=True,
        )
        run_and_verify_grad(gpu_model)
@requires_gloo()
def test_ignored_output(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
class IgnoredOutput(nn.Module):
def __init__(self):
super(IgnoredOutput, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
model = DistributedDataParallel(
IgnoredOutput().float(),
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
for _ in range(4):
output = model(input)
del output
for _ in range(4):
output = model(input)
loss = criterion(output, target)
loss.backward()
@requires_gloo()
def test_ignored_output_with_unused_parameters(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
class IgnoredOutputWithUnusedParameters(nn.Module):
def __init__(self):
super(IgnoredOutputWithUnusedParameters, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
model = DistributedDataParallel(
IgnoredOutputWithUnusedParameters().float(),
process_group=process_group,
find_unused_parameters=True,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
for _ in range(4):
output = model(input)
del output
for _ in range(4):
output = model(input)
loss = criterion(output, target)
loss.backward()
def _run_and_verify_sparse_gradients(self, vanilla_model, ddp_model):
mult = 2
batch_size = mult * self.world_size
criterion = nn.CrossEntropyLoss()
input = torch.randint(0, 10, [batch_size, 2])
target = torch.randint(0, 10, [batch_size])
criterion(vanilla_model(input), target).backward()
partial_input = input.split(mult)[self.rank]
partial_target = target.split(mult)[self.rank]
criterion(ddp_model(partial_input), partial_target).backward()
vanilla_parameter = next(vanilla_model.parameters())
ddp_parameter = next(ddp_model.parameters())
self.assertEqual(vanilla_parameter.grad, ddp_parameter.grad)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_save_load_checkpoint(self):
dist.init_process_group(
"gloo",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
def train_loop(model, optimizer, iterations):
for _ in range(iterations):
optimizer.zero_grad()
output = model(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model_withload = TestModel().float().to(device_id)
model_withoutload = TestModel().float().to(device_id)
ddp_withload = DistributedDataParallel(
model_withload,
device_ids=[device_id],
)
ddp_withoutload = DistributedDataParallel(
model_withoutload,
device_ids=[device_id],
)
for p in ddp_withload.parameters():
with torch.no_grad():
p.zero_()
for p in model_withload.parameters():
with torch.no_grad():
p.zero_()
for p in ddp_withoutload.parameters():
with torch.no_grad():
p.zero_()
batch_size = 4
criterion = nn.CrossEntropyLoss()
optimizer_withload = torch.optim.SGD(ddp_withload.parameters(), lr=0.001)
optimizer_non_ddp_withload = torch.optim.SGD(model_withload.parameters(), lr=0.001)
optimizer_withoutload = torch.optim.SGD(ddp_withoutload.parameters(), lr=0.001)
input = torch.rand([batch_size, 2], dtype=torch.float).to(device_id)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
train_loop(ddp_withload, optimizer_withload, 3)
checkpoint_path = tempfile.gettempdir() + "/model.checkpoint"
if self.rank == 0:
torch.save(ddp_withload.state_dict(), checkpoint_path)
dist.barrier()
map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank}
ddp_state_dict = torch.load(checkpoint_path, map_location=map_location)
for model in [ddp_withload, model_withload]:
for p in ddp_withload.parameters():
with torch.no_grad():
p.zero_()
ddp_withload.load_state_dict(ddp_state_dict)
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_state_dict, "module.")
model_withload.load_state_dict(ddp_state_dict)
train_loop(ddp_withload, optimizer_withload, 3)
train_loop(model_withload, optimizer_non_ddp_withload, 3)
train_loop(ddp_withoutload, optimizer_withoutload, 6)
for p_withload, p_withoutload, p_non_ddp_withload in zip(
ddp_withload.parameters(), ddp_withoutload.parameters(), model_withload.parameters()
):
self.assertEqual(p_withload, p_withoutload)
self.assertEqual(p_non_ddp_withload, p_withoutload)
def _test_sparse_gradients(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
torch.manual_seed(1337)
vanilla_model = SparseGradientModule()
ddp_model = DistributedDataParallel(
copy.deepcopy(vanilla_model),
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)
    @requires_gloo()
    def test_sparse_gradients(self):
        """Sparse gradients with default bucket handling."""
        self._test_sparse_gradients()
    @requires_gloo()
    def test_sparse_gradients_grad_is_view(self):
        """Sparse gradients with gradient_as_bucket_view=True."""
        self._test_sparse_gradients(gradient_as_bucket_view=True)
@requires_gloo()
def test_ddp_comm_hook_future_passing_cpu(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
cpu_model = DistributedDataParallel(
ModuleForDdpCommHook().cpu(), process_group=process_group
)
cpu_model.register_comm_hook(None, self._simple_hook)
self._run_and_verify_hook(cpu_model, 8, 2 * torch.ones(2, 2))
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_future_passing_gpu_gloo(self):
        """Simple comm hook on a GPU DDP model over a gloo process group."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
        # Build a GPU DDP model with self._simple_hook registered.
        gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
        # The hook makes the grads resolve to 2 * ones(2, 2);
        # without the comm_hook, result would be 0.25 * torch.ones(2, 2).
        self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
    @requires_gloo()
    def test_ddp_invalid_comm_hook_init(self):
        """Invalid comm hooks must be rejected at registration time:
        non-callables and hooks whose bucket annotation is wrong."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
        model = DistributedDataParallel(
            ModuleForDdpCommHook(), process_group=process_group
        )
        # A non-callable hook is rejected with a TypeError.
        with self.assertRaisesRegex(TypeError, "Communication hook must be callable."):
            model.register_comm_hook(state=None, hook=1)
        # A hook whose bucket parameter is annotated with anything other than
        # dist.GradBucket is rejected with a ValueError.
        with self.assertRaisesRegex(
            ValueError, "bucket annotation should be dist.GradBucket."
        ):
            def comm_hook(state: object, bucket: int) -> torch.futures.Future:
                return torch.futures.Future()
            model.register_comm_hook(state=None, hook=comm_hook)
    @requires_gloo()
    def test_ddp_invalid_comm_hook_return_type(self):
        """A comm hook must return a Future: a wrong return annotation fails
        at registration; a wrong runtime return value fails during backward."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
        model = DistributedDataParallel(
            ModuleForDdpCommHook(), process_group=process_group
        )
        expected_err = "Communication hook: return annotation should be torch.futures.Future or torch._C.Future."
        # Wrong return annotation: rejected at registration time, and the
        # error must also show up in DDP's logged error data.
        with self.assertRaisesRegex(
            ValueError,
            expected_err,
        ):
            def comm_hook(state: object, bucket: dist.GradBucket) -> int:
                return torch.futures.Future()
            model.register_comm_hook(state=None, hook=comm_hook)
        verify_ddp_error_logged(model, expected_err)
        # Correct annotation but wrong runtime return value: the failure
        # surfaces only when the hook actually runs during backward.
        with self.assertRaisesRegex(
            RuntimeError,
            "callback must return a torch.futures.Future or torch._C.Future object, but got",
        ):
            def comm_hook(state: object, bucket: dist.GradBucket):
                return 1
            model.register_comm_hook(state=None, hook=comm_hook)
            # Run forward
            output = model(8, self.rank)
            # Run backward
            output.mean().backward()
@requires_gloo()
def test_ddp_comm_hook_register_just_once(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
model = DistributedDataParallel(
ModuleForDdpCommHook(), process_group=process_group
)
def dummy_hook(state, bucket):
fut = torch.futures.Future()
fut.set_result([bucket.get_tensor()])
return fut
model.register_comm_hook(None, dummy_hook)
with self.assertRaisesRegex(
RuntimeError,
"register_comm_hook or register_builtin_comm_hook can only be called once.",
):
model.register_comm_hook(None, dummy_hook)
    @requires_gloo()
    def test_ddp_comm_hook_sparse_gradients(self):
        """An allreduce-then-average comm hook must reproduce the vanilla
        sparse-gradient result (checked by _run_and_verify_sparse_gradients)."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
        # Ensure initialized weights and inputs are identical across processes
        torch.manual_seed(1337)
        vanilla_model = SparseGradientModule()
        ddp_model = DistributedDataParallel(
            copy.deepcopy(vanilla_model),
            process_group=process_group,
        )
        def allreduce_hook_gloo(
            state: object, bucket: dist.GradBucket
        ) -> torch.futures.Future:
            def div_by_world_size(fut):
                # Average: divide the allreduced sum by world_size.
                return [t / self.world_size for t in fut.wait()]
            # Prepare allreduced grad bucket tensors by running an async work.
            fut = process_group.allreduce([bucket.get_tensor()]).get_future()
            return fut.then(div_by_world_size)
        ddp_model.register_comm_hook(None, allreduce_hook_gloo)
        self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)
class ReducerModule(nn.Module):
    """Small MLP used by the Reducer tests.

    ``use_fc3=False`` skips the third linear layer, which lets tests
    exercise an unused parameter.
    """

    def __init__(self):
        super(ReducerModule, self).__init__()
        self.fc1 = nn.Linear(2, 10, bias=False)
        self.fc2 = nn.Linear(10, 4, bias=False)
        self.fc3 = nn.Linear(4, 4, bias=False)
        self.relu = nn.ReLU()

    def forward(self, x, use_fc3=True):
        out = self.relu(self.fc1(x)).float()
        out = self.relu(self.fc2(out)).float()
        if use_fc3:
            out = self.fc3(out).float()
        return F.softmax(out, dim=1)
@requires_gloo()
class ReducerTest(TestCase):
    """Direct tests of the ``dist.Reducer`` binding using a single-rank
    FileStore-backed gloo process group."""
    def setUp(self):
        # delete=False keeps the backing file on disk while the store uses it.
        self.file = tempfile.NamedTemporaryFile(delete=False)
        self.store = c10d.FileStore(self.file.name, 1)
        self.process_group = c10d.ProcessGroupGloo(self.store, 0, 1)
    def test_single_dtype_single_bucket(self):
        """All parameters share one dtype, so one bucket must be accepted."""
        model = ReducerModule()
        parameters = list(model.parameters())
        buckets = [list(range(len(parameters)))]
        dist.Reducer([parameters], buckets, self.process_group)
    def _create_mixed_precision_model(self):
        # fc1 in double precision, the rest in float -> two dtypes overall.
        model = ReducerModule()
        model.float()
        model.fc1.double()
        return model
    def test_multi_dtype_single_bucket(self):
        """Mixed dtypes in a single bucket must be rejected."""
        model = self._create_mixed_precision_model()
        # Raise if there are multiple types per bucket.
        # In this case we create one bucket for all parameters.
        with self.assertRaises(RuntimeError):
            parameters = [list(model.parameters())]
            buckets = [list(range(len(parameters[0])))]
            dist.Reducer(parameters, buckets, self.process_group)
    def test_multi_dtype_multi_bucket(self):
        """One bucket per dtype is a valid configuration."""
        model = self._create_mixed_precision_model()
        parameters = [list(model.parameters())]
        group_by_dtype = groupby(
            range(len(parameters[0])), key=lambda i: parameters[0][i].dtype
        )
        buckets = [list(indices) for _, indices in group_by_dtype]
        dist.Reducer(parameters, buckets, self.process_group)
    def _create_reducer_for_models(self, models, find_unused_parameters=False):
        # Helper: bucket parameters by dtype and build a Reducer over them.
        parameters = [list(model.parameters()) for model in models]
        group_by_dtype = groupby(
            range(len(parameters[0])), key=lambda i: parameters[0][i].dtype
        )
        buckets = [list(indices) for _, indices in group_by_dtype]
        return dist.Reducer(
            parameters,
            buckets,
            self.process_group,
            find_unused_parameters=find_unused_parameters,
        )
    def test_reducer_no_multi_replicas(self):
        """Passing parameters for more than one model replica must fail."""
        num_replicas = 2
        models = [self._create_mixed_precision_model() for _ in range(num_replicas)]
        with self.assertRaisesRegex(
            RuntimeError,
            "Expected exactly one model replica.",
        ):
            reducer = self._create_reducer_for_models(models)
    def test_forward_backward(self):
        """A plain forward/backward pass through the Reducer must succeed."""
        batch_size = 10
        model = self._create_mixed_precision_model()
        reducer = self._create_reducer_for_models([model])
        reducer.prepare_for_forward()
        loss = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.double)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
        output = loss(model(input), target)
        reducer.prepare_for_backward(output)
        output.backward()
    def test_forward_backward_unused_parameters(self):
        """fc3 is skipped in forward; its grad must remain unset even with
        find_unused_parameters=True."""
        batch_size = 10
        model = self._create_mixed_precision_model()
        reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
        reducer.prepare_for_forward()
        loss = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.double)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
        output = loss(model(input, use_fc3=False), target)
        # Check that the grad of fc3 is not set.
        self.assertEqual(None, model.fc3.weight.grad)
        # Compute and accumulate gradients.
        reducer.prepare_for_backward(output)
        output.backward()
        # The reducer will have marked the grad of fc3 as ready, because
        # it doesn't show up in the autograd graph of `output`. Since fc3.weight
        # never produced a local gradient, it should still be unset here.
        self.assertEqual(None, model.fc3.weight.grad)
    def test_forward_backward_optimizer(self):
        """Reducer must interoperate with an optimizer, including an
        iteration where a parameter is unused."""
        batch_size = 10
        model = self._create_mixed_precision_model()
        reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
        reducer.prepare_for_forward()
        loss = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters())
        for i in range(3):
            input = torch.rand([batch_size, 2], dtype=torch.double)
            target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
            optimizer.zero_grad()
            # Unused parameter only in the first iteration.
            output = loss(model(input, use_fc3=(i > 0)), target)
            reducer.prepare_for_backward(output)
            output.backward()
            optimizer.step()
@unittest.skipIf(
    TEST_WITH_TSAN,
    "TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
    """Multi-process tests for c10d helper collectives on the gloo backend:
    coalesced broadcast, sequence numbers, and barrier argument checks."""
    def setUp(self):
        super(CommTest, self).setUp()
        # Windows has no fork(); spawn worker processes there, fork elsewhere.
        if sys.platform == "win32":
            self._spawn_processes()
        else:
            self._fork_processes()
    def tearDown(self):
        super(CommTest, self).tearDown()
        # Best-effort cleanup of the FileStore backing file.
        try:
            os.remove(self.file_name)
        except OSError:
            pass
    def _test_broadcast_coalesced(self, process_group, device, root_rank):
        half = torch.float16
        # No support for float16 on CPU tensors; fall back to float32 there.
        if device == torch.device("cpu"):
            half = torch.float32
        # Mixed-dtype target (half/float32/float64 chunks interleaved) so the
        # coalesced broadcast must handle several dtypes in one call.
        target = torch.arange(60, dtype=half, device=device).chunk(5)
        target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
        target += torch.arange(60, dtype=half, device=device).chunk(5)
        target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
        target += torch.arange(60, dtype=half, device=device).chunk(5)
        target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
        # Only the root holds the target values; every other rank starts
        # from zeros and must receive them via the broadcast.
        if self.rank == root_rank:
            tensors = list(tensor.clone() for tensor in target)
        else:
            tensors = list(torch.zeros_like(tensor) for tensor in target)
        if self.rank != root_rank:
            self.assertNotEqual(tensors, target)
        c10d._broadcast_coalesced(
            process_group, tensors, buffer_size=256, src=root_rank
        )
        if self.rank != root_rank:
            self.assertEqual(tensors, target)
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_broadcast_coalesced_gloo_cuda(self):
        """Coalesced broadcast of CUDA tensors from every possible root."""
        store = c10d.FileStore(self.file_name, self.world_size)
        options = c10d.ProcessGroupGloo._Options()
        options._devices = [create_device(interface=LOOPBACK)]
        process_group = c10d.ProcessGroupGloo(
            store, self.rank, self.world_size, options
        )
        device = torch.device("cuda:%d" % self.rank)
        ranks = list(range(self.world_size))
        for root_rank in ranks:
            self._test_broadcast_coalesced(process_group, device, root_rank)
    @requires_gloo()
    def test_broadcast_coalesced_gloo_cpu(self):
        """Coalesced broadcast of CPU tensors from every possible root."""
        store = c10d.FileStore(self.file_name, self.world_size)
        options = c10d.ProcessGroupGloo._Options()
        options._devices = [create_device(interface=LOOPBACK)]
        process_group = c10d.ProcessGroupGloo(
            store, self.rank, self.world_size, options
        )
        device = torch.device("cpu")
        ranks = list(range(self.world_size))
        for root_rank in ranks:
            self._test_broadcast_coalesced(process_group, device, root_rank)
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_sequence_num_set_default_pg_gloo(self):
        self._test_sequence_num_set_default_pg(backend="gloo")
    @requires_gloo()
    @skip_if_lt_x_gpu(2)
    def test_sequence_num_set_gloo_new_group(self):
        self._test_sequence_num_set_new_group(backend="gloo")
    @skip_if_lt_x_gpu(2)
    @requires_gloo()
    def test_sequence_num_incremented_gloo_default(self):
        self._test_sequence_num_incremented_default_group("gloo")
    @skip_if_lt_x_gpu(4)
    @requires_gloo()
    def test_sequence_num_incremented_gloo_subgroup(self):
        # Subgroup test needs at least 4 ranks to be meaningful.
        if self.world_size < 4:
            return unittest.skip("Test requires world_size of at least 4")
        self._test_sequence_num_incremented_subgroup("gloo")
    @requires_gloo()
    def test_gloo_barrier_device_ids(self):
        """The gloo barrier must reject the device_ids argument."""
        store = c10d.FileStore(self.file_name, self.world_size)
        c10d.init_process_group(
            backend="gloo", rank=self.rank, world_size=self.world_size, store=store
        )
        with self.assertRaisesRegex(RuntimeError, "device_ids not supported"):
            c10d.barrier(device_ids=[self.rank])
if __name__ == "__main__":
    # Guard: CUDA must not be initialized in the parent process before the
    # tests fork/spawn their worker processes (see assert message).
    assert (
        not torch.cuda._initialized
    ), "test_distributed must not have initialized CUDA context on main process"
    run_tests()
# NOTE(review): removed non-Python residue appended by a dataset/file viewer
# ("| true | true |", "Subsets and Splits", "No community queries yet", ...).
# It was not part of the original test module and broke parsing.