"""
Vectorized string methods for Series and Index (implementation behind the
``.str`` accessor).
"""
import numpy as np
from pandas.compat import zip
from pandas.core.dtypes.generic import ABCSeries, ABCIndex
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.common import (
is_bool_dtype,
is_categorical_dtype,
is_object_dtype,
is_string_like,
is_list_like,
is_scalar,
is_integer,
is_re)
from pandas.core.common import _values_from_object
from pandas.core.algorithms import take_1d
import pandas.compat as compat
from pandas.core.base import AccessorProperty, NoNewAttributesMixin
from pandas.util._decorators import Appender
import re
import pandas._libs.lib as lib
import warnings
import textwrap
import codecs
# Encodings for which CPython's str/bytes .encode has a fast built-in path;
# for these we call the method directly instead of going through `codecs`.
# ("mbcs" is Windows-only.)
_cpython_optimized_encoders = (
    "utf-8", "utf8", "latin-1", "latin1", "iso-8859-1", "mbcs", "ascii"
)
# Decoding additionally has optimized built-in paths for UTF-16/UTF-32.
_cpython_optimized_decoders = _cpython_optimized_encoders + (
    "utf-16", "utf-32"
)
# Registry of docstring templates shared between module-level str_* functions
# and the StringMethods accessor wrappers.
_shared_docs = dict()
def _get_array_list(arr, others):
    """
    Coerce `arr` plus `others` into a list of object-dtype ndarrays.

    If the first element of `others` is itself list-like, each element of
    `others` becomes its own array; otherwise `others` is treated as a
    single parallel column.
    """
    from pandas.core.series import Series

    listlike = (list, np.ndarray, Series)
    if len(others) and isinstance(_values_from_object(others)[0], listlike):
        arrays = [arr] + list(others)
    else:
        arrays = [arr, others]

    return [np.asarray(a, dtype=object) for a in arrays]
def str_cat(arr, others=None, sep=None, na_rep=None):
    """
    Concatenate strings in the Series/Index with given separator.

    Parameters
    ----------
    others : list-like, or list of list-likes
        If None, returns str concatenating strings of the Series
    sep : string or None, default None
    na_rep : string or None, default None
        If None, NA in the series are ignored.

    Returns
    -------
    concat : Series/Index of objects or str

    Examples
    --------
    When ``na_rep`` is `None` (default behavior), NaN value(s)
    in the Series are ignored.

    >>> Series(['a','b',np.nan,'c']).str.cat(sep=' ')
    'a b c'

    >>> Series(['a','b',np.nan,'c']).str.cat(sep=' ', na_rep='?')
    'a b ? c'

    If ``others`` is specified, corresponding values are
    concatenated with the separator. Result will be a Series of strings.

    >>> Series(['a', 'b', 'c']).str.cat(['A', 'B', 'C'], sep=',')
    0    a,A
    1    b,B
    2    c,C
    dtype: object

    Otherwise, strings in the Series are concatenated. Result will be a string.

    >>> Series(['a', 'b', 'c']).str.cat(sep=',')
    'a,b,c'

    Also, you can pass a list of list-likes.

    >>> Series(['a', 'b']).str.cat([['x', 'y'], ['1', '2']], sep=',')
    0    a,x,1
    1    b,y,2
    dtype: object
    """
    if sep is None:
        sep = ''

    if others is not None:
        # Element-wise concatenation across multiple equal-length columns.
        arrays = _get_array_list(arr, others)

        n = _length_check(arrays)
        # masks[i] marks the NA positions of column i
        masks = np.array([isna(x) for x in arrays])
        cats = None

        if na_rep is None:
            # A row is NA in the result if ANY column is NA at that row.
            na_mask = np.logical_or.reduce(masks, axis=0)

            result = np.empty(n, dtype=object)
            np.putmask(result, na_mask, np.nan)

            notmask = ~na_mask

            # Join row-wise across columns, skipping fully/partially-NA rows.
            tuples = zip(*[x[notmask] for x in arrays])
            cats = [sep.join(tup) for tup in tuples]

            result[notmask] = cats
        else:
            # With na_rep, substitute NAs column-by-column and concatenate
            # the columns with numpy string addition.
            for i, x in enumerate(arrays):
                x = np.where(masks[i], na_rep, x)
                if cats is None:
                    cats = x
                else:
                    cats = cats + sep + x

            result = cats

        return result
    else:
        # Reduce the single column to one scalar string.
        arr = np.asarray(arr, dtype=object)
        mask = isna(arr)
        if na_rep is None and mask.any():
            if sep == '':
                # empty separator: dropping NAs is the same as replacing
                # them with '' (avoids an extra fancy-indexing pass)
                na_rep = ''
            else:
                # skip NA values entirely rather than inserting separators
                # around them
                return sep.join(arr[notna(arr)])
        return sep.join(np.where(mask, na_rep, arr))
def _length_check(others):
n = None
for x in others:
try:
if n is None:
n = len(x)
elif len(x) != n:
raise ValueError('All arrays must be same length')
except TypeError:
raise ValueError("Did you mean to supply a `sep` keyword?")
return n
def _na_map(f, arr, na_result=np.nan, dtype=object):
    """
    Map ``f`` over ``arr`` element-wise, with NA entries masked out.

    NA entries are never passed to ``f``; they become ``na_result`` in
    the output.
    """
    # should really _check_ for NA
    return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
    """
    Apply ``f`` to every non-NA element of ``arr`` via the cython
    map_infer machinery.

    If ``f`` raises TypeError/AttributeError on some element (e.g. a
    string method applied to a non-string), the whole map is retried
    with a wrapper that converts such failures to ``na_value`` —
    unless the error message indicates ``f`` itself was called with the
    wrong number of arguments, in which case the error is re-raised so
    the user sees the real bug instead of silent NaNs.
    """
    if not len(arr):
        return np.ndarray(0, dtype=dtype)

    if isinstance(arr, ABCSeries):
        arr = arr.values
    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr, dtype=object)
    if na_mask:
        mask = isna(arr)
        try:
            # only attempt dtype inference if at least one value is non-NA
            convert = not all(mask)
            result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
        except (TypeError, AttributeError) as e:
            # Reraise the exception if callable `f` got wrong number of args.
            # The user may want to be warned by this, instead of getting NaN
            if compat.PY2:
                p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
            else:
                # (?(2)...)/(?(3)...) are conditional groups matching the
                # two CPython arity-error message formats
                p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
                         r'(?(3)required )positional arguments?')

            if len(e.args) >= 1 and re.search(p_err, e.args[0]):
                raise e

            def g(x):
                try:
                    return f(x)
                except (TypeError, AttributeError):
                    return na_value

            # retry with per-element error suppression (no na_mask needed;
            # g already maps failures to na_value)
            return _map(g, arr, dtype=dtype)
        if na_value is not np.nan:
            np.putmask(result, mask, na_value)
            if result.dtype == object:
                result = lib.maybe_convert_objects(result)

        return result
    else:
        return lib.map_infer(arr, f)
def str_count(arr, pat, flags=0):
    """
    Count occurrences of pattern in each string of the Series/Index.

    Parameters
    ----------
    pat : string, valid regular expression
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    counts : Series/Index of integer values
    """
    regex = re.compile(pat, flags=flags)

    def count_matches(x):
        return len(regex.findall(x))

    return _na_map(count_matches, arr, dtype=int)
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
    """
    Return boolean Series/``array`` whether given pattern/regex is
    contained in each string in the Series/Index.

    Parameters
    ----------
    pat : string
        Character sequence or regular expression
    case : boolean, default True
        If True, case sensitive
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE
    na : default NaN, fill value for missing values.
    regex : bool, default True
        If True use re.search, otherwise use Python in operator

    Returns
    -------
    contained : Series/array of boolean values

    See Also
    --------
    match : analogous, but stricter, relying on re.match instead of re.search
    """
    if regex:
        if not case:
            flags |= re.IGNORECASE

        regex = re.compile(pat, flags=flags)

        if regex.groups > 0:
            # capture groups are pointless for a containment test;
            # steer the user toward str.extract instead
            warnings.warn("This pattern has match groups. To actually get the"
                          " groups, use str.extract.", UserWarning,
                          stacklevel=3)

        f = lambda x: bool(regex.search(x))
    else:
        if case:
            f = lambda x: pat in x
        else:
            # case-insensitive literal containment: uppercase both the
            # pattern and (in a first NA-aware pass) every element,
            # then test containment on the uppercased values
            upper_pat = pat.upper()
            f = lambda x: upper_pat in x
            uppered = _na_map(lambda x: x.upper(), arr)
            return _na_map(f, uppered, na, dtype=bool)
    return _na_map(f, arr, na, dtype=bool)
def str_startswith(arr, pat, na=np.nan):
    """
    Return boolean Series/``array`` indicating whether each string in the
    Series/Index starts with passed pattern. Equivalent to
    :meth:`str.startswith`.

    Parameters
    ----------
    pat : string
        Character sequence
    na : bool, default NaN
        Fill value for missing entries.

    Returns
    -------
    startswith : Series/array of boolean values
    """
    return _na_map(lambda x: x.startswith(pat), arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
    """
    Return boolean Series indicating whether each string in the
    Series/Index ends with passed pattern. Equivalent to
    :meth:`str.endswith`.

    Parameters
    ----------
    pat : string
        Character sequence
    na : bool, default NaN
        Fill value for missing entries.

    Returns
    -------
    endswith : Series/array of boolean values
    """
    return _na_map(lambda x: x.endswith(pat), arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=None, flags=0):
    r"""
    Replace occurrences of pattern/regex in the Series/Index with
    some other string. Equivalent to :meth:`str.replace` or
    :func:`re.sub`.

    Parameters
    ----------
    pat : string or compiled regex
        String can be a character sequence or regular expression.

        .. versionadded:: 0.20.0
            `pat` also accepts a compiled regex.

    repl : string or callable
        Replacement string or a callable. The callable is passed the regex
        match object and must return a replacement string to be used.
        See :func:`re.sub`.

        .. versionadded:: 0.20.0
            `repl` also accepts a callable.

    n : int, default -1 (all)
        Number of replacements to make from start
    case : boolean, default None
        - If True, case sensitive (the default if `pat` is a string)
        - Set to False for case insensitive
        - Cannot be set if `pat` is a compiled regex
    flags : int, default 0 (no flags)
        - re module flags, e.g. re.IGNORECASE
        - Cannot be set if `pat` is a compiled regex

    Returns
    -------
    replaced : Series/Index of objects

    Notes
    -----
    When `pat` is a compiled regex, all flags should be included in the
    compiled regex. Use of `case` or `flags` with a compiled regex will
    raise an error.

    Examples
    --------
    When `repl` is a string, every `pat` is replaced as with
    :meth:`str.replace`. NaN value(s) in the Series are left as is.

    >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', 'b')
    0    boo
    1    buz
    2    NaN
    dtype: object

    When `repl` is a callable, it is called on every `pat` using
    :func:`re.sub`. The callable should expect one positional argument
    (a regex object) and return a string.

    To get the idea:

    >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)
    0    <_sre.SRE_Match object; span=(0, 1), match='f'>oo
    1    <_sre.SRE_Match object; span=(0, 1), match='f'>uz
    2                                                NaN
    dtype: object

    Reverse every lowercase alphabetic word:

    >>> repl = lambda m: m.group(0)[::-1]
    >>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)
    0    oof 123
    1    rab zab
    2        NaN
    dtype: object

    Using regex groups (extract second group and swap case):

    >>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
    >>> repl = lambda m: m.group('two').swapcase()
    >>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)
    0    tWO
    1    bAR
    dtype: object

    Using a compiled regex with flags

    >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
    >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')
    0    foo
    1    bar
    2    NaN
    dtype: object
    """

    # Check whether repl is valid (GH 13438, GH 15055)
    if not (is_string_like(repl) or callable(repl)):
        raise TypeError("repl must be a string or callable")

    is_compiled_re = is_re(pat)
    if is_compiled_re:
        if (case is not None) or (flags != 0):
            raise ValueError("case and flags cannot be set"
                             " when pat is a compiled regex")
    else:
        # not a compiled regex
        # set default case
        if case is None:
            case = True

        # add case flag, if provided
        if case is False:
            flags |= re.IGNORECASE

    # BUG FIX: previously the literal str.replace fast path was taken for
    # ANY single-character string pattern, so regex metacharacters such as
    # '.' or '$' were silently replaced literally, contradicting the
    # documented "character sequence or regular expression" contract.
    # Only take the literal path when the single character has no special
    # regex meaning (i.e. re.escape leaves it unchanged).
    use_re = (is_compiled_re or len(pat) > 1 or flags or callable(repl)
              or pat != re.escape(pat))

    if use_re:
        # re.sub treats a negative count as 0 on its own terms; normalize
        # the pandas convention (n=-1 means "all") to re's (count=0)
        n = n if n >= 0 else 0
        regex = re.compile(pat, flags=flags)
        f = lambda x: regex.sub(repl=repl, string=x, count=n)
    else:
        # plain literal single-character replacement
        f = lambda x: x.replace(pat, repl, n)

    return _na_map(f, arr)
def str_repeat(arr, repeats):
    """
    Duplicate each string in the Series/Index by indicated number
    of times.

    Parameters
    ----------
    repeats : int or array
        Same value for all (int) or different value per (array)

    Returns
    -------
    repeated : Series/Index of objects
    """
    if is_scalar(repeats):

        def rep(x):
            # try bytes.__mul__ first, fall back to str.__mul__; calling
            # the unbound dunder raises TypeError (rather than returning
            # NotImplemented) for the wrong type, which drives the fallback
            try:
                return compat.binary_type.__mul__(x, repeats)
            except TypeError:
                return compat.text_type.__mul__(x, repeats)

        return _na_map(rep, arr)
    else:

        def rep(x, r):
            # same bytes-vs-text dispatch, but with a per-element count
            try:
                return compat.binary_type.__mul__(x, r)
            except TypeError:
                return compat.text_type.__mul__(x, r)

        repeats = np.asarray(repeats, dtype=object)
        # vectorized element-wise binary op pairing each string with its count
        result = lib.vec_binop(_values_from_object(arr), repeats, rep)
        return result
def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=None):
    """
    Determine if each string matches a regular expression.

    Parameters
    ----------
    pat : string
        Character sequence or regular expression
    case : boolean, default True
        If True, case sensitive
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE
    na : default NaN, fill value for missing values.
    as_indexer : DEPRECATED - Keyword is ignored.

    Returns
    -------
    Series/array of boolean values

    See Also
    --------
    contains : analogous, but less strict, relying on re.search instead of
        re.match
    extract : extract matched groups
    """
    if not case:
        flags |= re.IGNORECASE

    regex = re.compile(pat, flags=flags)

    if (as_indexer is False) and (regex.groups > 0):
        # the old as_indexer=False behaviour (returning extracted groups)
        # was removed; point users at str.extract
        raise ValueError("as_indexer=False with a pattern with groups is no "
                         "longer supported. Use '.str.extract(pat)' instead")
    elif as_indexer is not None:
        # Previously, this keyword was used for changing the default but
        # deprecated behaviour. This keyword is now no longer needed.
        warnings.warn("'as_indexer' keyword was specified but is ignored "
                      "(match now returns a boolean indexer by default), "
                      "and will be removed in a future version.",
                      FutureWarning, stacklevel=3)

    dtype = bool
    f = lambda x: bool(regex.match(x))

    return _na_map(f, arr, na, dtype=dtype)
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def _groups_or_na_fun(regex):
"""Used in both extract_noexpand and extract_frame"""
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
empty_row = [np.nan] * regex.groups
def f(x):
if not isinstance(x, compat.string_types):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
return f
def _str_extract_noexpand(arr, pat, flags=0):
    """
    Find groups in each string in the Series using passed regular
    expression. This function is called from
    str_extract(expand=False), and can return Series, DataFrame, or
    Index.
    """
    from pandas import DataFrame, Index

    regex = re.compile(pat, flags=flags)
    groups_or_na = _groups_or_na_fun(regex)

    if regex.groups == 1:
        # single group -> flat object array; the group's name (if any)
        # becomes the result's name
        result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
        name = _get_single_group_name(regex)
    else:
        # multiple groups -> DataFrame, one column per group
        if isinstance(arr, Index):
            raise ValueError("only one regex group is supported with Index")
        name = None
        # map group number -> group name for named groups; unnamed groups
        # fall back to their zero-based position as the column label
        names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
        columns = [names.get(1 + i, i) for i in range(regex.groups)]
        if arr.empty:
            result = DataFrame(columns=columns, dtype=object)
        else:
            result = DataFrame(
                [groups_or_na(val) for val in arr],
                columns=columns,
                index=arr.index,
                dtype=object)
    return result, name
def _str_extract_frame(arr, pat, flags=0):
    """
    For each subject string in the Series, extract groups from the
    first match of regular expression pat. This function is called from
    str_extract(expand=True), and always returns a DataFrame.
    """
    from pandas import DataFrame

    regex = re.compile(pat, flags=flags)
    groups_or_na = _groups_or_na_fun(regex)

    # column labels: group name where available, else zero-based position
    names = {num: name for name, num in regex.groupindex.items()}
    columns = [names.get(1 + i, i) for i in range(regex.groups)]

    if len(arr) == 0:
        return DataFrame(columns=columns, dtype=object)

    # preserve the subject's index when it has one (Series); plain
    # sequences get a default index
    result_index = getattr(arr, "index", None)
    rows = [groups_or_na(val) for val in arr]
    return DataFrame(rows, columns=columns, index=result_index, dtype=object)
def str_extract(arr, pat, flags=0, expand=None):
    """
    For each subject string in the Series, extract groups from the
    first match of regular expression pat.

    .. versionadded:: 0.13.0

    Parameters
    ----------
    pat : string
        Regular expression pattern with capturing groups
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE
    expand : bool, default False
        * If True, return DataFrame.
        * If False, return Series/Index/DataFrame.

        .. versionadded:: 0.18.0

    Returns
    -------
    DataFrame with one row for each subject string, and one column for
    each group. Any capture group names in regular expression pat will
    be used for column names; otherwise capture group numbers will be
    used. The dtype of each result column is always object, even when
    no match is found. If expand=False and pat has only one capture group,
    then return a Series (if subject is a Series) or Index (if subject
    is an Index).

    See Also
    --------
    extractall : returns all matches (not just the first match)

    Examples
    --------
    A pattern with two groups will return a DataFrame with two columns.
    Non-matches will be NaN.

    >>> s = Series(['a1', 'b2', 'c3'])
    >>> s.str.extract('([ab])(\d)')
         0    1
    0    a    1
    1    b    2
    2  NaN  NaN

    A pattern may contain optional groups.

    >>> s.str.extract('([ab])?(\d)')
         0  1
    0    a  1
    1    b  2
    2  NaN  3

    Named groups will become column names in the result.

    >>> s.str.extract('(?P<letter>[ab])(?P<digit>\d)')
      letter digit
    0      a     1
    1      b     2
    2    NaN   NaN

    A pattern with one group will return a DataFrame with one column
    if expand=True.

    >>> s.str.extract('[ab](\d)', expand=True)
         0
    0    1
    1    2
    2  NaN

    A pattern with one group will return a Series if expand=False.

    >>> s.str.extract('[ab](\d)', expand=False)
    0      1
    1      2
    2    NaN
    dtype: object
    """
    # NOTE: here `arr` is the StringMethods accessor object (it exposes
    # _orig, _data and _wrap_result), not a raw array.
    if expand is None:
        # transition warning: the default will flip to expand=True
        warnings.warn(
            "currently extract(expand=None) " +
            "means expand=False (return Index/Series/DataFrame) " +
            "but in a future version of pandas this will be changed " +
            "to expand=True (return DataFrame)",
            FutureWarning,
            stacklevel=3)
        expand = False
    if not isinstance(expand, bool):
        raise ValueError("expand must be True or False")
    if expand:
        return _str_extract_frame(arr._orig, pat, flags=flags)
    else:
        result, name = _str_extract_noexpand(arr._data, pat, flags=flags)
        return arr._wrap_result(result, name=name, expand=expand)
def str_extractall(arr, pat, flags=0):
    """
    For each subject string in the Series, extract groups from all
    matches of regular expression pat. When each subject string in the
    Series has exactly one match, extractall(pat).xs(0, level='match')
    is the same as extract(pat).

    .. versionadded:: 0.18.0

    Parameters
    ----------
    pat : string
        Regular expression pattern with capturing groups
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    A DataFrame with one row for each match, and one column for each
    group. Its rows have a MultiIndex with first levels that come from
    the subject Series. The last level is named 'match' and indicates
    the order in the subject. Any capture group names in regular
    expression pat will be used for column names; otherwise capture
    group numbers will be used.

    See Also
    --------
    extract : returns first match only (not all matches)

    Examples
    --------
    A pattern with one group will return a DataFrame with one column.
    Indices with no matches will not appear in the result.

    >>> s = Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
    >>> s.str.extractall("[ab](\d)")
             0
      match
    A 0      1
      1      2
    B 0      1

    Capture group names are used for column names of the result.

    >>> s.str.extractall("[ab](?P<digit>\d)")
            digit
      match
    A 0         1
      1         2
    B 0         1

    A pattern with two groups will return a DataFrame with two columns.

    >>> s.str.extractall("(?P<letter>[ab])(?P<digit>\d)")
            letter digit
      match
    A 0          a     1
      1          a     2
    B 0          b     1

    Optional groups that do not match are NaN in the result.

    >>> s.str.extractall("(?P<letter>[ab])?(?P<digit>\d)")
            letter digit
      match
    A 0          a     1
      1          a     2
    B 0          b     1
    C 0        NaN     1
    """

    regex = re.compile(pat, flags=flags)
    # the regex must contain capture groups.
    if regex.groups == 0:
        raise ValueError("pattern contains no capture groups")

    if isinstance(arr, ABCIndex):
        # operate uniformly on a Series; the original index labels are
        # still carried along by to_series() values
        arr = arr.to_series().reset_index(drop=True)

    # column labels: group name where available, else zero-based position
    names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
    columns = [names.get(1 + i, i) for i in range(regex.groups)]
    match_list = []
    index_list = []
    is_mi = arr.index.nlevels > 1

    for subject_key, subject in arr.iteritems():
        if isinstance(subject, compat.string_types):

            if not is_mi:
                # normalize to a tuple so we can append the match number
                subject_key = (subject_key, )

            for match_i, match_tuple in enumerate(regex.findall(subject)):
                if isinstance(match_tuple, compat.string_types):
                    # findall returns bare strings for single-group patterns
                    match_tuple = (match_tuple,)
                # NOTE(review): this conflates a group that matched the
                # empty string with a group that did not participate in
                # the match at all — both become NaN; confirm intended.
                na_tuple = [np.NaN if group == "" else group
                            for group in match_tuple]
                match_list.append(na_tuple)
                result_key = tuple(subject_key + (match_i, ))
                index_list.append(result_key)

    if 0 < len(index_list):
        from pandas import MultiIndex
        index = MultiIndex.from_tuples(
            index_list, names=arr.index.names + ["match"])
    else:
        index = None
    result = arr._constructor_expanddim(match_list, index=index,
                                        columns=columns)
    return result
def str_get_dummies(arr, sep='|'):
    """
    Split each string in the Series by sep and return a frame of
    dummy/indicator variables.

    Parameters
    ----------
    sep : string, default "|"
        String to split on.

    Returns
    -------
    dummies : DataFrame

    Examples
    --------
    >>> Series(['a|b', 'a', 'a|c']).str.get_dummies()
       a  b  c
    0  1  1  0
    1  1  0  0
    2  1  0  1

    >>> Series(['a|b', np.nan, 'a|c']).str.get_dummies()
       a  b  c
    0  1  1  0
    1  0  0  0
    2  1  0  1

    See Also
    --------
    pandas.get_dummies
    """
    # NA rows contribute no tags -> all-zero dummy rows
    arr = arr.fillna('')
    try:
        # wrap every value in separators so each tag appears as sep+tag+sep,
        # making membership testable with a simple substring check below
        arr = sep + arr + sep
    except TypeError:
        arr = sep + arr.astype(str) + sep

    # collect the universe of distinct tags (dropping the empty string
    # produced by the leading/trailing separators)
    tags = set()
    for ts in arr.str.split(sep):
        tags.update(ts)
    tags = sorted(tags - set([""]))

    dummies = np.empty((len(arr), len(tags)), dtype=np.int64)

    for i, t in enumerate(tags):
        pat = sep + t + sep
        dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)
    return dummies, tags
def str_join(arr, sep):
    """
    Join lists contained as elements in the Series/Index with
    passed delimiter. Equivalent to :meth:`str.join`.

    Parameters
    ----------
    sep : string
        Delimiter

    Returns
    -------
    joined : Series/Index of objects
    """
    # sep.join is applied element-wise; note that for a plain-string
    # element this interleaves sep between its characters (str.join
    # iterates any iterable of strings)
    return _na_map(sep.join, arr)
def str_findall(arr, pat, flags=0):
    """
    Find all occurrences of pattern or regular expression in the
    Series/Index. Equivalent to :func:`re.findall`.

    Parameters
    ----------
    pat : string
        Pattern or regular expression
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    matches : Series/Index of lists

    See Also
    --------
    extractall : returns DataFrame with one column per capture group
    """
    # compile once, then map the bound findall method over the values
    regex = re.compile(pat, flags=flags)
    return _na_map(regex.findall, arr)
def str_find(arr, sub, start=0, end=None, side='left'):
    """
    Return indexes in each strings in the Series/Index where the
    substring is fully contained between [start:end]. Return -1 on failure.

    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    side : {'left', 'right'}, default 'left'
        Specifies a starting side, equivalent to ``find`` or ``rfind``

    Returns
    -------
    found : Series/Index of integer values
    """
    if not isinstance(sub, compat.string_types):
        raise TypeError(
            'expected a string object, not {0}'.format(type(sub).__name__))

    methods = {'left': 'find', 'right': 'rfind'}
    if side not in methods:  # pragma: no cover
        raise ValueError('Invalid side')
    method = methods[side]

    if end is None:
        finder = lambda x: getattr(x, method)(sub, start)
    else:
        finder = lambda x: getattr(x, method)(sub, start, end)

    return _na_map(finder, arr, dtype=int)
def str_index(arr, sub, start=0, end=None, side='left'):
    """
    Like str_find, but via :meth:`str.index`/:meth:`str.rindex`, which
    raise ValueError (rather than returning -1) when the substring is
    not found.
    """
    if not isinstance(sub, compat.string_types):
        raise TypeError(
            'expected a string object, not {0}'.format(type(sub).__name__))

    methods = {'left': 'index', 'right': 'rindex'}
    if side not in methods:  # pragma: no cover
        raise ValueError('Invalid side')
    method = methods[side]

    if end is None:
        indexer = lambda x: getattr(x, method)(sub, start)
    else:
        indexer = lambda x: getattr(x, method)(sub, start, end)

    return _na_map(indexer, arr, dtype=int)
def str_pad(arr, width, side='left', fillchar=' '):
    """
    Pad strings in the Series/Index with an additional character to
    specified side.

    Parameters
    ----------
    width : int
        Minimum width of resulting string; additional characters will be filled
        with spaces
    side : {'left', 'right', 'both'}, default 'left'
    fillchar : str
        Additional character for filling, default is whitespace

    Returns
    -------
    padded : Series/Index of objects
    """
    if not isinstance(fillchar, compat.string_types):
        raise TypeError(
            'fillchar must be a character, not {0}'.format(
                type(fillchar).__name__))
    if len(fillchar) != 1:
        raise TypeError('fillchar must be a character, not str')
    if not is_integer(width):
        raise TypeError(
            'width must be of integer type, not {0}'.format(
                type(width).__name__))

    # note the left/right flip: padding on the LEFT means right-justifying
    methods = {'left': 'rjust', 'right': 'ljust', 'both': 'center'}
    if side not in methods:  # pragma: no cover
        raise ValueError('Invalid side')
    method = methods[side]

    return _na_map(lambda x: getattr(x, method)(width, fillchar), arr)
def str_split(arr, pat=None, n=None):
    """
    Split each string (a la re.split) in the Series/Index by given
    pattern, propagating NA values. Equivalent to :meth:`str.split`.

    Parameters
    ----------
    pat : string, default None
        String or regular expression to split on. If None, splits on whitespace
    n : int, default -1 (all)
        None, 0 and -1 will be interpreted as return all splits
    expand : bool, default False
        * If True, return DataFrame/MultiIndex expanding dimensionality.
        * If False, return Series/Index.

        .. versionadded:: 0.16.1
    return_type : deprecated, use `expand`

    Returns
    -------
    split : Series/Index or DataFrame/MultiIndex of objects
    """
    # NOTE(review): `expand`/`return_type` documented above are not
    # parameters of this function; presumably they are handled by the
    # StringMethods accessor wrapper that shares this docstring — verify
    # against the accessor definition.
    if pat is None:
        # whitespace split via str.split(None, n)
        if n is None or n == 0:
            n = -1
        f = lambda x: x.split(pat, n)
    else:
        if len(pat) == 1:
            # single-character separator: plain str.split is enough
            if n is None or n == 0:
                n = -1
            f = lambda x: x.split(pat, n)
        else:
            # multi-character separator: treat as a regex; re.split uses
            # maxsplit=0 to mean "all splits"
            if n is None or n == -1:
                n = 0
            regex = re.compile(pat)
            f = lambda x: regex.split(x, maxsplit=n)
    res = _na_map(f, arr)
    return res
def str_rsplit(arr, pat=None, n=None):
    """
    Split each string in the Series/Index by the given delimiter
    string, starting at the end of the string and working to the front.
    Equivalent to :meth:`str.rsplit`.

    .. versionadded:: 0.16.2

    Parameters
    ----------
    pat : string, default None
        Separator to split on. If None, splits on whitespace
    n : int, default -1 (all)
        None, 0 and -1 will be interpreted as return all splits
    expand : bool, default False
        * If True, return DataFrame/MultiIndex expanding dimensionality.
        * If False, return Series/Index.

    Returns
    -------
    split : Series/Index or DataFrame/MultiIndex of objects
    """
    # str.rsplit uses maxsplit=-1 to mean "all splits"
    if n is None or n == 0:
        n = -1
    return _na_map(lambda x: x.rsplit(pat, n), arr)
def str_slice(arr, start=None, stop=None, step=None):
    """
    Slice substrings from each element in the Series/Index

    Parameters
    ----------
    start : int or None
    stop : int or None
    step : int or None

    Returns
    -------
    sliced : Series/Index of objects
    """
    # build the slice object once and apply it element-wise
    slc = slice(start, stop, step)
    return _na_map(lambda x: x[slc], arr)
def str_slice_replace(arr, start=None, stop=None, repl=None):
    """
    Replace a slice of each string in the Series/Index with another
    string.

    Parameters
    ----------
    start : int or None
    stop : int or None
    repl : str or None
        String for replacement

    Returns
    -------
    replaced : Series/Index of objects
    """
    if repl is None:
        repl = ''

    def f(x):
        # If the slice to replace is empty, splice `repl` in at `start`
        # without removing anything: take the tail from `start` instead
        # of `stop` (which could lie beyond/before it).
        if x[start:stop] == '':
            local_stop = start
        else:
            local_stop = stop
        y = ''
        if start is not None:
            y += x[:start]
        y += repl
        if stop is not None:
            y += x[local_stop:]
        return y

    return _na_map(f, arr)
def str_strip(arr, to_strip=None, side='both'):
    """
    Strip whitespace (including newlines) from each string in the
    Series/Index.

    Parameters
    ----------
    to_strip : str or unicode
    side : {'left', 'right', 'both'}, default 'both'

    Returns
    -------
    stripped : Series/Index of objects
    """
    methods = {'both': 'strip', 'left': 'lstrip', 'right': 'rstrip'}
    if side not in methods:  # pragma: no cover
        raise ValueError('Invalid side')
    method = methods[side]
    return _na_map(lambda x: getattr(x, method)(to_strip), arr)
def str_wrap(arr, width, **kwargs):
    r"""
    Wrap long strings in the Series/Index to be formatted in
    paragraphs with length less than a given width.

    This method has the same keyword parameters and defaults as
    :class:`textwrap.TextWrapper`.

    Parameters
    ----------
    width : int
        Maximum line-width
    expand_tabs : bool, optional
        If true, tab characters will be expanded to spaces (default: True)
    replace_whitespace : bool, optional
        If true, each whitespace character (as defined by string.whitespace)
        remaining after tab expansion will be replaced by a single space
        (default: True)
    drop_whitespace : bool, optional
        If true, whitespace that, after wrapping, happens to end up at the
        beginning or end of a line is dropped (default: True)
    break_long_words : bool, optional
        If true, then words longer than width will be broken in order to ensure
        that no lines are longer than width. If it is false, long words will
        not be broken, and some lines may be longer than width. (default: True)
    break_on_hyphens : bool, optional
        If true, wrapping will occur preferably on whitespace and right after
        hyphens in compound words, as it is customary in English. If false,
        only whitespaces will be considered as potentially good places for line
        breaks, but you need to set break_long_words to false if you want truly
        insecable words. (default: True)

    Returns
    -------
    wrapped : Series/Index of objects

    Notes
    -----
    Internally, this method uses a :class:`textwrap.TextWrapper` instance with
    default settings. To achieve behavior matching R's stringr library str_wrap
    function, use the arguments:

    - expand_tabs = False
    - replace_whitespace = True
    - drop_whitespace = True
    - break_long_words = False
    - break_on_hyphens = False

    Examples
    --------
    >>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
    >>> s.str.wrap(12)
    0             line to be\nwrapped
    1    another line\nto be\nwrapped
    """
    # fold `width` into kwargs so a single TextWrapper can be built once
    # and reused for every element
    kwargs['width'] = width

    tw = textwrap.TextWrapper(**kwargs)

    return _na_map(lambda s: '\n'.join(tw.wrap(s)), arr)
def str_translate(arr, table, deletechars=None):
    """
    Map all characters in the string through the given mapping table.
    Equivalent to standard :meth:`str.translate`. Note that the optional
    argument deletechars is only valid if you are using python 2. For python 3,
    character deletion should be specified via the table argument.

    Parameters
    ----------
    table : dict (python 3), str or None (python 2)
        In python 3, table is a mapping of Unicode ordinals to Unicode
        ordinals, strings, or None. Unmapped characters are left untouched.
        Characters mapped to None are deleted. :meth:`str.maketrans` is a
        helper function for making translation tables.
        In python 2, table is either a string of length 256 or None. If the
        table argument is None, no translation is applied and the operation
        simply removes the characters in deletechars. :func:`string.maketrans`
        is a helper function for making translation tables.
    deletechars : str, optional (python 2)
        A string of characters to delete. This argument is only valid
        in python 2.

    Returns
    -------
    translated : Series/Index of objects
    """
    if deletechars is None:
        f = lambda x: x.translate(table)
    else:
        # deletechars is a py2-only second argument to str.translate;
        # reject it explicitly on py3 (local import shadows the module-level
        # `compat` binding with the same module)
        from pandas import compat
        if compat.PY3:
            raise ValueError("deletechars is not a valid argument for "
                             "str.translate in python 3. You should simply "
                             "specify character deletions in the table "
                             "argument")
        f = lambda x: x.translate(table, deletechars)
    return _na_map(f, arr)
def str_get(arr, i):
    """
    Extract element from lists, tuples, or strings in each element in the
    Series/Index.

    Parameters
    ----------
    i : int
        Integer index (location); may be negative to count from the end.

    Returns
    -------
    items : Series/Index of objects
        NaN where the element has no position ``i``.
    """
    # BUG FIX: the old guard `len(x) > i` accepts ANY negative i, so e.g.
    # str.get(-1) raised IndexError on an empty string instead of returning
    # NaN.  Bounds-check both ends; behavior for non-negative i is unchanged
    # (len(x) > i is exactly i < len(x) there).
    f = lambda x: x[i] if -len(x) <= i < len(x) else np.nan
    return _na_map(f, arr)
def str_decode(arr, encoding, errors="strict"):
    """
    Decode character string in the Series/Index using indicated encoding.
    Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
    python3.

    Parameters
    ----------
    encoding : str
    errors : str, optional

    Returns
    -------
    decoded : Series/Index of objects
    """
    if encoding in _cpython_optimized_decoders:
        # CPython optimized implementation
        def decode_one(x):
            return x.decode(encoding, errors)
    else:
        decoder = codecs.getdecoder(encoding)

        def decode_one(x):
            return decoder(x, errors)[0]

    return _na_map(decode_one, arr)
def str_encode(arr, encoding, errors="strict"):
    """
    Encode character string in the Series/Index using indicated encoding.
    Equivalent to :meth:`str.encode`.

    Parameters
    ----------
    encoding : str
    errors : str, optional

    Returns
    -------
    encoded : Series/Index of objects
    """
    if encoding in _cpython_optimized_encoders:
        # CPython optimized implementation
        def encode_one(x):
            return x.encode(encoding, errors)
    else:
        encoder = codecs.getencoder(encoding)

        def encode_one(x):
            return encoder(x, errors)[0]

    return _na_map(encode_one, arr)
def _noarg_wrapper(f, docstring=None, **kargs):
def wrapper(self):
result = _na_map(f, self._data, **kargs)
return self._wrap_result(result)
wrapper.__name__ = f.__name__
if docstring is not None:
wrapper.__doc__ = docstring
else:
raise ValueError('Provide docstring')
return wrapper
def _pat_wrapper(f, flags=False, na=False, **kwargs):
def wrapper1(self, pat):
result = f(self._data, pat)
return self._wrap_result(result)
def wrapper2(self, pat, flags=0, **kwargs):
result = f(self._data, pat, flags=flags, **kwargs)
return self._wrap_result(result)
def wrapper3(self, pat, na=np.nan):
result = f(self._data, pat, na=na)
return self._wrap_result(result)
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
def copy(source):
    """Decorator factory: copy ``source``'s docstring (when it has one)
    onto the decorated function."""
    def do_copy(target):
        if source.__doc__:
            target.__doc__ = source.__doc__
        return target

    return do_copy
class StringMethods(NoNewAttributesMixin):
    """
    Vectorized string functions for Series and Index. NAs stay NA unless
    handled otherwise by a particular method. Patterned after Python's string
    methods, with some inspiration from R's stringr package.
    Examples
    --------
    >>> s.str.split('_')
    >>> s.str.replace('_', '')
    """
    def __init__(self, data):
        # For categoricals we operate on the (unique) categories and expand
        # back to full length in _wrap_result via the codes.
        self._is_categorical = is_categorical_dtype(data)
        self._data = data.cat.categories if self._is_categorical else data
        # save orig to blow up categoricals to the right type
        self._orig = data
        self._freeze()
    def __getitem__(self, key):
        # Slice keys delegate to .str.slice; scalar keys to .str.get.
        if isinstance(key, slice):
            return self.slice(start=key.start, stop=key.stop, step=key.step)
        else:
            return self.get(key)
    def __iter__(self):
        # Yield the i-th element of every string, advancing i until every
        # position is NA (i.e. all strings are exhausted).
        i = 0
        g = self.get(i)
        while g.notna().any():
            yield g
            i += 1
            g = self.get(i)
    def _wrap_result(self, result, use_codes=True,
                     name=None, expand=None):
        # Box a raw result array back into a Series/Index/DataFrame matching
        # the original object's type, index and (optionally) name.
        from pandas.core.index import Index, MultiIndex
        # for category, we do the stuff on the categories, so blow it up
        # to the full series again
        # But for some operations, we have to do the stuff on the full values,
        # so make it possible to skip this step as the method already did this
        # before the transformation...
        if use_codes and self._is_categorical:
            result = take_1d(result, self._orig.cat.codes)
        # non-array results (no ndim/dtype, e.g. a DataFrame) pass through
        if not hasattr(result, 'ndim') or not hasattr(result, 'dtype'):
            return result
        assert result.ndim < 3
        if expand is None:
            # infer from ndim if expand is not specified
            expand = False if result.ndim == 1 else True
        elif expand is True and not isinstance(self._orig, Index):
            # required when expand=True is explicitly specified
            # not needed when infered
            def cons_row(x):
                if is_list_like(x):
                    return x
                else:
                    return [x]
            result = [cons_row(x) for x in result]
        if not isinstance(expand, bool):
            raise ValueError("expand must be True or False")
        if expand is False:
            # if expand is False, result should have the same name
            # as the original otherwise specified
            if name is None:
                name = getattr(result, 'name', None)
            if name is None:
                # do not use logical or, _orig may be a DataFrame
                # which has "name" column
                name = self._orig.name
        # Wait until we are sure result is a Series or Index before
        # checking attributes (GH 12180)
        if isinstance(self._orig, Index):
            # if result is a boolean np.array, return the np.array
            # instead of wrapping it into a boolean Index (GH 8875)
            if is_bool_dtype(result):
                return result
            if expand:
                result = list(result)
                return MultiIndex.from_tuples(result, names=name)
            else:
                return Index(result, name=name)
        else:
            index = self._orig.index
            if expand:
                cons = self._orig._constructor_expanddim
                return cons(result, columns=name, index=index)
            else:
                # Must be a Series
                cons = self._orig._constructor
                return cons(result, name=name, index=index)
    @copy(str_cat)
    def cat(self, others=None, sep=None, na_rep=None):
        # cat operates on the full values, not the categories, so skip the
        # codes-expansion step in _wrap_result for categoricals.
        data = self._orig if self._is_categorical else self._data
        result = str_cat(data, others=others, sep=sep, na_rep=na_rep)
        return self._wrap_result(result, use_codes=(not self._is_categorical))
    @copy(str_split)
    def split(self, pat=None, n=-1, expand=False):
        result = str_split(self._data, pat, n=n)
        return self._wrap_result(result, expand=expand)
    @copy(str_rsplit)
    def rsplit(self, pat=None, n=-1, expand=False):
        result = str_rsplit(self._data, pat, n=n)
        return self._wrap_result(result, expand=expand)
    _shared_docs['str_partition'] = ("""
    Split the string at the %(side)s occurrence of `sep`, and return 3 elements
    containing the part before the separator, the separator itself,
    and the part after the separator.
    If the separator is not found, return %(return)s.
    Parameters
    ----------
    pat : string, default whitespace
        String to split on.
    expand : bool, default True
        * If True, return DataFrame/MultiIndex expanding dimensionality.
        * If False, return Series/Index.
    Returns
    -------
    split : DataFrame/MultiIndex or Series/Index of objects
    See Also
    --------
    %(also)s
    Examples
    --------
    >>> s = Series(['A_B_C', 'D_E_F', 'X'])
    0 A_B_C
    1 D_E_F
    2 X
    dtype: object
    >>> s.str.partition('_')
    0 1 2
    0 A _ B_C
    1 D _ E_F
    2 X
    >>> s.str.rpartition('_')
    0 1 2
    0 A_B _ C
    1 D_E _ F
    2 X
    """)
    @Appender(_shared_docs['str_partition'] % {
        'side': 'first',
        'return': '3 elements containing the string itself, followed by two '
        'empty strings',
        'also': 'rpartition : Split the string at the last occurrence of `sep`'
    })
    def partition(self, pat=' ', expand=True):
        f = lambda x: x.partition(pat)
        result = _na_map(f, self._data)
        return self._wrap_result(result, expand=expand)
    @Appender(_shared_docs['str_partition'] % {
        'side': 'last',
        'return': '3 elements containing two empty strings, followed by the '
        'string itself',
        'also': 'partition : Split the string at the first occurrence of `sep`'
    })
    def rpartition(self, pat=' ', expand=True):
        f = lambda x: x.rpartition(pat)
        result = _na_map(f, self._data)
        return self._wrap_result(result, expand=expand)
    @copy(str_get)
    def get(self, i):
        result = str_get(self._data, i)
        return self._wrap_result(result)
    @copy(str_join)
    def join(self, sep):
        result = str_join(self._data, sep)
        return self._wrap_result(result)
    @copy(str_contains)
    def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
        result = str_contains(self._data, pat, case=case, flags=flags, na=na,
                              regex=regex)
        return self._wrap_result(result)
    @copy(str_match)
    def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=None):
        result = str_match(self._data, pat, case=case, flags=flags, na=na,
                           as_indexer=as_indexer)
        return self._wrap_result(result)
    @copy(str_replace)
    def replace(self, pat, repl, n=-1, case=None, flags=0):
        result = str_replace(self._data, pat, repl, n=n, case=case,
                             flags=flags)
        return self._wrap_result(result)
    @copy(str_repeat)
    def repeat(self, repeats):
        result = str_repeat(self._data, repeats)
        return self._wrap_result(result)
    @copy(str_pad)
    def pad(self, width, side='left', fillchar=' '):
        result = str_pad(self._data, width, side=side, fillchar=fillchar)
        return self._wrap_result(result)
    _shared_docs['str_pad'] = ("""
    Filling %(side)s side of strings in the Series/Index with an
    additional character. Equivalent to :meth:`str.%(method)s`.
    Parameters
    ----------
    width : int
        Minimum width of resulting string; additional characters will be filled
        with ``fillchar``
    fillchar : str
        Additional character for filling, default is whitespace
    Returns
    -------
    filled : Series/Index of objects
    """)
    @Appender(_shared_docs['str_pad'] % dict(side='left and right',
                                             method='center'))
    def center(self, width, fillchar=' '):
        return self.pad(width, side='both', fillchar=fillchar)
    @Appender(_shared_docs['str_pad'] % dict(side='right', method='ljust'))
    def ljust(self, width, fillchar=' '):
        return self.pad(width, side='right', fillchar=fillchar)
    @Appender(_shared_docs['str_pad'] % dict(side='left', method='rjust'))
    def rjust(self, width, fillchar=' '):
        return self.pad(width, side='left', fillchar=fillchar)
    def zfill(self, width):
        """
        Filling left side of strings in the Series/Index with 0.
        Equivalent to :meth:`str.zfill`.
        Parameters
        ----------
        width : int
            Minimum width of resulting string; additional characters will be
            filled with 0
        Returns
        -------
        filled : Series/Index of objects
        """
        # NOTE(review): pads '0' strictly on the left via str_pad, so unlike
        # str.zfill a leading sign character is not handled specially —
        # confirm this is intended.
        result = str_pad(self._data, width, side='left', fillchar='0')
        return self._wrap_result(result)
    @copy(str_slice)
    def slice(self, start=None, stop=None, step=None):
        result = str_slice(self._data, start, stop, step)
        return self._wrap_result(result)
    @copy(str_slice_replace)
    def slice_replace(self, start=None, stop=None, repl=None):
        result = str_slice_replace(self._data, start, stop, repl)
        return self._wrap_result(result)
    @copy(str_decode)
    def decode(self, encoding, errors="strict"):
        result = str_decode(self._data, encoding, errors)
        return self._wrap_result(result)
    @copy(str_encode)
    def encode(self, encoding, errors="strict"):
        result = str_encode(self._data, encoding, errors)
        return self._wrap_result(result)
    _shared_docs['str_strip'] = ("""
    Strip whitespace (including newlines) from each string in the
    Series/Index from %(side)s. Equivalent to :meth:`str.%(method)s`.
    Returns
    -------
    stripped : Series/Index of objects
    """)
    @Appender(_shared_docs['str_strip'] % dict(side='left and right sides',
                                               method='strip'))
    def strip(self, to_strip=None):
        result = str_strip(self._data, to_strip, side='both')
        return self._wrap_result(result)
    @Appender(_shared_docs['str_strip'] % dict(side='left side',
                                               method='lstrip'))
    def lstrip(self, to_strip=None):
        result = str_strip(self._data, to_strip, side='left')
        return self._wrap_result(result)
    @Appender(_shared_docs['str_strip'] % dict(side='right side',
                                               method='rstrip'))
    def rstrip(self, to_strip=None):
        result = str_strip(self._data, to_strip, side='right')
        return self._wrap_result(result)
    @copy(str_wrap)
    def wrap(self, width, **kwargs):
        result = str_wrap(self._data, width, **kwargs)
        return self._wrap_result(result)
    @copy(str_get_dummies)
    def get_dummies(self, sep='|'):
        # we need to cast to Series of strings as only that has all
        # methods available for making the dummies...
        data = self._orig.astype(str) if self._is_categorical else self._data
        result, name = str_get_dummies(data, sep)
        return self._wrap_result(result, use_codes=(not self._is_categorical),
                                 name=name, expand=True)
    @copy(str_translate)
    def translate(self, table, deletechars=None):
        result = str_translate(self._data, table, deletechars)
        return self._wrap_result(result)
    # pattern-based accessors generated from the module-level str_* functions
    count = _pat_wrapper(str_count, flags=True)
    startswith = _pat_wrapper(str_startswith, na=True)
    endswith = _pat_wrapper(str_endswith, na=True)
    findall = _pat_wrapper(str_findall, flags=True)
    @copy(str_extract)
    def extract(self, pat, flags=0, expand=None):
        return str_extract(self, pat, flags=flags, expand=expand)
    @copy(str_extractall)
    def extractall(self, pat, flags=0):
        # operates on the original object (not the categories)
        return str_extractall(self._orig, pat, flags=flags)
    _shared_docs['find'] = ("""
    Return %(side)s indexes in each strings in the Series/Index
    where the substring is fully contained between [start:end].
    Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    Returns
    -------
    found : Series/Index of integer values
    See Also
    --------
    %(also)s
    """)
    @Appender(_shared_docs['find'] %
              dict(side='lowest', method='find',
                   also='rfind : Return highest indexes in each strings'))
    def find(self, sub, start=0, end=None):
        result = str_find(self._data, sub, start=start, end=end, side='left')
        return self._wrap_result(result)
    @Appender(_shared_docs['find'] %
              dict(side='highest', method='rfind',
                   also='find : Return lowest indexes in each strings'))
    def rfind(self, sub, start=0, end=None):
        result = str_find(self._data, sub, start=start, end=end, side='right')
        return self._wrap_result(result)
    def normalize(self, form):
        """Return the Unicode normal form for the strings in the Series/Index.
        For more information on the forms, see the
        :func:`unicodedata.normalize`.
        Parameters
        ----------
        form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
            Unicode form
        Returns
        -------
        normalized : Series/Index of objects
        """
        import unicodedata
        f = lambda x: unicodedata.normalize(form, compat.u_safe(x))
        result = _na_map(f, self._data)
        return self._wrap_result(result)
    _shared_docs['index'] = ("""
    Return %(side)s indexes in each strings where the substring is
    fully contained between [start:end]. This is the same as
    ``str.%(similar)s`` except instead of returning -1, it raises a ValueError
    when the substring is not found. Equivalent to standard ``str.%(method)s``.
    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    Returns
    -------
    found : Series/Index of objects
    See Also
    --------
    %(also)s
    """)
    @Appender(_shared_docs['index'] %
              dict(side='lowest', similar='find', method='index',
                   also='rindex : Return highest indexes in each strings'))
    def index(self, sub, start=0, end=None):
        result = str_index(self._data, sub, start=start, end=end, side='left')
        return self._wrap_result(result)
    @Appender(_shared_docs['index'] %
              dict(side='highest', similar='rfind', method='rindex',
                   also='index : Return lowest indexes in each strings'))
    def rindex(self, sub, start=0, end=None):
        result = str_index(self._data, sub, start=start, end=end, side='right')
        return self._wrap_result(result)
    _shared_docs['len'] = ("""
    Compute length of each string in the Series/Index.
    Returns
    -------
    lengths : Series/Index of integer values
    """)
    # shadows the builtin ``len`` inside the class body on purpose
    len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)
    _shared_docs['casemethods'] = ("""
    Convert strings in the Series/Index to %(type)s.
    Equivalent to :meth:`str.%(method)s`.
    Returns
    -------
    converted : Series/Index of objects
    """)
    _shared_docs['lower'] = dict(type='lowercase', method='lower')
    _shared_docs['upper'] = dict(type='uppercase', method='upper')
    _shared_docs['title'] = dict(type='titlecase', method='title')
    _shared_docs['capitalize'] = dict(type='be capitalized',
                                      method='capitalize')
    _shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase')
    lower = _noarg_wrapper(lambda x: x.lower(),
                           docstring=_shared_docs['casemethods'] %
                           _shared_docs['lower'])
    upper = _noarg_wrapper(lambda x: x.upper(),
                           docstring=_shared_docs['casemethods'] %
                           _shared_docs['upper'])
    title = _noarg_wrapper(lambda x: x.title(),
                           docstring=_shared_docs['casemethods'] %
                           _shared_docs['title'])
    capitalize = _noarg_wrapper(lambda x: x.capitalize(),
                                docstring=_shared_docs['casemethods'] %
                                _shared_docs['capitalize'])
    swapcase = _noarg_wrapper(lambda x: x.swapcase(),
                              docstring=_shared_docs['casemethods'] %
                              _shared_docs['swapcase'])
    _shared_docs['ismethods'] = ("""
    Check whether all characters in each string in the Series/Index
    are %(type)s. Equivalent to :meth:`str.%(method)s`.
    Returns
    -------
    is : Series/array of boolean values
    """)
    _shared_docs['isalnum'] = dict(type='alphanumeric', method='isalnum')
    _shared_docs['isalpha'] = dict(type='alphabetic', method='isalpha')
    _shared_docs['isdigit'] = dict(type='digits', method='isdigit')
    _shared_docs['isspace'] = dict(type='whitespace', method='isspace')
    _shared_docs['islower'] = dict(type='lowercase', method='islower')
    _shared_docs['isupper'] = dict(type='uppercase', method='isupper')
    _shared_docs['istitle'] = dict(type='titlecase', method='istitle')
    _shared_docs['isnumeric'] = dict(type='numeric', method='isnumeric')
    _shared_docs['isdecimal'] = dict(type='decimal', method='isdecimal')
    isalnum = _noarg_wrapper(lambda x: x.isalnum(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isalnum'])
    isalpha = _noarg_wrapper(lambda x: x.isalpha(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isalpha'])
    isdigit = _noarg_wrapper(lambda x: x.isdigit(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isdigit'])
    isspace = _noarg_wrapper(lambda x: x.isspace(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isspace'])
    islower = _noarg_wrapper(lambda x: x.islower(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['islower'])
    isupper = _noarg_wrapper(lambda x: x.isupper(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isupper'])
    istitle = _noarg_wrapper(lambda x: x.istitle(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['istitle'])
    # isnumeric/isdecimal exist only on unicode strings, hence u_safe
    isnumeric = _noarg_wrapper(lambda x: compat.u_safe(x).isnumeric(),
                               docstring=_shared_docs['ismethods'] %
                               _shared_docs['isnumeric'])
    isdecimal = _noarg_wrapper(lambda x: compat.u_safe(x).isdecimal(),
                               docstring=_shared_docs['ismethods'] %
                               _shared_docs['isdecimal'])
    @classmethod
    def _make_accessor(cls, data):
        # Validate that ``data`` can host a .str accessor before constructing.
        from pandas.core.index import Index
        if (isinstance(data, ABCSeries) and
                not ((is_categorical_dtype(data.dtype) and
                      is_object_dtype(data.values.categories)) or
                     (is_object_dtype(data.dtype)))):
            # it's neither a string series not a categorical series with
            # strings inside the categories.
            # this really should exclude all series with any non-string values
            # (instead of test for object dtype), but that isn't practical for
            # performance reasons until we have a str dtype (GH 9343)
            raise AttributeError("Can only use .str accessor with string "
                                 "values, which use np.object_ dtype in "
                                 "pandas")
        elif isinstance(data, Index):
            # can't use ABCIndex to exclude non-str
            # see scc/inferrence.pyx which can contain string values
            allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer')
            if data.inferred_type not in allowed_types:
                message = ("Can only use .str accessor with string values "
                           "(i.e. inferred_type is 'string', 'unicode' or "
                           "'mixed')")
                raise AttributeError(message)
            if data.nlevels > 1:
                message = ("Can only use .str accessor with Index, not "
                           "MultiIndex")
                raise AttributeError(message)
        return StringMethods(data)
class StringAccessorMixin(object):
    """Mixin that exposes a ``.str`` string-methods accessor on the class."""
    str = AccessorProperty(StringMethods)

    def _dir_additions(self):
        return set()

    def _dir_deletions(self):
        # Hide ``str`` from dir() when the accessor cannot be built for this
        # object (its construction raises AttributeError).
        try:
            self.str
        except AttributeError:
            return set(['str'])
        return set()
|
{
"content_hash": "ccfea6db631b3716cff58a9f8c544de7",
"timestamp": "",
"source": "github",
"line_count": 1939,
"max_line_length": 79,
"avg_line_length": 30.999484270242395,
"alnum_prop": 0.5750482464896519,
"repo_name": "DGrady/pandas",
"id": "0b1db0277eee3fc0d7b7d80258333c157a7cf46b",
"size": "60108",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/core/strings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4071"
},
{
"name": "C",
"bytes": "493476"
},
{
"name": "C++",
"bytes": "17353"
},
{
"name": "HTML",
"bytes": "551706"
},
{
"name": "Makefile",
"bytes": "907"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "12054479"
},
{
"name": "R",
"bytes": "1177"
},
{
"name": "Shell",
"bytes": "22265"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
"""Home of the Sequential model, and the `save_model`/`load_model` functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import os
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import layers as layer_module
from tensorflow.contrib.keras.python.keras import optimizers
from tensorflow.contrib.keras.python.keras.engine import topology
from tensorflow.contrib.keras.python.keras.engine.topology import Input
from tensorflow.contrib.keras.python.keras.engine.topology import Layer
from tensorflow.contrib.keras.python.keras.engine.topology import TFBaseLayer
from tensorflow.contrib.keras.python.keras.engine.training import Model
from tensorflow.contrib.keras.python.keras.utils.io_utils import ask_to_proceed_with_overwrite
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-import-not-at-top
try:
import h5py
except ImportError:
h5py = None
try:
import yaml
except ImportError:
yaml = None
# pylint: enable=g-import-not-at-top
def save_model(model, filepath, overwrite=True, include_optimizer=True):
  """Save a model to a HDF5 file.
  The saved model contains:
      - the model's configuration (topology)
      - the model's weights
      - the model's optimizer's state (if any)
  Thus the saved model can be reinstantiated in
  the exact same state, without any of the code
  used for model definition or training.
  Arguments:
      model: Keras model instance to be saved.
      filepath: String, path where to save the model.
      overwrite: Whether we should overwrite any existing
          model at the target location, or instead
          ask the user with a manual prompt.
      include_optimizer: If True, save optimizer's state together.
  Raises:
      ImportError: if h5py is not available.
  """
  if h5py is None:
    raise ImportError('`save_model` requires h5py.')

  def get_json_type(obj):
    """Serialize any object to a JSON-serializable structure.
    Arguments:
        obj: the object to serialize
    Returns:
        JSON-serializable structure representing `obj`.
    Raises:
        TypeError: if `obj` cannot be serialized.
    """
    # if obj is a serializable Keras class instance
    # e.g. optimizer, layer
    if hasattr(obj, 'get_config'):
      return {'class_name': obj.__class__.__name__, 'config': obj.get_config()}
    # if obj is any numpy type
    if type(obj).__module__ == np.__name__:
      if isinstance(obj, np.ndarray):
        return {'type': type(obj), 'value': obj.tolist()}
      else:
        return obj.item()
    # misc functions (e.g. loss function)
    if callable(obj):
      return obj.__name__
    # if obj is a python 'type'
    if type(obj).__name__ == type.__name__:
      return obj.__name__
    raise TypeError('Not JSON Serializable:', obj)

  from tensorflow.contrib.keras.python.keras import __version__ as keras_version  # pylint: disable=g-import-not-at-top
  # If file exists and should not be overwritten.
  if not overwrite and os.path.isfile(filepath):
    proceed = ask_to_proceed_with_overwrite(filepath)
    if not proceed:
      return
  f = h5py.File(filepath, 'w')
  # BUG FIX (robustness): close the file even if serialization below raises,
  # so a failed save does not leak an open HDF5 handle.
  try:
    f.attrs['keras_version'] = str(keras_version).encode('utf8')
    f.attrs['backend'] = K.backend().encode('utf8')
    f.attrs['model_config'] = json.dumps(
        {
            'class_name': model.__class__.__name__,
            'config': model.get_config()
        },
        default=get_json_type).encode('utf8')
    model_weights_group = f.create_group('model_weights')
    model_layers = model.layers
    topology.save_weights_to_hdf5_group(model_weights_group, model_layers)
    if include_optimizer and hasattr(model, 'optimizer'):
      if isinstance(model.optimizer, optimizers.TFOptimizer):
        # BUG FIX: the implicitly-concatenated message was missing a space
        # after 'save file.', producing "save file.You will have to...".
        logging.warning(
            'TensorFlow optimizers do not '
            'make it possible to access '
            'optimizer attributes or optimizer state '
            'after instantiation. '
            'As a result, we cannot save the optimizer '
            'as part of the model save file. '
            'You will have to compile your model again after loading it. '
            'Prefer using a Keras optimizer instead '
            '(see keras.io/optimizers).')
      else:
        f.attrs['training_config'] = json.dumps(
            {
                'optimizer_config': {
                    'class_name': model.optimizer.__class__.__name__,
                    'config': model.optimizer.get_config()
                },
                'loss': model.loss,
                'metrics': model.metrics,
                'sample_weight_mode': model.sample_weight_mode,
                'loss_weights': model.loss_weights,
            },
            default=get_json_type).encode('utf8')
        # Save optimizer weights.
        symbolic_weights = getattr(model.optimizer, 'weights')
        if symbolic_weights:
          optimizer_weights_group = f.create_group('optimizer_weights')
          weight_values = K.batch_get_value(symbolic_weights)
          weight_names = []
          for w, val in zip(symbolic_weights, weight_values):
            name = str(w.name)
            weight_names.append(name.encode('utf8'))
          optimizer_weights_group.attrs['weight_names'] = weight_names
          for name, val in zip(weight_names, weight_values):
            param_dset = optimizer_weights_group.create_dataset(
                name, val.shape, dtype=val.dtype)
            if not val.shape:
              # scalar
              param_dset[()] = val
            else:
              param_dset[:] = val
    f.flush()
  finally:
    f.close()
def load_model(filepath, custom_objects=None, compile=True):  # pylint: disable=redefined-builtin
  """Loads a model saved via `save_model`.
  Arguments:
      filepath: String, path to the saved model.
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.
      compile: Boolean, whether to compile the model
          after loading.
  Returns:
      A Keras model instance. If an optimizer was found
      as part of the saved model, the model is already
      compiled. Otherwise, the model is uncompiled and
      a warning will be displayed. When `compile` is set
      to False, the compilation is omitted without any
      warning.
  Raises:
      ImportError: if h5py is not available.
      ValueError: In case of an invalid savefile.
  """
  if h5py is None:
    raise ImportError('`load_model` requires h5py.')
  # normalize any falsy value (None, {}) to a fresh empty dict
  if not custom_objects:
    custom_objects = {}

  def convert_custom_objects(obj):
    """Handles custom object lookup.
    Arguments:
        obj: object, dict, or list.
    Returns:
        The same structure, where occurrences
        of a custom object name have been replaced
        with the custom object.
    """
    # recursively walk lists and dicts, substituting leaf names that
    # appear in custom_objects with the mapped object
    if isinstance(obj, list):
      deserialized = []
      for value in obj:
        deserialized.append(convert_custom_objects(value))
      return deserialized
    if isinstance(obj, dict):
      deserialized = {}
      for key, value in obj.items():
        deserialized[key] = convert_custom_objects(value)
      return deserialized
    if obj in custom_objects:
      return custom_objects[obj]
    return obj

  with h5py.File(filepath, mode='r') as f:
    # instantiate model
    model_config = f.attrs.get('model_config')
    if model_config is None:
      raise ValueError('No model found in config file.')
    model_config = json.loads(model_config.decode('utf-8'))
    model = model_from_config(model_config, custom_objects=custom_objects)
    # set weights
    topology.load_weights_from_hdf5_group(f['model_weights'], model.layers)
    # Early return if compilation is not required.
    if not compile:
      return model
    # instantiate optimizer
    training_config = f.attrs.get('training_config')
    if training_config is None:
      logging.warning('No training configuration found in save file: '
                      'the model was *not* compiled. Compile it manually.')
      return model
    training_config = json.loads(training_config.decode('utf-8'))
    optimizer_config = training_config['optimizer_config']
    optimizer = optimizers.deserialize(
        optimizer_config, custom_objects=custom_objects)
    # Recover loss functions and metrics.
    loss = convert_custom_objects(training_config['loss'])
    metrics = convert_custom_objects(training_config['metrics'])
    sample_weight_mode = training_config['sample_weight_mode']
    loss_weights = training_config['loss_weights']
    # Compile model.
    model.compile(
        optimizer=optimizer,
        loss=loss,
        metrics=metrics,
        loss_weights=loss_weights,
        sample_weight_mode=sample_weight_mode)
    # Set optimizer weights.
    if 'optimizer_weights' in f:
      # Build train function (to get weight updates).
      if isinstance(model, Sequential):
        model.model._make_train_function()
      else:
        model._make_train_function()
      optimizer_weights_group = f['optimizer_weights']
      optimizer_weight_names = [
          n.decode('utf8')
          for n in optimizer_weights_group.attrs['weight_names']
      ]
      optimizer_weight_values = [
          optimizer_weights_group[n] for n in optimizer_weight_names
      ]
      # A shape/count mismatch (e.g. a changed model) is tolerated: the
      # optimizer simply starts fresh with a warning.
      try:
        model.optimizer.set_weights(optimizer_weight_values)
      except ValueError:
        logging.warning('Error in loading the saved optimizer '
                        'state. As a result, your model is '
                        'starting with a freshly initialized '
                        'optimizer.')
    return model
def model_from_config(config, custom_objects=None):
  """Instantiates a Keras model from its config.
  Arguments:
      config: Configuration dictionary.
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.
  Returns:
      A Keras model instance (uncompiled).
  Raises:
      TypeError: if `config` is not a dictionary.
  """
  # Guard against the common mistake of passing a Sequential-style list.
  if isinstance(config, list):
    raise TypeError('`model_from_config` expects a dictionary, not a list. '
                    'Maybe you meant to use '
                    '`Sequential.from_config(config)`?')
  return layer_module.deserialize(config, custom_objects=custom_objects)
def model_from_yaml(yaml_string, custom_objects=None):
  """Parses a yaml model configuration file and returns a model instance.
  Arguments:
      yaml_string: YAML string encoding a model configuration.
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.
  Returns:
      A Keras model instance (uncompiled).
  Raises:
      ImportError: if yaml module is not found.
  """
  # ``yaml`` is an optional dependency, imported at module load time.
  if yaml is None:
    raise ImportError('Requires yaml module installed.')
  return layer_module.deserialize(yaml.load(yaml_string),
                                  custom_objects=custom_objects)
def model_from_json(json_string, custom_objects=None):
  """Parses a JSON model configuration file and returns a model instance.
  Arguments:
      json_string: JSON string encoding a model configuration.
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.
  Returns:
      A Keras model instance (uncompiled).
  """
  parsed_config = json.loads(json_string)
  return layer_module.deserialize(parsed_config,
                                  custom_objects=custom_objects)
class Sequential(Model):
"""Linear stack of layers.
Arguments:
layers: list of layers to add to the model.
# Note
The first layer passed to a Sequential model
should have a defined input shape. What that
means is that it should have received an `input_shape`
or `batch_input_shape` argument,
or for some type of layers (recurrent, Dense...)
an `input_dim` argument.
Example:
```python
model = Sequential()
# first layer must have a defined input shape
model.add(Dense(32, input_dim=500))
# afterwards, Keras does automatic shape inference
model.add(Dense(32))
# also possible (equivalent to the above):
model = Sequential()
model.add(Dense(32, input_shape=(500,)))
model.add(Dense(32))
# also possible (equivalent to the above):
model = Sequential()
# here the batch dimension is None,
# which means any batch size will be accepted by the model.
model.add(Dense(32, batch_input_shape=(None, 500)))
model.add(Dense(32))
```
"""
  def __init__(self, layers=None, name=None):
    """Create an empty layer stack, optionally seeded with `layers`.
    Arguments:
        layers: optional list of layer instances, added in order via `add`.
        name: optional model name; autogenerated from a uid when omitted.
    """
    self.layers = []  # Stack of layers.
    self.model = None  # Internal Model instance.
    self.inputs = []  # List of input tensors
    self.outputs = []  # List of length 1: the output tensor (unique).
    self._trainable = True
    self._initial_weights = None
    self._input_layers = []
    # Model attributes.
    self.inbound_nodes = []
    self.outbound_nodes = []
    self.built = False
    # Set model name.
    if not name:
      prefix = 'sequential_'
      name = prefix + str(K.get_uid(prefix))
    self.name = name
    # The following properties are not actually used by Keras;
    # they exist for compatibility with TF's variable scoping mechanism.
    self._updates = []
    self._losses = []
    self._scope = None
    self._reuse = None
    self._base_name = name
    self._graph = ops.get_default_graph()
    # Add to the model any layers passed to the constructor.
    if layers:
      for layer in layers:
        self.add(layer)
  def add(self, layer):
    """Adds a layer instance on top of the layer stack.
    Arguments:
        layer: layer instance.
    Raises:
        TypeError: If `layer` is not a layer instance.
        ValueError: In case the `layer` argument does not
            know its input shape.
        ValueError: In case the `layer` argument has
            multiple output tensors, or is already connected
            somewhere else (forbidden in `Sequential` models).
    """
    if not isinstance(layer, (Layer, TFBaseLayer)):
      raise TypeError('The added layer must be '
                      'an instance of class Layer. '
                      'Found: ' + str(layer))
    if not self.outputs:
      # first layer in model: check that it is an input layer
      if not layer.inbound_nodes:
        # create an input layer
        if not hasattr(layer, 'batch_input_shape'):
          raise ValueError('The first layer in a '
                           'Sequential model must '
                           'get an `input_shape` or '
                           '`batch_input_shape` argument.')
        # Instantiate the input layer.
        x = Input(
            batch_shape=layer.batch_input_shape,
            dtype=layer.dtype,
            name=layer.name + '_input')
        # This will build the current layer
        # and create the node connecting the current layer
        # to the input layer we just created.
        layer(x)
      if len(layer.inbound_nodes) != 1:
        raise ValueError('A layer added to a Sequential model must '
                         'not already be connected somewhere else. '
                         'Model received layer ' + layer.name + ' which has ' +
                         str(len(layer.inbound_nodes)) +
                         ' pre-existing inbound connections.')
      if len(layer.inbound_nodes[0].output_tensors) != 1:
        raise ValueError('All layers in a Sequential model '
                         'should have a single output tensor. '
                         'For multi-output layers, '
                         'use the functional API.')
      self.outputs = [layer.inbound_nodes[0].output_tensors[0]]
      self.inputs = topology.get_source_inputs(self.outputs[0])
      # We create an input node, which we will keep updated
      # as we add more layers
      topology.Node(
          outbound_layer=self,
          inbound_layers=[],
          node_indices=[],
          tensor_indices=[],
          input_tensors=self.inputs,
          output_tensors=self.outputs)
    else:
      # subsequent layers: chain onto the current (single) output tensor
      output_tensor = layer(self.outputs[0])
      if isinstance(output_tensor, list):
        raise TypeError('All layers in a Sequential model '
                        'should have a single output tensor. '
                        'For multi-output layers, '
                        'use the functional API.')
      self.outputs = [output_tensor]
      # update self.inbound_nodes
      self.inbound_nodes[0].output_tensors = self.outputs
      self.inbound_nodes[0].output_shapes = [K.int_shape(self.outputs[0])]
    self.layers.append(layer)
    # invalidate the cached inner Model; it is rebuilt lazily on next use
    self.built = False
  def pop(self):
    """Removes the last layer in the model.
    Raises:
        TypeError: if there are no layers in the model.
    """
    if not self.layers:
      raise TypeError('There are no layers in the model.')
    self.layers.pop()
    if not self.layers:
      # model is now empty: reset all topology state
      self.outputs = []
      self.inbound_nodes = []
      self.outbound_nodes = []
    else:
      # the new last layer becomes the output of the model
      self.layers[-1].outbound_nodes = []
      self.outputs = [self.layers[-1].output]
      # update self.inbound_nodes
      self.inbound_nodes[0].output_tensors = self.outputs
      self.inbound_nodes[0].output_shapes = [K.int_shape(self.outputs[0])]
    # invalidate the cached inner Model
    self.built = False
def get_layer(self, name=None, index=None):
"""Retrieve a layer that is part of the model.
Returns a layer based on either its name (unique)
or its index in the graph. Indices are based on
order of horizontal graph traversal (bottom-up).
Arguments:
name: string, name of layer.
index: integer, index of layer.
Returns:
A layer instance.
"""
if self.model is None:
self.build()
return self.model.get_layer(name, index)
def call(self, inputs, mask=None):
if self.model is None:
self.build()
return self.model.call(inputs, mask)
def build(self, input_shape=None):
if not self.inputs or not self.outputs:
raise TypeError('Sequential model cannot be built: model is empty.'
' Add some layers first.')
# actually create the model
self.model = Model(self.inputs, self.outputs[0], name=self.name + '_model')
self.model.trainable = self.trainable
# mirror model attributes
self.supports_masking = self.model.supports_masking
self._output_mask_cache = self.model._output_mask_cache
self._output_tensor_cache = self.model._output_tensor_cache
self._output_shape_cache = self.model._output_shape_cache
self._input_layers = self.model._input_layers
self._output_layers = self.model._output_layers
self._input_coordinates = self.model._input_coordinates
self._output_coordinates = self.model._output_coordinates
self._nodes_by_depth = self.model._nodes_by_depth
self._network_nodes = self.model._network_nodes
self.output_names = self.model.output_names
self.input_names = self.model.input_names
self._feed_input_names = self.model._feed_input_names
self._feed_inputs = self.model._feed_inputs
# Make sure child model callbacks
# will call the parent Sequential model.
self.model.callback_model = self
self.built = True
@property
def uses_learning_phase(self):
if self.model is None:
self.build()
return self.model.uses_learning_phase
def _gather_list_attr(self, attr):
all_attrs = []
for layer in self.layers:
all_attrs += getattr(layer, attr, [])
return all_attrs
  @property
  def trainable(self):
    """Whether the model's weights should be updated during training."""
    return self._trainable
  @trainable.setter
  def trainable(self, value):
    # Keep the wrapped inner Model (if already built) in sync, then cache
    # the flag locally for the getter.
    if self.model:
      self.model.trainable = value
    self._trainable = value
@property
def trainable_weights(self):
if not self.trainable:
return []
return self._gather_list_attr('trainable_weights')
@property
def non_trainable_weights(self):
weights = self._gather_list_attr('non_trainable_weights')
if not self.trainable:
trainable_weights = self._gather_list_attr('trainable_weights')
return trainable_weights + weights
return weights
@property
def updates(self):
if self.model is None:
self.build()
return self.model.updates
@property
def state_updates(self):
if self.model is None:
self.build()
return self.model.state_updates
def get_updates_for(self, inputs):
if self.model is None:
self.build()
return self.model.get_updates_for(inputs)
@property
def losses(self):
if self.model is None:
self.build()
return self.model.losses
def get_losses_for(self, inputs):
if self.model is None:
self.build()
return self.model.get_losses_for(inputs)
@property
def regularizers(self):
if self.model is None:
self.build()
return self.model.regularizers
def get_weights(self):
"""Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays
(one array per model weight).
"""
if self.model is None:
self.build()
return self.model.get_weights()
def set_weights(self, weights):
"""Sets the weights of the model.
Arguments:
weights: Should be a list
of Numpy arrays with shapes and types matching
the output of `model.get_weights()`.
"""
if self.model is None:
self.build()
self.model.set_weights(weights)
def load_weights(self, filepath, by_name=False):
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
layers = self.layers
if by_name:
topology.load_weights_from_hdf5_group_by_name(f, layers)
else:
topology.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
def save_weights(self, filepath, overwrite=True):
if h5py is None:
raise ImportError('`save_weights` requires h5py.')
# If file exists and should not be overwritten:
if not overwrite and os.path.isfile(filepath):
proceed = ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
layers = self.layers
f = h5py.File(filepath, 'w')
topology.save_weights_to_hdf5_group(f, layers)
f.flush()
f.close()
def compile(self,
optimizer,
loss,
metrics=None,
sample_weight_mode=None,
**kwargs):
"""Configures the learning process.
Arguments:
optimizer: str (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: str (name of objective function) or objective function.
See [losses](/losses).
metrics: list of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
See [metrics](/metrics).
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to "temporal".
"None" defaults to sample-wise weights (1D).
**kwargs: for Theano backend, these are passed into K.function.
When using the Tensorflow backend, these are passed into
`tf.Session.run`.
Example:
```python
model = Sequential()
model.add(Dense(32, input_shape=(500,)))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
```
"""
# create the underlying model
self.build()
# call compile method of Model class
self.model.compile(
optimizer,
loss,
metrics=metrics,
sample_weight_mode=sample_weight_mode,
**kwargs)
self.optimizer = self.model.optimizer
self.loss = self.model.loss
self.total_loss = self.model.total_loss
self.loss_weights = self.model.loss_weights
self.metrics = self.model.metrics
self.metrics_tensors = self.model.metrics_tensors
self.metrics_names = self.model.metrics_names
self.sample_weight_mode = self.model.sample_weight_mode
self.sample_weights = self.model.sample_weights
self.targets = self.model.targets
def fit(self,
x,
y,
batch_size=32,
epochs=10,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0):
"""Trains the model for a fixed number of epochs.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
y: labels, as a Numpy array.
batch_size: integer. Number of samples per gradient update.
epochs: integer, the number of epochs to train the model.
verbose: 0 for no logging to stdout,
1 for progress bar logging, 2 for one log line per epoch.
callbacks: list of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/callbacks).
validation_split: float (0. < x < 1).
Fraction of the data to use as held-out validation data.
validation_data: tuple (x_val, y_val) or tuple
(x_val, y_val, val_sample_weights) to be used as held-out
validation data. Will override validation_split.
shuffle: boolean or str (for 'batch').
Whether to shuffle the samples at each epoch.
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
class_weight: dictionary mapping classes to a weight value,
used for scaling the loss function (during training only).
sample_weight: Numpy array of weights for
the training samples, used for scaling the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: if the model was never compiled.
"""
if self.model is None:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.fit(
x,
y,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_split=validation_split,
validation_data=validation_data,
shuffle=shuffle,
class_weight=class_weight,
sample_weight=sample_weight,
initial_epoch=initial_epoch)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
"""Computes the loss on some input data, batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
y: labels, as a Numpy array.
batch_size: integer. Number of samples per gradient update.
verbose: verbosity mode, 0 or 1.
sample_weight: sample weights, as a Numpy array.
Returns:
Scalar test loss (if the model has no metrics)
or list of scalars (if the model computes other metrics).
The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
RuntimeError: if the model was never compiled.
"""
if self.model is None:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.evaluate(
x,
y,
batch_size=batch_size,
verbose=verbose,
sample_weight=sample_weight)
def predict(self, x, batch_size=32, verbose=0):
"""Generates output predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: the input data, as a Numpy array.
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A Numpy array of predictions.
"""
if self.model is None:
self.build()
return self.model.predict(x, batch_size=batch_size, verbose=verbose)
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
Returns:
A Numpy array of predictions.
"""
if self.model is None:
self.build()
return self.model.predict_on_batch(x)
def train_on_batch(self, x, y, class_weight=None, sample_weight=None):
"""Single gradient update over one batch of samples.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
y: labels, as a Numpy array.
class_weight: dictionary mapping classes to a weight value,
used for scaling the loss function (during training only).
sample_weight: sample weights, as a Numpy array.
Returns:
Scalar training loss (if the model has no metrics)
or list of scalars (if the model computes other metrics).
The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
RuntimeError: if the model was never compiled.
"""
if self.model is None:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.train_on_batch(
x, y, sample_weight=sample_weight, class_weight=class_weight)
def test_on_batch(self, x, y, sample_weight=None):
"""Evaluates the model over a single batch of samples.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
y: labels, as a Numpy array.
sample_weight: sample weights, as a Numpy array.
Returns:
Scalar test loss (if the model has no metrics)
or list of scalars (if the model computes other metrics).
The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
RuntimeError: if the model was never compiled.
"""
if self.model is None:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.test_on_batch(x, y, sample_weight=sample_weight)
def predict_proba(self, x, batch_size=32, verbose=1):
"""Generates class probability predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A Numpy array of probability predictions.
"""
preds = self.predict(x, batch_size, verbose)
if preds.min() < 0. or preds.max() > 1.:
logging.warning('Network returning invalid probability values. '
'The last layer might not normalize predictions '
'into probabilities '
'(like softmax or sigmoid would).')
return preds
def predict_classes(self, x, batch_size=32, verbose=1):
"""Generate class predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A numpy array of class predictions.
"""
proba = self.predict(x, batch_size=batch_size, verbose=verbose)
if proba.shape[-1] > 1:
return proba.argmax(axis=-1)
else:
return (proba > 0.5).astype('int32')
def fit_generator(self,
generator,
steps_per_epoch,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
initial_epoch=0,
**kwargs):
"""Fits the model on data generated batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
Arguments:
generator: A generator.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of unique samples of your dataset
divided by the batch size.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode, 0, 1, or 2.
callbacks: List of callbacks to be called during training.
validation_data: This can be either
- A generator for the validation data
- A tuple (inputs, targets)
- A tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator.
Number of steps to yield from validation generator
at the end of every epoch. It should typically
be equal to the number of unique samples of your
validation dataset divided by the batch size.
class_weight: Dictionary mapping class indices to a weight
for the class.
max_queue_size: Maximum size for the generator queue
workers: Maximum number of processes to spin up
use_multiprocessing: If True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
**kwargs: support for legacy arguments.
Returns:
A `History` object.
Raises:
RuntimeError: if the model was never compiled.
ValueError: In case the generator yields
data in an invalid format.
Example:
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create Numpy arrays of input data
# and labels, from each line in the file
x, y = process_line(line)
yield (x, y)
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=1000, epochs=10)
```
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
if self.model is None:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.fit_generator(
generator,
steps_per_epoch,
epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
initial_epoch=initial_epoch)
def evaluate_generator(self,
generator,
steps,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_queue_size: maximum size for the generator queue
workers: maximum number of processes to spin up
use_multiprocessing: if True, use process based threading.
Note that because this implementation
relies on multiprocessing, you should not pass
non picklable arguments to the generator
as they can't be passed easily to children processes.
**kwargs: support for legacy arguments.
Returns:
Scalar test loss (if the model has no metrics)
or list of scalars (if the model computes other metrics).
The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
RuntimeError: if the model was never compiled.
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
if self.model is None:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.evaluate_generator(
generator,
steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def predict_generator(self,
generator,
steps,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0,
**kwargs):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: generator yielding batches of input samples.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_queue_size: maximum size for the generator queue
workers: maximum number of processes to spin up
use_multiprocessing: if True, use process based threading.
Note that because this implementation
relies on multiprocessing, you should not pass
non picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: verbosity mode, 0 or 1.
**kwargs: support for legacy arguments.
Returns:
A Numpy array of predictions.
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
if self.model is None:
self.build()
return self.model.predict_generator(
generator,
steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def get_config(self):
config = []
for layer in self.layers:
config.append({
'class_name': layer.__class__.__name__,
'config': layer.get_config()
})
return copy.deepcopy(config)
@classmethod
def from_config(cls, config, custom_objects=None):
model = cls()
for conf in config:
layer = layer_module.deserialize(conf, custom_objects=custom_objects)
model.add(layer)
return model
|
{
"content_hash": "9a2d86041d80642492debca962e652ce",
"timestamp": "",
"source": "github",
"line_count": 1218,
"max_line_length": 119,
"avg_line_length": 35.14860426929393,
"alnum_prop": 0.6266380135946369,
"repo_name": "jostep/tensorflow",
"id": "1a0d95c7ff24f3ff658cce7ef86204c04820806b",
"size": "43535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/keras/python/keras/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "191403"
},
{
"name": "C++",
"bytes": "28623031"
},
{
"name": "CMake",
"bytes": "635936"
},
{
"name": "Go",
"bytes": "957914"
},
{
"name": "Java",
"bytes": "404257"
},
{
"name": "Jupyter Notebook",
"bytes": "1833674"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38060"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "264080"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "25225588"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "371221"
}
],
"symlink_target": ""
}
|
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import ValidationError, NotFound, PermissionDenied
from framework.auth.oauth_scopes import CoreScopes
from osf.models import AbstractNode, Registration, OSFUser
from api.base import permissions as base_permissions
from api.base import generic_bulk_views as bulk_views
from api.base.filters import ListFilterMixin
from api.base.views import JSONAPIBaseView, BaseContributorDetail, BaseContributorList, BaseNodeLinksDetail, BaseNodeLinksList, WaterButlerMixin
from api.base.serializers import HideIfWithdrawal, LinkedRegistrationsRelationshipSerializer
from api.base.serializers import LinkedNodesRelationshipSerializer
from api.base.pagination import NodeContributorPagination
from api.base.parsers import JSONAPIRelationshipParser
from api.base.parsers import JSONAPIRelationshipParserForRegularJSON
from api.base.utils import get_user_auth, default_node_list_permission_queryset, is_bulk_request, is_truthy
from api.comments.serializers import RegistrationCommentSerializer, CommentCreateSerializer
from api.identifiers.serializers import RegistrationIdentifierSerializer
from api.nodes.views import NodeIdentifierList
from api.users.views import UserMixin
from api.users.serializers import UserSerializer
from api.nodes.permissions import (
ReadOnlyIfRegistration,
ContributorDetailPermissions,
ContributorOrPublic,
ContributorOrPublicForRelationshipPointers,
AdminOrPublic,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
from api.registrations.serializers import (
RegistrationSerializer,
RegistrationDetailSerializer,
RegistrationContributorsSerializer,
RegistrationProviderSerializer
)
from api.nodes.filters import NodesFilterMixin
from api.nodes.views import (
NodeMixin, NodeRegistrationsList, NodeLogList,
NodeCommentsList, NodeProvidersList, NodeFilesList, NodeFileDetail,
NodeInstitutionsList, NodeForksList, NodeWikiList, LinkedNodesList,
NodeViewOnlyLinksList, NodeViewOnlyLinkDetail, NodeCitationDetail, NodeCitationStyleDetail,
NodeLinkedRegistrationsList,
)
from api.registrations.serializers import RegistrationNodeLinksSerializer, RegistrationFileSerializer
from api.wikis.serializers import RegistrationWikiSerializer
from api.base.utils import get_object_or_error
class RegistrationMixin(NodeMixin):
    """Mixin with convenience methods for retrieving the current registration based on the
    current URL. By default, fetches the current registration based on the node_id kwarg.
    """
    serializer_class = RegistrationSerializer
    node_lookup_url_kwarg = 'node_id'
    def get_node(self, check_object_permissions=True):
        """Return the registration identified by the URL kwarg.

        Raises NotFound for collections and non-registrations; the
        object-permission check may raise PermissionDenied.
        """
        node = get_object_or_error(
            AbstractNode,
            self.kwargs[self.node_lookup_url_kwarg],
            self.request,
            display_name='node'
        )
        # Nodes that are folders/collections are treated as a separate resource, so if the client
        # requests a collection through a node endpoint, we return a 404
        if node.is_collection or not node.is_registration:
            raise NotFound
        # May raise a permission denied
        if check_object_permissions:
            self.check_object_permissions(self.request, node)
        return node
class RegistrationList(JSONAPIBaseView, generics.ListAPIView, bulk_views.BulkUpdateJSONAPIView, NodesFilterMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_list).
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )
    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
    serializer_class = RegistrationSerializer
    view_category = 'registrations'
    view_name = 'registration-list'
    ordering = ('-modified',)
    model_class = Registration

    # overrides BulkUpdateJSONAPIView
    def get_serializer_class(self):
        """
        Use RegistrationDetailSerializer which requires 'id'
        """
        if self.request.method in ('PUT', 'PATCH'):
            return RegistrationDetailSerializer
        else:
            return RegistrationSerializer

    # overrides NodesFilterMixin
    def get_default_queryset(self):
        # Registrations visible to the requesting user.
        return default_node_list_permission_queryset(user=self.request.user, model_cls=Registration)

    def is_blacklisted(self):
        """Return True if any requested filter field is hidden on withdrawals."""
        query_params = self.parse_query_params(self.request.query_params)
        # BUGFIX: use dict.items() instead of the Python 2-only
        # dict.iteritems(), which raises AttributeError on Python 3.
        # items() is equivalent here on both Python versions.
        for key, field_names in query_params.items():
            for field_name, data in field_names.items():
                field = self.serializer_class._declared_fields.get(field_name)
                if isinstance(field, HideIfWithdrawal):
                    return True
        return False

    # overrides ListAPIView, ListBulkCreateJSONAPIView
    def get_queryset(self):
        # For bulk requests, queryset is formed from request body.
        if is_bulk_request(self.request):
            auth = get_user_auth(self.request)
            registrations = Registration.objects.filter(guids___id__in=[registration['id'] for registration in self.request.data])
            # If skip_uneditable=True in query_params, skip nodes for which the user
            # does not have EDIT permissions.
            if is_truthy(self.request.query_params.get('skip_uneditable', False)):
                has_permission = registrations.filter(contributor__user_id=auth.user.id, contributor__write=True).values_list('guids___id', flat=True)
                return Registration.objects.filter(guids___id__in=has_permission)
            for registration in registrations:
                if not registration.can_edit(auth):
                    raise PermissionDenied
            return registrations
        blacklisted = self.is_blacklisted()
        registrations = self.get_queryset_from_request()
        # If attempting to filter on a blacklisted field, exclude withdrawals.
        if blacklisted:
            return registrations.exclude(retraction__isnull=False)
        return registrations
class RegistrationDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, RegistrationMixin, WaterButlerMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_read).
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        AdminOrPublic,
        base_permissions.TokenHasScope,
    )
    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
    serializer_class = RegistrationDetailSerializer
    view_category = 'registrations'
    view_name = 'registration-detail'
    # overrides RetrieveAPIView
    def get_object(self):
        # Fetch the registration for the current URL kwarg.
        registration = self.get_node()
        # NOTE(review): RegistrationMixin.get_node already raises NotFound for
        # non-registrations, so this branch appears unreachable — confirm
        # before relying on the ValidationError here.
        if not registration.is_registration:
            raise ValidationError('This is not a registration.')
        return registration
class RegistrationContributorsList(BaseContributorList, RegistrationMixin, UserMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_contributors_list).
    """
    view_category = 'registrations'
    view_name = 'registration-contributors'
    pagination_class = NodeContributorPagination
    serializer_class = RegistrationContributorsSerializer
    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
    permission_classes = (
        ContributorDetailPermissions,
        drf_permissions.IsAuthenticatedOrReadOnly,
        ReadOnlyIfRegistration,
        base_permissions.TokenHasScope,
    )
    def get_default_queryset(self):
        """Contributors of the current registration, including related user guids."""
        # Skip the per-object permission check here; access is already
        # governed by `permission_classes`.
        node = self.get_node(check_object_permissions=False)
        return node.contributor_set.all().include('user__guids')
class RegistrationContributorDetail(BaseContributorDetail, RegistrationMixin, UserMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_contributors_read).
    """
    view_category = 'registrations'
    view_name = 'registration-contributor-detail'
    serializer_class = RegistrationContributorsSerializer
    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
    # Behavior is inherited from BaseContributorDetail; only the
    # registration-specific permissions and serializer are declared here.
    permission_classes = (
        ContributorDetailPermissions,
        drf_permissions.IsAuthenticatedOrReadOnly,
        ReadOnlyIfRegistration,
        base_permissions.TokenHasScope,
    )
# Lists users with implicit (inherited) admin access on a registration.
class RegistrationImplicitContributorsList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin, RegistrationMixin):
    permission_classes = (
        AdminOrPublic,
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )
    required_read_scopes = [CoreScopes.NODE_CONTRIBUTORS_READ]
    required_write_scopes = [CoreScopes.NULL]
    model_class = OSFUser
    serializer_class = UserSerializer
    view_category = 'registrations'
    view_name = 'registration-implicit-contributors'
    ordering = ('_order',)  # default ordering

    def get_default_queryset(self):
        # Admin contributors inherited from parent nodes.
        return self.get_node().parent_admin_contributors

    def get_queryset(self):
        # ListFilterMixin applies request filters on top of the default queryset.
        return self.get_queryset_from_request()
class RegistrationChildrenList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_children_list).
    """
    view_category = 'registrations'
    view_name = 'registration-children'
    serializer_class = RegistrationSerializer
    permission_classes = (
        ContributorOrPublic,
        drf_permissions.IsAuthenticatedOrReadOnly,
        ReadOnlyIfRegistration,
        base_permissions.TokenHasScope,
        ExcludeWithdrawals
    )
    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NULL]
    ordering = ('-modified',)
    def get_default_queryset(self):
        # Base queryset: registrations visible to the requesting user.
        return default_node_list_permission_queryset(user=self.request.user, model_cls=Registration)
    def get_queryset(self):
        """Direct children of the current registration that the user may view."""
        registration = self.get_node()
        # Direct (non-node-link) child pks of this registration.
        registration_pks = registration.node_relations.filter(is_node_link=False).select_related('child').values_list('child__pk', flat=True)
        return self.get_queryset_from_request().filter(pk__in=registration_pks).can_view(self.request.user).order_by('-modified')
class RegistrationCitationDetail(NodeCitationDetail, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_citations_list).
    """
    # Behavior comes from NodeCitationDetail; only read scope is needed.
    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    view_category = 'registrations'
    view_name = 'registration-citation'
class RegistrationCitationStyleDetail(NodeCitationStyleDetail, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_citation_read).
    """
    # Behavior comes from NodeCitationStyleDetail; only read scope is needed.
    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    view_category = 'registrations'
    view_name = 'registration-style-citation'
class RegistrationForksList(NodeForksList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_forks_list).
    """
    # Behavior comes from NodeForksList; only the view identity differs.
    view_category = 'registrations'
    view_name = 'registration-forks'
class RegistrationCommentsList(NodeCommentsList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_comments_list).
    """
    serializer_class = RegistrationCommentSerializer
    view_category = 'registrations'
    view_name = 'registration-comments'
    def get_serializer_class(self):
        # Comment creation uses the generic create serializer; reads use the
        # registration-specific one declared above.
        if self.request.method == 'POST':
            return CommentCreateSerializer
        else:
            return RegistrationCommentSerializer
class RegistrationLogList(NodeLogList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_logs_list).
    """
    # Behavior is inherited from NodeLogList; only routing differs.
    view_category = 'registrations'
    view_name = 'registration-logs'
class RegistrationProvidersList(NodeProvidersList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_providers_list).
    """
    # Uses the registration-specific provider serializer.
    serializer_class = RegistrationProviderSerializer
    view_category = 'registrations'
    view_name = 'registration-providers'
class RegistrationNodeLinksList(BaseNodeLinksList, RegistrationMixin):
    """Node Links to other nodes. *Writeable*.

    Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
    Node Links are a direct reference to the node that they point to.

    ##Node Link Attributes
    `type` is "node_links"

    None

    ##Links
    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Relationships

    ### Target Node
    This endpoint shows the target node detail and is automatically embedded.

    ##Actions

    ###Adding Node Links
        Method:        POST
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": {
                           "type": "node_links",             # required
                           "relationships": {
                             "nodes": {
                               "data": {
                                 "type": "nodes",            # required
                                 "id": "{target_node_id}",   # required
                               }
                             }
                           }
                         }
                       }
        Success:       201 CREATED + node link representation

    To add a node link (a pointer to another node), issue a POST request to this endpoint. This effectively creates a
    relationship between the node and the target node. The target node must be described as a relationship object with
    a "data" member, containing the nodes `type` and the target node `id`.

    ##Query Params
    + `page=<Int>` -- page number of results to view, default 1
    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    #This Request/Response
    """
    view_category = 'registrations'
    view_name = 'registration-pointers'
    serializer_class = RegistrationNodeLinksSerializer
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        ContributorOrPublic,
        ReadOnlyIfRegistration,
        base_permissions.TokenHasScope,
        ExcludeWithdrawals,
        NodeLinksShowIfVersion,
    )
    # NULL write scope: registrations are immutable, so links cannot be changed
    # through the registration routes.
    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NULL]
    # TODO: This class doesn't exist
    # model_class = Pointer
class RegistrationNodeLinksDetail(BaseNodeLinksDetail, RegistrationMixin):
    """Node Link details. *Writeable*.

    Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
    Node Links are a direct reference to the node that they point to.

    ##Attributes
    `type` is "node_links"

    None

    ##Links
    *None*

    ##Relationships

    ###Target node
    This endpoint shows the target node detail and is automatically embedded.

    ##Actions

    ###Remove Node Link
        Method:        DELETE
        URL:           /links/self
        Query Params:  <none>
        Success:       204 No Content

    To remove a node link from a node, issue a DELETE request to the `self` link. This request will remove the
    relationship between the node and the target node, not the nodes themselves.

    ##Query Params
    *None*.

    #This Request/Response
    """
    view_category = 'registrations'
    view_name = 'registration-pointer-detail'
    serializer_class = RegistrationNodeLinksSerializer
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        ExcludeWithdrawals,
        NodeLinksShowIfVersion,
    )
    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NULL]
    # TODO: this class doesn't exist
    # model_class = Pointer
    # overrides RetrieveAPIView
    def get_object(self):
        registration = self.get_node()
        if not registration.is_registration:
            raise ValidationError('This is not a registration.')
        # NOTE(review): returns the registration itself rather than a pointer
        # object; presumably the serializer resolves the link — confirm.
        return registration
class RegistrationRegistrationsList(NodeRegistrationsList, RegistrationMixin):
    """List of registrations of a registration."""
    # Behavior is inherited from NodeRegistrationsList; only routing differs.
    view_category = 'registrations'
    view_name = 'registration-registrations'
class RegistrationFilesList(NodeFilesList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_files_list).
    """
    # Uses the registration-specific file serializer.
    view_category = 'registrations'
    view_name = 'registration-files'
    serializer_class = RegistrationFileSerializer
class RegistrationFileDetail(NodeFileDetail, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_files_read).
    """
    # Uses the registration-specific file serializer.
    view_category = 'registrations'
    view_name = 'registration-file-detail'
    serializer_class = RegistrationFileSerializer
class RegistrationInstitutionsList(NodeInstitutionsList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_institutions_list).
    """
    # Behavior is inherited from NodeInstitutionsList; only routing differs.
    view_category = 'registrations'
    view_name = 'registration-institutions'
class RegistrationWikiList(NodeWikiList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_wikis_list).
    """
    # Uses the registration-specific wiki serializer.
    view_category = 'registrations'
    view_name = 'registration-wikis'
    serializer_class = RegistrationWikiSerializer
class RegistrationLinkedNodesList(LinkedNodesList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_linked_nodes_list).
    """
    # Behavior is inherited from LinkedNodesList; only routing differs.
    view_category = 'registrations'
    view_name = 'linked-nodes'
class RegistrationLinkedNodesRelationship(JSONAPIBaseView, generics.RetrieveAPIView, RegistrationMixin):
    """ Relationship Endpoint for Nodes -> Linked Node relationships

    Used to retrieve the ids of the linked nodes attached to this collection. For each id, there
    exists a node link that contains that node.

    ##Actions
    """
    view_category = 'registrations'
    view_name = 'node-pointer-relationship'
    permission_classes = (
        ContributorOrPublicForRelationshipPointers,
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        ReadOnlyIfRegistration,
    )
    required_read_scopes = [CoreScopes.NODE_LINKS_READ]
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = LinkedNodesRelationshipSerializer
    parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
    def get_object(self):
        # Permissions are checked against the composed relationship object
        # below, so the per-node check is skipped here.
        node = self.get_node(check_object_permissions=False)
        auth = get_user_auth(self.request)
        # Only plain linked nodes: collections and registrations are excluded,
        # as are nodes the requester cannot view.
        obj = {'data': [
            linked_node for linked_node in
            node.linked_nodes.filter(is_deleted=False).exclude(type='osf.collection').exclude(type='osf.registration')
            if linked_node.can_view(auth)
        ], 'self': node}
        self.check_object_permissions(self.request, obj)
        return obj
class RegistrationLinkedRegistrationsRelationship(JSONAPIBaseView, generics.RetrieveAPIView, RegistrationMixin):
    """Relationship Endpoint for Registration -> Linked Registration relationships. *Read-only*

    Used to retrieve the ids of the linked registrations attached to this collection. For each id, there
    exists a node link that contains that registration.
    """
    view_category = 'registrations'
    view_name = 'node-registration-pointer-relationship'
    permission_classes = (
        ContributorOrPublicForRelationshipPointers,
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        ReadOnlyIfRegistration,
    )
    required_read_scopes = [CoreScopes.NODE_LINKS_READ]
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = LinkedRegistrationsRelationshipSerializer
    parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON,)
    def get_object(self):
        # Permissions are checked against the composed relationship object
        # below, so the per-node check is skipped here.
        node = self.get_node(check_object_permissions=False)
        auth = get_user_auth(self.request)
        # Only linked registrations visible to the requester are included.
        obj = {
            'data': [
                linked_registration for linked_registration in
                node.linked_nodes.filter(is_deleted=False, type='osf.registration').exclude(type='osf.collection')
                if linked_registration.can_view(auth)
            ],
            'self': node
        }
        self.check_object_permissions(self.request, obj)
        return obj
class RegistrationLinkedRegistrationsList(NodeLinkedRegistrationsList, RegistrationMixin):
    """List of registrations linked to this registration. *Read-only*.

    Linked registrations are the registration nodes pointed to by node links.

    <!--- Copied Spiel from RegistrationDetail -->
    Registrations are read-only snapshots of a project. This view shows details about the given registration.
    Each resource contains the full representation of the registration, meaning additional requests to an individual
    registration's detail view are not necessary. A withdrawn registration will display a limited subset of information,
    namely, title, description, created, registration, withdrawn, date_registered, withdrawal_justification, and
    registration supplement. All other fields will be displayed as null. Additionally, the only relationships permitted
    to be accessed for a withdrawn registration are the contributors - other relationships will return a 403.

    ##Linked Registration Attributes
    <!--- Copied Attributes from RegistrationDetail -->
    Registrations have the "registrations" `type`.

        name                            type               description
        =======================================================================================================
        title                           string             title of the registered project or component
        description                     string             description of the registered node
        category                        string             node category, must be one of the allowed values
        date_created                    iso8601 timestamp  timestamp that the node was created
        date_modified                   iso8601 timestamp  timestamp when the node was last updated
        tags                            array of strings   list of tags that describe the registered node
        current_user_can_comment        boolean            Whether the current user is allowed to post comments
        current_user_permissions        array of strings   list of strings representing the permissions for the current user on this node
        fork                            boolean            is this project a fork?
        registration                    boolean            has this project been registered? (always true - may be deprecated in future versions)
        collection                      boolean            is this registered node a collection? (always false - may be deprecated in future versions)
        node_license                    object             details of the license applied to the node
        year                            string             date range of the license
        copyright_holders               array of strings   holders of the applied license
        public                          boolean            has this registration been made publicly-visible?
        withdrawn                       boolean            has this registration been withdrawn?
        date_registered                 iso8601 timestamp  timestamp that the registration was created
        embargo_end_date                iso8601 timestamp  when the embargo on this registration will be lifted (if applicable)
        withdrawal_justification        string             reasons for withdrawing the registration
        pending_withdrawal              boolean            is this registration pending withdrawal?
        pending_withdrawal_approval     boolean            is this registration pending approval?
        pending_embargo_approval        boolean            is the associated Embargo awaiting approval by project admins?
        registered_meta                 dictionary         registration supplementary information
        registration_supplement         string             registration template

    ##Links
    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Query Params
    + `page=<Int>` -- page number of results to view, default 1
    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
    `description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
    `registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
    that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.

    #This Request/Response
    """
    # Registration results reuse the full registration serializer.
    serializer_class = RegistrationSerializer
    view_category = 'registrations'
    view_name = 'linked-registrations'
class RegistrationViewOnlyLinksList(NodeViewOnlyLinksList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_view_only_links_list).
    """
    # View-only links have their own, registration-specific scopes.
    required_read_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_READ]
    required_write_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_WRITE]
    view_category = 'registrations'
    view_name = 'registration-view-only-links'
class RegistrationViewOnlyLinkDetail(NodeViewOnlyLinkDetail, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_view_only_links_read).
    """
    # View-only links have their own, registration-specific scopes.
    required_read_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_READ]
    required_write_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_WRITE]
    view_category = 'registrations'
    view_name = 'registration-view-only-link-detail'
class RegistrationIdentifierList(RegistrationMixin, NodeIdentifierList):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_identifiers_list).
    """
    # Uses the registration-specific identifier serializer; everything else
    # comes from NodeIdentifierList.
    serializer_class = RegistrationIdentifierSerializer
|
{
"content_hash": "6acb15363d5c4d2070361d5a000c58a3",
"timestamp": "",
"source": "github",
"line_count": 673,
"max_line_length": 150,
"avg_line_length": 40.803863298662705,
"alnum_prop": 0.6965150577182185,
"repo_name": "binoculars/osf.io",
"id": "25d487a64e482fcb96a8edf4a53cf7b2b6b85e95",
"size": "27461",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api/registrations/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "106867"
},
{
"name": "HTML",
"bytes": "236223"
},
{
"name": "JavaScript",
"bytes": "1831128"
},
{
"name": "Mako",
"bytes": "666783"
},
{
"name": "Python",
"bytes": "7866290"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
import SimpleXMLRPCServer as xmls
def echo(msg):
print 'Got', msg
return msg
class echoserver(xmls.SimpleXMLRPCServer):
allow_reuse_address = True
server = echoserver(('127.0.0.1', 8001))
server.register_function(echo, 'echo')
print 'Listening on port 8001'
try:
server.serve_forever()
except:
server.server_close()
|
{
"content_hash": "efee93b31fae91ae9847a66afd43f468",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 42,
"avg_line_length": 21.1875,
"alnum_prop": 0.7138643067846607,
"repo_name": "keobox/yap101",
"id": "7ce16cb7ee2c1e090289468a70fd88401aba8ddc",
"size": "339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/xml-rpc/echoserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "159082"
},
{
"name": "Python",
"bytes": "667"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
}
|
def find_words(letters):
    """Return every word in WORDS spellable from the string *letters*.

    Depends on module globals: WORDS (the set of valid words), PREFIXES
    (every prefix of every valid word) and removed(letters, L) (a copy of
    *letters* with one occurrence of L removed).
    """
    results = set()
    def extend_prefix(w, letters):
        if w in WORDS: results.add(w)
        if w not in PREFIXES: return  # dead end: nothing starts with w
        # BUG FIX: the original 'return extend_prefix(...)' exited after the
        # FIRST letter, so only one branch of the search tree was explored.
        for L in letters:
            extend_prefix(w + L, removed(letters, L))
    extend_prefix('', letters)
    return results
|
{
"content_hash": "f320adfb5df820a107a9475880976857",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 49,
"avg_line_length": 22.90909090909091,
"alnum_prop": 0.6944444444444444,
"repo_name": "feredean/cs313",
"id": "758a7c1bb061591a35f644b3c09e4d739c506101",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notes/test_recursivity-nested.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "210982"
}
],
"symlink_target": ""
}
|
import sys
from Bio import SeqIO
# nanocorrect is zero-based, exclusive endpoints
# Count the records in the FASTA file named on the command line, then print
# one "start:end" range per line covering all records in batches of 50.
recs = len([rec for rec in SeqIO.parse(open(sys.argv[1]), "fasta")])
batch_size = 50
for n in xrange(0, recs, batch_size):
    # The final batch is clamped to the actual record count.
    if (n + batch_size) > recs:
        print "%d:%d" % (n, recs)
    else:
        print "%d:%d" % (n, n + batch_size)
|
{
"content_hash": "f7917e92e461e6cf884b86ba45387435",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 25.75,
"alnum_prop": 0.6440129449838188,
"repo_name": "jts/nanocorrect",
"id": "9d8828973c5d9ea67f21e48a7caec8c73471ec30",
"size": "310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "makerange.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "436671"
},
{
"name": "Makefile",
"bytes": "919"
},
{
"name": "Perl",
"bytes": "573"
},
{
"name": "Python",
"bytes": "7235"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
}
|
"""Code to exist off of google.appengine.dist.
Kept in a separate file from the __init__ module for testing purposes.
"""
__all__ = ['use_library']
try:
import distutils.version
except ImportError:
distutils = None
import os
import sys
server_software = os.getenv('SERVER_SOFTWARE')
USING_SDK = not server_software or server_software.startswith('Dev')
del server_software
_DESIRED_DJANGO_VERSION = 'v0_96'
AUTO_IMPORT_FIXER_FILE = 'auto_import_fixer.py'
def fix_paths(app_path, python_lib_path):
    """Fix the __path__ attr of sys.modules entries.

    Specifically this fixes the path of those sys.modules package entries that
    have __path__ attributes that point to the python library, but where there
    is a similar package in the application's code.

    Args:
      app_path: The root path of the application code.
      python_lib_path: The root path of the python library.
    """
    # Apps shipping their own auto_import_fixer are assumed to manage import
    # paths themselves, so leave sys.modules untouched.
    if os.path.isfile(os.path.join(app_path, AUTO_IMPORT_FIXER_FILE)):
        return
    for module_name, module in sys.modules.items():
        # Only packages (modules with a __path__) can shadow each other.
        if getattr(module, '__path__', None) is None:
            continue
        module_app_path = os.path.join(app_path, *module_name.split('.'))
        module_init_file = os.path.join(module_app_path, '__init__.py')
        # Skip packages with no counterpart in the application's code.
        if not os.path.isfile(module_init_file):
            continue
        found_python_lib_path = False
        found_app_path = False
        for path in module.__path__:
            if path.startswith(python_lib_path):
                found_python_lib_path = True
            if path.startswith(app_path):
                found_app_path = True
        # Library-only packages gain the application directory so submodules
        # can also resolve against the application's copy.
        if found_python_lib_path and not found_app_path:
            module.__path__.append(module_app_path)
try:
    import google
except ImportError:
    import google as google
if not USING_SDK:
    # In production, the library root is three directory levels above the
    # 'google' package (…/PYTHON_LIB/versions/<version>/google).
    this_version = os.path.dirname(os.path.dirname(google.__file__))
    versions = os.path.dirname(this_version)
    PYTHON_LIB = os.path.dirname(versions)
    # sys.path[-1] is presumably the application root here — confirm.
    fix_paths(sys.path[-1], PYTHON_LIB)
    del this_version, versions
else:
    PYTHON_LIB = os.path.dirname(os.path.dirname(google.__file__))
del google
# Maps package name -> (version, explicitly_requested) for packages placed on
# sys.path via use_library()/InstallLibrary().
installed = {}
def SetAllowedModule(_):
    # No-op placeholder; presumably replaced by the runtime to whitelist
    # modules — confirm against the dev_appserver.
    pass
class UnacceptableVersionError(Exception):
    """Raised when a version of a package that is unacceptable is requested."""
    # Carries no extra state; the message includes the requested and the
    # already-installed version.
    pass
class LooseVersion(object):
    """Shallow stand-in compatible with distutils.version.LooseVersion."""
    def __init__(self, version):
        """Store *version* (an iterable of components) as a tuple of strings.

        Args:
          version: iterable containing the version values.
        """
        self.version = tuple(str(component) for component in version)
    def __str__(self):
        return '.'.join(self.version)
    def __repr__(self):
        return str(self)
    @classmethod
    def parse(cls, string):
        """Build a LooseVersion from a dot-delimited version string.

        Args:
          string: dot delimited version string.

        Returns:
          A distutils.version.LooseVersion compatible object.
        """
        return cls(string.split('.'))
def DjangoVersion():
    """Discover the version of Django installed.

    Returns:
      A distutils.version.LooseVersion.
    """
    # Importing the versioned submodule first presumably lets an import hook
    # select the desired Django copy before 'import django' resolves — confirm.
    try:
        __import__('django.' + _DESIRED_DJANGO_VERSION)
    except ImportError:
        pass
    import django
    try:
        return distutils.version.LooseVersion('.'.join(map(str, django.VERSION)))
    except AttributeError:
        # distutils is None on trimmed runtimes; fall back to the local shim.
        return LooseVersion(django.VERSION)
def PylonsVersion():
    """Discover the version of Pylons installed.

    Returns:
      A distutils.version.LooseVersion.
    """
    # Local import: pylons only needs to be importable when this is called.
    import pylons
    return distutils.version.LooseVersion(pylons.__version__)
# Supported packages: name -> (version_discovery_callable,
# {supported_version: dependency set or None}).  Dependencies are sets of
# (name, version) tuples.  '_test'/'_testpkg' are presumably test fixtures.
PACKAGES = {
    'django': (DjangoVersion,
               {'0.96': None,
                '1.0': None,
                '1.1': None,
                '1.2': None,
                '1.3': None,
                }),
    '_test': (lambda: distutils.version.LooseVersion('1.0'), {'1.0': None}),
    '_testpkg': (lambda: distutils.version.LooseVersion('1.0'),
                 {'1.0': set([('_test', '1.0')])}),
    }
def EqualVersions(version, baseline):
    """Test that a version is acceptable as compared to the baseline.

    Meant to be used to compare version numbers as returned by a package itself
    and not user input.

    Args:
      version: distutils.version.LooseVersion.
        The version that is being checked.
      baseline: distutils.version.LooseVersion.
        The version that one hopes version compares equal to.

    Returns:
      A bool indicating whether the versions are considered equal.
    """
    baseline_tuple = baseline.version
    truncated_tuple = version.version[:len(baseline_tuple)]
    # Equal when every component the baseline specifies matches; extra
    # trailing components of 'version' are ignored.  (Idiom fix: return the
    # comparison directly instead of 'if ...: return True / else: False'.)
    return truncated_tuple == baseline_tuple
def AllowInstalledLibrary(name, desired):
    """Allow the use of a package without performing a version check.

    Needed to clear a package's dependencies in case the dependencies need to be
    imported in order to perform a version check. The version check is skipped on
    the dependencies because the assumption is that the package that triggered
    the call would not be installed without the proper dependencies (which might
    be a different version than what the package explicitly requires).

    Args:
      name: Name of package.
      desired: Desired version.

    Raises:
      UnacceptableVersionError if the installed version of a package is
      unacceptable.
    """
    CallSetAllowedModule(name, desired)
    # Recursively allow the whole dependency chain before recording this one.
    dependencies = PACKAGES[name][1][desired]
    if dependencies:
        for dep_name, dep_version in dependencies:
            AllowInstalledLibrary(dep_name, dep_version)
    # Recorded with explicit=False: allowed implicitly, not user-requested.
    installed[name] = desired, False
def CheckInstalledLibrary(name, desired):
    """Check that the library and its dependencies are installed.

    Args:
      name: Name of the library that should be installed.
      desired: The desired version.

    Raises:
      UnacceptableVersionError if the installed version of a package is
      unacceptable.
    """
    # Dependencies are allowed without a version check (see
    # AllowInstalledLibrary); only the requested package itself is verified.
    dependencies = PACKAGES[name][1][desired]
    if dependencies:
        for dep_name, dep_version in dependencies:
            AllowInstalledLibrary(dep_name, dep_version)
    CheckInstalledVersion(name, desired, explicit=True)
def CheckInstalledVersion(name, desired, explicit):
    """Check that the installed version of a package is acceptable.

    Args:
      name: Name of package.
      desired: Desired version string.
      explicit: Explicitly requested by the user or implicitly because of a
        dependency.

    Raises:
      UnacceptableVersionError if the installed version of a package is
      unacceptable.
    """
    CallSetAllowedModule(name, desired)
    find_version = PACKAGES[name][0]
    if name == 'django':
        # Record the requested Django version ('1.2' -> 'v1_2') so
        # DjangoVersion() can try the matching versioned submodule first.
        global _DESIRED_DJANGO_VERSION
        _DESIRED_DJANGO_VERSION = 'v' + desired.replace('.', '_')
    installed_version = find_version()
    try:
        desired_version = distutils.version.LooseVersion(desired)
    except AttributeError:
        # distutils is None on trimmed runtimes; use the local shim.
        desired_version = LooseVersion.parse(desired)
    if not EqualVersions(installed_version, desired_version):
        raise UnacceptableVersionError(
            '%s %s was requested, but %s is already in use' %
            (name, desired_version, installed_version))
    installed[name] = desired, explicit
def CallSetAllowedModule(name, desired):
    """Helper to call SetAllowedModule(name), after special-casing Django."""
    if USING_SDK and name == 'django':
        # Strip every SDK-bundled Django directory from sys.path, then insert
        # the directory for the requested version (if one is bundled).
        sys.path[:] = [dirname
                       for dirname in sys.path
                       if not dirname.startswith(os.path.join(
                           PYTHON_LIB, 'lib', 'django'))]
        if desired in ('0.96', '1.2', '1.3'):
            sys.path.insert(1, os.path.join(PYTHON_LIB, 'lib',
                                            'django_' + desired.replace('.', '_')))
    SetAllowedModule(name)
def CreatePath(name, version):
    """Create the path to a package."""
    versioned_dir = '{0}-{1}'.format(name, version)
    return os.path.join(PYTHON_LIB, 'versions', 'third_party', versioned_dir)
def RemoveLibrary(name):
    """Remove a library that has been installed."""
    installed_version, _ = installed[name]
    path = CreatePath(name, installed_version)
    try:
        sys.path.remove(path)
    except ValueError:
        # The path was already absent from sys.path; removal is best-effort.
        pass
    del installed[name]
def AddLibrary(name, version, explicit):
    """Add a library to sys.path and 'installed'."""
    # Inserted at index 1 (not 0) so the application root keeps priority.
    sys.path.insert(1, CreatePath(name, version))
    installed[name] = version, explicit
def InstallLibrary(name, version, explicit=True):
    """Install a package.

    If the installation is explicit then the user made the installation request,
    not a package as a dependency. Explicit installation leads to stricter
    version checking.

    Args:
      name: Name of the requested package (already validated as available).
      version: The desired version (already validated as available).
      explicit: Explicitly requested by the user or implicitly because of a
        dependency.
    """
    installed_version, explicitly_installed = installed.get(name, [None] * 2)
    if name in sys.modules:
        # Already imported: sys.path changes cannot take effect, so at most
        # verify the version matches the explicit request.
        if explicit:
            CheckInstalledVersion(name, version, explicit=True)
        return
    elif installed_version:
        if version == installed_version:
            return
        if explicit:
            # Two conflicting *explicit* requests are a hard error; an
            # implicit earlier install is simply replaced.
            if explicitly_installed:
                raise ValueError('%s %s requested, but %s already in use' %
                                 (name, version, installed_version))
            RemoveLibrary(name)
        else:
            # Implicit request: keep whichever version is newer.
            version_ob = distutils.version.LooseVersion(version)
            installed_ob = distutils.version.LooseVersion(installed_version)
            if version_ob <= installed_ob:
                return
            else:
                RemoveLibrary(name)
    AddLibrary(name, version, explicit)
    # Install dependencies implicitly (explicit=False).
    dep_details = PACKAGES[name][1][version]
    if not dep_details:
        return
    for dep_name, dep_version in dep_details:
        InstallLibrary(dep_name, dep_version, explicit=False)
def use_library(name, version):
    """Specify a third-party package to use.

    Args:
      name: Name of package to use.
      version: Version of the package to use (string).

    Raises:
      ValueError: if the package or version is not supported.
    """
    if name not in PACKAGES:
        raise ValueError('%s is not a supported package' % name)
    versions = PACKAGES[name][1].keys()
    if version not in versions:
        raise ValueError('%s is not a supported version for %s; '
                         'supported versions are %s' % (version, name, versions))
    # Under the SDK the library is already importable, so only its version is
    # verified; in production it must be added to sys.path.
    if USING_SDK:
        CheckInstalledLibrary(name, version)
    else:
        InstallLibrary(name, version, explicit=True)
# In production, Django 0.96 is placed on sys.path by default; because it is
# installed implicitly (explicit=False), a later use_library('django', ...)
# call can replace it.
if not USING_SDK:
    InstallLibrary('django', '0.96', explicit=False)
|
{
"content_hash": "1d44853c93d8440c780baf6f58402841",
"timestamp": "",
"source": "github",
"line_count": 435,
"max_line_length": 79,
"avg_line_length": 23.680459770114943,
"alnum_prop": 0.6767304145228619,
"repo_name": "illicitonion/givabit",
"id": "289d8c999b3c67699e99a5cd9b1da47d5d1d523b",
"size": "10918",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/sdks/google_appengine_1.7.1/google_appengine/google/appengine/dist/_library.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "87423"
},
{
"name": "C",
"bytes": "2369361"
},
{
"name": "C#",
"bytes": "6965"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Java",
"bytes": "374"
},
{
"name": "JavaScript",
"bytes": "10909968"
},
{
"name": "Python",
"bytes": "27960052"
},
{
"name": "Ruby",
"bytes": "25648"
},
{
"name": "Shell",
"bytes": "8763"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
"""manage test imports."""
import unittest
def suite():
"""collect and run all tests for gateway."""
return unittest.TestLoader().discover("gateway.tests", pattern="*.py")
|
{
"content_hash": "39e8887ab5542bfda5d20816ae75d76b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 74,
"avg_line_length": 26,
"alnum_prop": 0.6758241758241759,
"repo_name": "davisk/gateway",
"id": "aebce2ad9a15b5b5575a93fe89bb1db1061e02fe",
"size": "182",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gateway/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "212650"
},
{
"name": "JavaScript",
"bytes": "23853"
},
{
"name": "Python",
"bytes": "25257"
},
{
"name": "Shell",
"bytes": "2357"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    # Schema-only change: make BatchFile.first_row a nullable/blank JSONField.
    dependencies = [
        ('crowdsourcing', '0004_batchfile_hash_sha512'),
    ]
    operations = [
        migrations.AlterField(
            model_name='batchfile',
            name='first_row',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
        ),
    ]
|
{
"content_hash": "a0ab2a5a150578d76835c06a036a8bc2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 88,
"avg_line_length": 24.57894736842105,
"alnum_prop": 0.6445396145610278,
"repo_name": "aginzberg/crowdsource-platform",
"id": "bcfeb6d4b05fed1caf5d37409625165e0916ffdd",
"size": "537",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop2",
"path": "crowdsourcing/migrations/0005_auto_20151207_2358.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "328687"
},
{
"name": "HTML",
"bytes": "178994"
},
{
"name": "JavaScript",
"bytes": "168588"
},
{
"name": "Python",
"bytes": "339941"
},
{
"name": "Shell",
"bytes": "838"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import inspect
import random
import re
import six
def camelcase_to_underscores(argument):
    '''Converts a camelcase param like theNewAttribute to the equivalent
    python underscore variable like the_new_attribute'''
    pieces = []
    previous_title = True  # suppress a leading underscore on the first char
    for ch in argument:
        is_title = ch.istitle()
        # Insert an underscore only at a lower->UPPER boundary.
        if is_title and not previous_title:
            pieces.append("_")
        previous_title = is_title
        # Whitespace is dropped entirely; everything else is lowercased.
        if not ch.isspace():
            pieces.append(ch.lower())
    return ''.join(pieces)
def underscores_to_camelcase(argument):
    '''Converts a camelcase param like the_new_attribute to the equivalent
    camelcase version like theNewAttribute. Note that the first letter is
    NOT capitalized by this function'''
    out = []
    capitalize_next = False
    for ch in argument:
        if ch == '_':
            # Underscores are dropped; the next character gets uppercased.
            capitalize_next = True
            continue
        out.append(ch.upper() if capitalize_next else ch)
        capitalize_next = False
    return ''.join(out)
def method_names_from_class(clazz):
    """Return the names of all methods defined on *clazz*."""
    # On Python 2, methods are different from functions, and the `inspect`
    # predicates distinguish between them. On Python 3, methods are just
    # regular functions, and `inspect.ismethod` doesn't work, so we have to
    # use `inspect.isfunction` instead
    if six.PY2:
        predicate = inspect.ismethod
    else:
        predicate = inspect.isfunction
    return [x[0] for x in inspect.getmembers(clazz, predicate=predicate)]
def get_random_hex(length=8):
    """Return *length* random lowercase hex characters as text.

    NOTE: uses `random`, so this is not cryptographically secure.
    """
    # chars mixes ints 0-9 with letter strings; six.text_type normalizes both.
    chars = list(range(10)) + ['a', 'b', 'c', 'd', 'e', 'f']
    return ''.join(six.text_type(random.choice(chars)) for x in range(length))
def get_random_message_id():
    """Return a random UUID-shaped id (8-4-4-4-12 hex groups)."""
    return '{0}-{1}-{2}-{3}-{4}'.format(get_random_hex(8), get_random_hex(4), get_random_hex(4), get_random_hex(4), get_random_hex(12))
def convert_regex_to_flask_path(url_path):
    """
    Converts a regex matching url to one that can be used with flask
    """
    # Anchors are meaningless to flask routing.
    url_path = url_path.replace("$", "")
    def _to_flask_converter(match):
        group_name, group_pattern = match.groups()
        return '<regex("{0}"):{1}>'.format(group_pattern, group_name)
    # Rewrite every named regex group (?P<name>pattern) as a flask converter.
    url_path = re.sub(r"\(\?P<(.*?)>(.*?)\)", _to_flask_converter, url_path)
    if url_path.endswith("/?"):
        # Flask does own handling of trailing slashes
        url_path = url_path.rstrip("/?")
    return url_path
class convert_flask_to_httpretty_response(object):
    """Wrap a callback of the form callback(request, url, headers) ->
    (status, headers, response) so it can be used as a flask view."""
    def __init__(self, callback):
        self.callback = callback
    @property
    def __name__(self):
        # For instance methods, use class and method names. Otherwise
        # use module and method name
        if inspect.ismethod(self.callback):
            outer = self.callback.__self__.__class__.__name__
        else:
            outer = self.callback.__module__
        return "{0}.{1}".format(outer, self.callback.__name__)
    def __call__(self, args=None, **kwargs):
        from flask import request
        result = self.callback(request, request.url, {})
        # result is a status, headers, response tuple
        status, headers, response = result
        # Reorder to flask's (body, status, headers) convention.
        return response, status, headers
def iso_8601_datetime_with_milliseconds(datetime):
    """Format *datetime* as ISO-8601 with millisecond precision and a 'Z' suffix."""
    # strftime's %f gives microseconds; dropping the last 3 digits leaves ms.
    full_precision = datetime.strftime("%Y-%m-%dT%H:%M:%S.%f")
    return full_precision[:-3] + 'Z'
def rfc_1123_datetime(datetime):
    """Format *datetime* in RFC 1123 style, e.g. 'Thu, 02 Jan 2020 03:04:05 GMT'."""
    return datetime.strftime('%a, %d %b %Y %H:%M:%S GMT')
def unix_time(dt=None):
    """Seconds since the Unix epoch for naive datetime *dt* (default: utcnow)."""
    if dt is None:
        dt = datetime.datetime.utcnow()
    delta = dt - datetime.datetime.utcfromtimestamp(0)
    seconds = delta.days * 86400 + delta.seconds
    return seconds + delta.microseconds / 1e6
def unix_time_millis(dt=None):
    """Like :func:`unix_time`, but in milliseconds."""
    return 1000.0 * unix_time(dt)
|
{
"content_hash": "8aa7d3ed60a88bdf63faddbb9d92094c",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 135,
"avg_line_length": 31.54471544715447,
"alnum_prop": 0.6257731958762887,
"repo_name": "silveregg/moto",
"id": "0b30556acdff49e8d02c5e0dc3a0572f54eb7e72",
"size": "3880",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moto/core/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "230"
},
{
"name": "Python",
"bytes": "2435907"
}
],
"symlink_target": ""
}
|
import errno
import json
import logging
import math
import time
import unittest
from threading import Event, Thread
import cook.executor as ce
import cook.progress as cp
import tests.utils as tu
class ProgressTest(unittest.TestCase):
    """Unit tests for cook progress reporting (``cook.progress``).

    Covers ProgressWatcher regex matching and file tailing, and
    ProgressUpdater rate limiting / message trimming.

    All regex literals are raw strings so that sequences like backslash-
    caret and backslash-s reach ``re`` verbatim; the previous non-raw
    forms produced identical strings but trigger invalid-escape warnings
    on modern Python.
    """
    def test_match_progress_update(self):
        progress_regex_string = r'\^\^\^\^JOB-PROGRESS:\s+([0-9]*\.?[0-9]+)($|\s+.*)'
        progress_watcher = cp.ProgressWatcher('', '', None, 1, progress_regex_string, None, None, None)
        def match_progress_update(input_string):
            return progress_watcher.match_progress_update(input_string)
        self.assertIsNone(match_progress_update(b'One percent complete'))
        self.assertIsNone(match_progress_update(b'^^^^JOB-PROGRESS: 1done'))
        self.assertIsNone(match_progress_update(b'^^^^JOB-PROGRESS: 1.0done'))
        self.assertIsNone(match_progress_update(b'^^^^JOB-PROGRESS 1 One percent complete'))
        self.assertIsNone(match_progress_update(b'JOB-PROGRESS: 1 One percent complete'))
        self.assertEqual((b'1', b''),
                         match_progress_update(b'^^^^JOB-PROGRESS: 1'))
        self.assertEqual((b'1', b''),
                         match_progress_update(b'^^^^JOB-PROGRESS: 1'))
        self.assertEqual((b'1', b' '),
                         match_progress_update(b'^^^^JOB-PROGRESS: 1 '))
        self.assertEqual((b'1', b' done'),
                         match_progress_update(b'^^^^JOB-PROGRESS: 1 done'))
        self.assertEqual((b'1', b' One percent complete'),
                         match_progress_update(b'^^^^JOB-PROGRESS: 1 One percent complete'))
        self.assertEqual((b'1', b' One percent complete'),
                         match_progress_update(b'^^^^JOB-PROGRESS: 1 One percent complete'))
        self.assertEqual((b'50', b' Fifty percent complete'),
                         match_progress_update(b'^^^^JOB-PROGRESS: 50 Fifty percent complete'))
        # Fractions in progress update are also supported
        self.assertEqual((b'2.2', b''),
                         match_progress_update(b'^^^^JOB-PROGRESS: 2.2'))
        self.assertEqual((b'2.0', b' Two percent complete'),
                         match_progress_update(b'^^^^JOB-PROGRESS: 2.0 Two percent complete'))
        self.assertEqual((b'2.0', b' Two percent complete'),
                         match_progress_update(b'^^^^JOB-PROGRESS: 2.0 Two percent complete'))
        self.assertEqual((b'2.0', b'\tTwo percent complete'),
                         match_progress_update(b'^^^^JOB-PROGRESS: 2.0\tTwo percent complete'))
    def send_progress_message_helper(self, driver, max_message_length):
        """Return a send function that forwards to the driver and asserts the
        progress message stays within max_message_length."""
        def send_progress_message(message):
            ce.send_message(driver, tu.fake_os_error_handler, message)
            self.assertTrue('progress-message' in message)
            self.assertLessEqual(len(message['progress-message']), max_message_length)
            return len(message['progress-message']) <= max_message_length
        return send_progress_message
    def test_send_progress_update(self):
        driver = tu.FakeMesosExecutorDriver()
        task_id = tu.get_random_task_id()
        max_message_length = 30
        poll_interval_ms = 100
        send_progress_message = self.send_progress_message_helper(driver, max_message_length)
        progress_updater = cp.ProgressUpdater(task_id, max_message_length, poll_interval_ms, send_progress_message)
        progress_data_0 = {'progress-message': b' Progress message-0', 'progress-sequence': 1}
        progress_updater.send_progress_update(progress_data_0)
        self.assertEqual(1, len(driver.messages))
        actual_encoded_message_0 = driver.messages[0]
        expected_message_0 = {'progress-message': 'Progress message-0', 'progress-sequence': 1, 'task-id': task_id}
        tu.assert_message(self, expected_message_0, actual_encoded_message_0)
        # A second update inside the poll interval is rate-limited (dropped).
        progress_data_1 = {'progress-message': b' Progress message-1', 'progress-sequence': 2}
        progress_updater.send_progress_update(progress_data_1)
        self.assertEqual(1, len(driver.messages))
        time.sleep(poll_interval_ms / 1000.0)
        progress_data_2 = {'progress-message': b' Progress message-2', 'progress-sequence': 3}
        progress_updater.send_progress_update(progress_data_2)
        self.assertEqual(2, len(driver.messages))
        actual_encoded_message_2 = driver.messages[1]
        expected_message_2 = {'progress-message': 'Progress message-2', 'progress-sequence': 3, 'task-id': task_id}
        tu.assert_message(self, expected_message_2, actual_encoded_message_2)
    def test_send_progress_update_trims_progress_message(self):
        driver = tu.FakeMesosExecutorDriver()
        task_id = tu.get_random_task_id()
        max_message_length = 30
        poll_interval_ms = 10
        send_progress_message = self.send_progress_message_helper(driver, max_message_length)
        progress_updater = cp.ProgressUpdater(task_id, max_message_length, poll_interval_ms, send_progress_message)
        progress_data_0 = {'progress-message': b' Progress message-0 is really long lorem ipsum dolor sit amet text',
                           'progress-sequence': 1}
        progress_updater.send_progress_update(progress_data_0)
        self.assertEqual(1, len(driver.messages))
        actual_encoded_message_0 = driver.messages[0]
        expected_message_0 = {'progress-message': 'Progress message-0 is reall...',
                              'progress-sequence': 1,
                              'task-id': task_id}
        tu.assert_message(self, expected_message_0, actual_encoded_message_0)
    def test_send_progress_does_not_trim_unknown_field(self):
        driver = tu.FakeMesosExecutorDriver()
        task_id = tu.get_random_task_id()
        max_message_length = 30
        poll_interval_ms = 10
        send_progress_message = self.send_progress_message_helper(driver, max_message_length)
        progress_updater = cp.ProgressUpdater(task_id, max_message_length, poll_interval_ms, send_progress_message)
        progress_data_0 = {'progress-message': b' pm',
                           'progress-sequence': 1,
                           'unknown': 'Unknown field has a really long lorem ipsum dolor sit amet exceed limit text'}
        progress_updater.send_progress_update(progress_data_0)
        self.assertEqual(1, len(driver.messages))
        actual_encoded_message_0 = driver.messages[0]
        expected_message_0 = {'progress-message': 'pm',
                              'progress-sequence': 1,
                              'task-id': task_id,
                              'unknown': 'Unknown field has a really long lorem ipsum dolor sit amet exceed limit text'}
        tu.assert_message(self, expected_message_0, actual_encoded_message_0)
    def test_watcher_tail(self):
        file_name = tu.ensure_directory('build/tail_progress_test.' + tu.get_random_task_id())
        items_to_write = 12
        stop = Event()
        completed = Event()
        termination = Event()
        write_sleep_ms = 50
        tail_sleep_ms = 25
        try:
            def write_to_file():
                file = open(file_name, 'w+')
                for item in range(items_to_write):
                    time.sleep(write_sleep_ms / 1000.0)
                    file.write('{}\n'.format(item))
                    file.flush()
                file.close()
                time.sleep(0.15)
                completed.set()
            Thread(target=write_to_file, args=()).start()
            counter = cp.ProgressSequenceCounter()
            watcher = cp.ProgressWatcher(file_name, 'test', counter, 1024, '', stop, completed, termination)
            collected_data = []
            for line in watcher.tail(tail_sleep_ms):
                collected_data.append(line.strip())
            self.assertEqual(items_to_write, len(collected_data))
            self.assertEqual(list(map(lambda x: str.encode(str(x)), range(items_to_write))), collected_data)
        finally:
            tu.cleanup_file(file_name)
    def test_watcher_tail_lot_of_writes(self):
        file_name = tu.ensure_directory('build/tail_progress_test.' + tu.get_random_task_id())
        items_to_write = 250000
        stop = Event()
        completed = Event()
        termination = Event()
        tail_sleep_ms = 25
        try:
            def write_to_file():
                file = open(file_name, 'w+')
                for item in range(items_to_write):
                    file.write('line-{}\n'.format(item))
                    if item % 100 == 0:
                        file.flush()
                file.flush()
                file.close()
                time.sleep(0.15)
                completed.set()
            Thread(target=write_to_file, args=()).start()
            counter = cp.ProgressSequenceCounter()
            watcher = cp.ProgressWatcher(file_name, 'test', counter, 1024, '', stop, completed, termination)
            collected_data = []
            for line in watcher.tail(tail_sleep_ms):
                collected_data.append(line.strip())
            logging.info('Items read: {}'.format(len(collected_data)))
            # Dump everything that was read to aid debugging a mismatch.
            if items_to_write != len(collected_data):
                for index in range(len(collected_data)):
                    logging.info('{}: {}'.format(index, collected_data[index]))
            self.assertEqual(items_to_write, len(collected_data))
            expected_data = list(map(lambda x: str.encode('line-{}'.format(x)), range(items_to_write)))
            self.assertEqual(expected_data, collected_data)
        finally:
            tu.cleanup_file(file_name)
    def test_watcher_tail_with_read_limit(self):
        """Lines longer than max_bytes_read_per_line are split into chunks."""
        file_name = tu.ensure_directory('build/tail_progress_test.' + tu.get_random_task_id())
        stop = Event()
        completed = Event()
        termination = Event()
        tail_sleep_ms = 25
        try:
            def write_to_file():
                file = open(file_name, 'w+')
                file.write('abcd\n')
                file.flush()
                file.write('abcdefghijkl\n')
                file.flush()
                file.write('abcdefghijklmnopqrstuvwxyz\n')
                file.flush()
                file.close()
                time.sleep(0.15)
                completed.set()
            Thread(target=write_to_file, args=()).start()
            counter = cp.ProgressSequenceCounter()
            watcher = cp.ProgressWatcher(file_name, 'test', counter, 10, '', stop, completed, termination)
            collected_data = []
            for line in watcher.tail(tail_sleep_ms):
                collected_data.append(line.strip())
            logging.debug('collected_data = {}'.format(collected_data))
            expected_data = [b'abcd',
                             b'abcdefghij', b'kl',
                             b'abcdefghij', b'klmnopqrst', b'uvwxyz']
            self.assertEqual(expected_data, collected_data)
        finally:
            tu.cleanup_file(file_name)
    def test_collect_progress_updates_one_capture_group(self):
        file_name = tu.ensure_directory('build/collect_progress_test.' + tu.get_random_task_id())
        progress_regex = r'\^\^\^\^JOB-PROGRESS:\s+([0-9]*\.?[0-9]+)$'
        stop = Event()
        completed = Event()
        termination = Event()
        file = open(file_name, 'w+')
        file.flush()
        counter = cp.ProgressSequenceCounter()
        watcher = cp.ProgressWatcher(file_name, 'test', counter, 1024, progress_regex, stop, completed, termination)
        try:
            def print_to_file():
                file.write('Stage One complete\n')
                file.write('^^^^JOB-PROGRESS: 50\n')
                file.write('Stage Three complete\n')
                file.write('^^^^JOB-PROGRESS: 55.0\n')
                file.write('^^^^JOB-PROGRESS: 65.8 Sixty-six percent\n')
                file.write('^^^^JOB-PROGRESS: 98.8\n')
                file.write('^^^^JOB-PROGRESS: 99.8\n')
                file.write('^^^^JOB-PROGRESS: 100.0\n')
                file.write('^^^^JOB-PROGRESS: 198.8\n')
                file.flush()
                file.close()
            print_thread = Thread(target=print_to_file, args=())
            print_thread.start()
            progress_states = [{'progress-message': b'', 'progress-percent': 50, 'progress-sequence': 1},
                               {'progress-message': b'', 'progress-percent': 55, 'progress-sequence': 2},
                               {'progress-message': b'', 'progress-percent': 99, 'progress-sequence': 3},
                               {'progress-message': b'', 'progress-percent': 100, 'progress-sequence': 4},
                               {'progress-message': b'', 'progress-percent': 100, 'progress-sequence': 5}]
            for actual_progress_state in watcher.retrieve_progress_states():
                expected_progress_state = progress_states.pop(0)
                self.assertEqual(expected_progress_state, actual_progress_state)
                self.assertEqual(expected_progress_state, watcher.current_progress())
                if not progress_states:
                    completed.set()
            self.assertFalse(progress_states)
            print_thread.join()
        finally:
            completed.set()
            tu.cleanup_file(file_name)
    def test_collect_progress_updates_two_capture_groups(self):
        file_name = tu.ensure_directory('build/collect_progress_test.' + tu.get_random_task_id())
        progress_regex = r'\^\^\^\^JOB-PROGRESS:\s+([0-9]*\.?[0-9]+)($|\s+.*)'
        stop = Event()
        completed = Event()
        termination = Event()
        file = open(file_name, 'w+')
        file.flush()
        counter = cp.ProgressSequenceCounter()
        watcher = cp.ProgressWatcher(file_name, 'test', counter, 1024, progress_regex, stop, completed, termination)
        try:
            def print_to_file():
                file.write('Stage One complete\n')
                file.write('^^^^JOB-PROGRESS: 25 Twenty-Five\n')
                file.write('^^^^JOB-PROGRESS: 50 Fifty\n')
                file.write('Stage Three complete\n')
                file.write('^^^^JOB-PROGRESS: 55.0 Fifty-five\n')
                file.write('^^^^JOB-PROGRESS: 65.8 Sixty-six\n')
                file.write('Stage Four complete\n')
                file.write('^^^^JOB-PROGRESS: 100 Hundred\n')
                file.write('^^^^JOB-PROGRESS: 100.1 Over a hundred\n')
                file.flush()
                file.close()
            print_thread = Thread(target=print_to_file, args=())
            print_thread.start()
            progress_states = [{'progress-message': b' Twenty-Five', 'progress-percent': 25, 'progress-sequence': 1},
                               {'progress-message': b' Fifty', 'progress-percent': 50, 'progress-sequence': 2},
                               {'progress-message': b' Fifty-five', 'progress-percent': 55, 'progress-sequence': 3},
                               {'progress-message': b' Sixty-six', 'progress-percent': 66, 'progress-sequence': 4},
                               {'progress-message': b' Hundred', 'progress-percent': 100, 'progress-sequence': 5}]
            for actual_progress_state in watcher.retrieve_progress_states():
                expected_progress_state = progress_states.pop(0)
                self.assertEqual(expected_progress_state, actual_progress_state)
                self.assertEqual(expected_progress_state, watcher.current_progress())
                if not progress_states:
                    completed.set()
            self.assertFalse(progress_states)
            print_thread.join()
        finally:
            completed.set()
            tu.cleanup_file(file_name)
    def test_progress_updates_early_termination(self):
        """Updates written after the termination signal are not reported."""
        file_name = tu.ensure_directory('build/collect_progress_test.' + tu.get_random_task_id())
        progress_regex = r'\^\^\^\^JOB-PROGRESS:\s+([0-9]*\.?[0-9]+)($|\s+.*)'
        stop = Event()
        completed = Event()
        termination = Event()
        termination_trigger = Event()
        file = open(file_name, 'w+')
        file.flush()
        counter = cp.ProgressSequenceCounter()
        watcher = cp.ProgressWatcher(file_name, 'test', counter, 1024, progress_regex, stop, completed, termination)
        try:
            def print_to_file():
                file.write('Stage One complete\n')
                file.write('^^^^JOB-PROGRESS: 25 Twenty-Five\n')
                file.write('^^^^JOB-PROGRESS: 50 Fifty\n')
                file.flush()
                logging.info('Awaiting termination_trigger')
                termination_trigger.wait()
                logging.info('termination_trigger has been set')
                termination.set()
                file.write('Stage Three complete\n')
                file.write('^^^^JOB-PROGRESS: 55 Fifty-five\n')
                file.write('Stage Four complete\n')
                file.write('^^^^JOB-PROGRESS: 100 Hundred\n')
                file.flush()
                file.close()
                completed.set()
            print_thread = Thread(target=print_to_file, args=())
            print_thread.daemon = True
            print_thread.start()
            progress_states = [{'progress-message': b' Twenty-Five', 'progress-percent': 25, 'progress-sequence': 1},
                               {'progress-message': b' Fifty', 'progress-percent': 50, 'progress-sequence': 2}]
            for actual_progress_state in watcher.retrieve_progress_states():
                expected_progress_state = progress_states.pop(0)
                self.assertEqual(expected_progress_state, actual_progress_state)
                self.assertEqual(expected_progress_state, watcher.current_progress())
                if expected_progress_state['progress-percent'] == 50:
                    termination_trigger.set()
            self.assertFalse(progress_states)
            print_thread.join()
        finally:
            completed.set()
            tu.cleanup_file(file_name)
    def test_collect_progress_updates_skip_faulty(self):
        """Non-numeric and out-of-range percentages are skipped."""
        file_name = tu.ensure_directory('build/collect_progress_updates_skip_faulty.' + tu.get_random_task_id())
        progress_regex = r'\^\^\^\^JOB-PROGRESS:\s+([0-9]*\.?[0-9]+)($|\s+.*)'
        stop = Event()
        completed = Event()
        termination = Event()
        file = open(file_name, 'w+')
        file.flush()
        counter = cp.ProgressSequenceCounter()
        watcher = cp.ProgressWatcher(file_name, 'test', counter, 1024, progress_regex, stop, completed, termination)
        try:
            def print_to_file():
                file.write('^^^^JOB-PROGRESS: F50 Fifty percent\n')
                file.write('^^^^JOB-PROGRESS: 100.1 Over a hundred percent\n')
                file.write('^^^^JOB-PROGRESS: 200 Two-hundred percent\n')
                file.write('^^^^JOB-PROGRESS: 121212121212121212 Huge percent\n')
                file.write('^^^^JOB-PROGRESS: 075 75% percent\n')
                file.flush()
                file.close()
                completed.set()
            print_thread = Thread(target=print_to_file, args=())
            print_thread.start()
            progress_states = [{'progress-message': b' 75% percent', 'progress-percent': 75, 'progress-sequence': 1}]
            for actual_progress_state in watcher.retrieve_progress_states():
                expected_progress_state = progress_states.pop(0)
                self.assertEqual(expected_progress_state, actual_progress_state)
                self.assertEqual(expected_progress_state, watcher.current_progress())
            self.assertFalse(progress_states)
            print_thread.join()
        finally:
            completed.set()
            tu.cleanup_file(file_name)
    def test_collect_progress_updates_faulty_regex(self):
        """A regex whose first group can match non-numbers still only yields
        valid numeric updates."""
        file_name = tu.ensure_directory('build/collect_progress_updates_skip_faulty.' + tu.get_random_task_id())
        progress_regex = r'\^\^\^\^JOB-PROGRESS: (\S+)(?: )?(.*)'
        stop = Event()
        completed = Event()
        termination = Event()
        file = open(file_name, 'w+')
        file.flush()
        counter = cp.ProgressSequenceCounter()
        watcher = cp.ProgressWatcher(file_name, 'test', counter, 1024, progress_regex, stop, completed, termination)
        try:
            def print_to_file():
                file.write('^^^^JOB-PROGRESS: ABCDEF string percent\n')
                file.write('^^^^JOB-PROGRESS: F50 Fifty percent\n')
                file.write('^^^^JOB-PROGRESS: 1019101010101010101010101018101101010101010110171010110 Sixty percent\n')
                file.write('^^^^JOB-PROGRESS: 75 75% percent\n')
                file.flush()
                file.close()
                completed.set()
            print_thread = Thread(target=print_to_file, args=())
            print_thread.start()
            progress_states = [{'progress-message': b'75% percent', 'progress-percent': 75, 'progress-sequence': 1}]
            for actual_progress_state in watcher.retrieve_progress_states():
                expected_progress_state = progress_states.pop(0)
                self.assertEqual(expected_progress_state, actual_progress_state)
                self.assertEqual(expected_progress_state, watcher.current_progress())
            self.assertFalse(progress_states)
            print_thread.join()
        finally:
            completed.set()
            tu.cleanup_file(file_name)
    def test_collect_progress_updates_dev_null(self):
        """A watcher pointed at /dev/null yields nothing and has no progress."""
        file_name = tu.ensure_directory('build/collect_progress_test.' + tu.get_random_task_id())
        progress_regex = r'\^\^\^\^JOB-PROGRESS:\s+([0-9]*\.?[0-9]+)($|\s+.*)'
        location = '/dev/null'
        stop = Event()
        completed = Event()
        termination = Event()
        file = open(file_name, 'w+')
        file.flush()
        counter = cp.ProgressSequenceCounter()
        dn_watcher = cp.ProgressWatcher(location, 'dn', counter, 1024, progress_regex, stop, completed, termination)
        out_watcher = cp.ProgressWatcher(file_name, 'so', counter, 1024, progress_regex, stop, completed, termination)
        try:
            def print_to_file():
                file.write('Stage One complete\n')
                file.write('^^^^JOB-PROGRESS: 100 100-percent\n')
                file.flush()
                file.close()
                completed.set()
            print_thread = Thread(target=print_to_file, args=())
            print_thread.start()
            progress_states = [{'progress-message': b' 100-percent', 'progress-percent': 100, 'progress-sequence': 1}]
            for actual_progress_state in out_watcher.retrieve_progress_states():
                expected_progress_state = progress_states.pop(0)
                self.assertEqual(expected_progress_state, actual_progress_state)
                self.assertEqual(expected_progress_state, out_watcher.current_progress())
            self.assertFalse(progress_states)
            iterable = dn_watcher.retrieve_progress_states()
            exhausted = object()
            self.assertEqual(exhausted, next(iterable, exhausted))
            self.assertIsNone(dn_watcher.current_progress())
            print_thread.join()
        finally:
            completed.set()
            tu.cleanup_file(file_name)
    def test_collect_progress_updates_lots_of_writes(self):
        file_name = tu.ensure_directory('build/collect_progress_test.' + tu.get_random_task_id())
        progress_regex = r'progress: ([0-9]*\.?[0-9]+), (.*)'
        items_to_write = 250000
        stop = Event()
        completed = Event()
        termination = Event()
        def write_to_file():
            target_file = open(file_name, 'w+')
            unit_progress_granularity = int(items_to_write / 100)
            for item in range(items_to_write):
                remainder = (item + 1) % unit_progress_granularity
                if remainder == 0:
                    progress_percent = math.ceil(item / unit_progress_granularity)
                    target_file.write('progress: {0}, completed-{0}-percent\n'.format(progress_percent))
                    target_file.flush()
                target_file.write('{}\n'.format(item))
            target_file.flush()
            target_file.close()
            time.sleep(0.15)
        write_thread = Thread(target=write_to_file, args=())
        write_thread.daemon = True
        write_thread.start()
        counter = cp.ProgressSequenceCounter()
        watcher = cp.ProgressWatcher(file_name, 'test', counter, 1024, progress_regex, stop, completed, termination)
        try:
            progress_states = list(map(lambda x: {'progress-message': 'completed-{}-percent'.format(x).encode(),
                                                  'progress-percent': x,
                                                  'progress-sequence': x},
                                       range(1, 101)))
            for actual_progress_state in watcher.retrieve_progress_states():
                expected_progress_state = progress_states.pop(0)
                self.assertEqual(expected_progress_state, actual_progress_state)
                self.assertEqual(expected_progress_state, watcher.current_progress())
                if not progress_states:
                    completed.set()
            self.assertFalse(progress_states)
            write_thread.join()
        finally:
            completed.set()
            tu.cleanup_file(file_name)
    def test_collect_progress_updates_with_empty_regex(self):
        """An empty regex disables progress collection entirely."""
        file_name = tu.ensure_directory('build/collect_progress_test.' + tu.get_random_task_id())
        progress_regex = ''
        stop = Event()
        completed = Event()
        termination = Event()
        file = open(file_name, 'w+')
        file.flush()
        counter = cp.ProgressSequenceCounter()
        watcher = cp.ProgressWatcher(file_name, 'test', counter, 1024, progress_regex, stop, completed, termination)
        try:
            def print_to_file():
                file.write('Stage One complete\n')
                file.write('^^^^JOB-PROGRESS: 25 Twenty-Five percent\n')
                file.write('Stage Two complete\n')
                file.write('^^^^JOB-PROGRESS: 50 Fifty percent\n')
                file.write('Stage Three complete\n')
                file.write('^^^^JOB-PROGRESS: 55.0 Fifty-five percent\n')
                file.write('Stage Four complete\n')
                file.write('^^^^JOB-PROGRESS: 100 100-percent\n')
                file.flush()
                file.close()
                completed.set()
            print_thread = Thread(target=print_to_file, args=())
            print_thread.start()
            progress_states = []
            for actual_progress_state in watcher.retrieve_progress_states():
                expected_progress_state = progress_states.pop(0)
                self.assertEqual(expected_progress_state, actual_progress_state)
                self.assertEqual(expected_progress_state, watcher.current_progress())
            self.assertFalse(progress_states)
            self.assertIsNone(watcher.current_progress())
        finally:
            completed.set()
            tu.cleanup_file(file_name)
    def test_retrieve_progress_states_os_error_from_tail(self):
        """OSError raised while tailing propagates out of retrieve_progress_states."""
        class FakeProgressWatcher(cp.ProgressWatcher):
            def __init__(self, output_name, location_tag, sequence_counter, max_bytes_read_per_line,
                         progress_regex_string, stop_signal, task_completed_signal, progress_termination_signal):
                super().__init__(output_name, location_tag, sequence_counter, max_bytes_read_per_line,
                                 progress_regex_string, stop_signal, task_completed_signal, progress_termination_signal)
            def tail(self, sleep_time_ms):
                yield (b'Stage One complete')
                yield (b'progress: 25 Twenty-Five percent')
                raise OSError(errno.ENOMEM, 'No Memory')
        regex = r'progress: ([0-9]*\.?[0-9]+) (.*)'
        counter = cp.ProgressSequenceCounter()
        watcher = FakeProgressWatcher('', '', counter, 1024, regex, Event(), Event(), Event())
        with self.assertRaises(OSError) as context:
            for progress in watcher.retrieve_progress_states():
                self.assertIsNotNone(progress)
        self.assertEqual('No Memory', context.exception.strerror)
    def test_retrieve_progress_states_os_error_from_match_progress_update(self):
        """OSError raised while matching propagates out of retrieve_progress_states."""
        class FakeProgressWatcher(cp.ProgressWatcher):
            def __init__(self, output_name, location_tag, sequence_counter, max_bytes_read_per_line,
                         progress_regex_string, stop_signal, task_completed_signal, progress_termination_signal):
                super().__init__(output_name, location_tag, sequence_counter, max_bytes_read_per_line,
                                 progress_regex_string, stop_signal, task_completed_signal, progress_termination_signal)
            def tail(self, sleep_time_ms):
                yield (b'Stage One complete')
                yield (b'progress: 25 Twenty-Five percent')
                yield (b'Stage Two complete')
            def match_progress_update(self, input_data):
                # Fail only after the first successful update has been recorded.
                if self.current_progress() is not None:
                    raise OSError(errno.ENOMEM, 'No Memory')
                else:
                    return super().match_progress_update(input_data)
        regex = r'progress: ([0-9]*\.?[0-9]+) (.*)'
        counter = cp.ProgressSequenceCounter()
        watcher = FakeProgressWatcher('', '', counter, 1024, regex, Event(), Event(), Event())
        with self.assertRaises(OSError) as context:
            for progress in watcher.retrieve_progress_states():
                self.assertIsNotNone(progress)
        self.assertEqual('No Memory', context.exception.strerror)
|
{
"content_hash": "ca0247d01a391c43ebd89915bd258ec4",
"timestamp": "",
"source": "github",
"line_count": 648,
"max_line_length": 120,
"avg_line_length": 46.248456790123456,
"alnum_prop": 0.5708899195835697,
"repo_name": "twosigma/Cook",
"id": "96eae55f594d6552c8fa48b0b2e85dd6a4760124",
"size": "29969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "executor/tests/test_progress.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "Clojure",
"bytes": "2506248"
},
{
"name": "Dockerfile",
"bytes": "2638"
},
{
"name": "Java",
"bytes": "268686"
},
{
"name": "Jupyter Notebook",
"bytes": "8047"
},
{
"name": "Makefile",
"bytes": "638"
},
{
"name": "Python",
"bytes": "978718"
},
{
"name": "Shell",
"bytes": "51541"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: redis
short_description: Various redis commands, slave and flush
description:
- Unified utility to interact with redis instances.
version_added: "1.3"
options:
command:
description:
- The selected redis command
- C(config) (new in 1.6), ensures a configuration setting on an instance.
- C(flush) flushes all the instance or a specified db.
- C(slave) sets a redis instance in slave or master mode.
required: true
choices: [ config, flush, slave ]
login_password:
description:
- The password used to authenticate with (usually not used)
login_host:
description:
- The host running the database
default: localhost
login_port:
description:
- The port to connect to
default: 6379
master_host:
description:
- The host of the master instance [slave command]
master_port:
description:
- The port of the master instance [slave command]
slave_mode:
description:
- the mode of the redis instance [slave command]
default: slave
choices: [ master, slave ]
db:
description:
- The database to flush (used in db mode) [flush command]
flush_mode:
description:
- Type of flush (all the dbs in a redis instance or a specific one)
[flush command]
default: all
choices: [ all, db ]
name:
description:
- A redis config key.
version_added: 1.6
value:
description:
- A redis config value.
version_added: 1.6
notes:
- Requires the redis-py Python package on the remote host. You can
install it with pip (pip install redis) or with a package manager.
https://github.com/andymccurdy/redis-py
- If the redis master instance we are making slave of is password protected
this needs to be in the redis.conf in the masterauth variable
requirements: [ redis ]
author: "Xabier Larrakoetxea (@slok)"
'''
EXAMPLES = '''
- name: Set local redis instance to be slave of melee.island on port 6377
redis:
command: slave
master_host: melee.island
master_port: 6377
- name: Deactivate slave mode
redis:
command: slave
slave_mode: master
- name: Flush all the redis db
redis:
command: flush
flush_mode: all
- name: Flush only one db in a redis instance
redis:
command: flush
db: 1
flush_mode: db
- name: Configure local redis to have 10000 max clients
redis:
command: config
name: maxclients
value: 10000
- name: Configure local redis to have lua time limit of 100 ms
redis:
command: config
name: lua-time-limit
value: 100
'''
import traceback
# redis-py is optional at import time; main() reports a helpful error
# (with the captured traceback) when the library is missing.
REDIS_IMP_ERR = None
try:
    import redis
except ImportError:
    REDIS_IMP_ERR = traceback.format_exc()
    redis_found = False
else:
    redis_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
# Redis module specific support methods.
def set_slave_mode(client, master_host, master_port):
    """Make the instance behind ``client`` a slave of the given master.

    Returns the client's reply, or False when the command raises.
    """
    try:
        reply = client.slaveof(master_host, master_port)
    except Exception:
        return False
    return reply
def set_master_mode(client):
    """Promote the instance behind ``client`` back to master.

    Returns the client's reply, or False when the command raises.
    """
    try:
        reply = client.slaveof()
    except Exception:
        return False
    return reply
def flush(client, db=None):
    """Flush all databases, or only the connected one when ``db`` is an int.

    Returns the client's reply, or False when the command raises.
    """
    flush_single_db = isinstance(db, int)
    try:
        if flush_single_db:
            # The passed client has been connected to the database already
            return client.flushdb()
        return client.flushall()
    except Exception:
        return False
# Module execution.
def main():
    """Entry point: parse module arguments and dispatch to the
    slave / flush / config command handlers, exiting via module.exit_json
    or module.fail_json."""
    module = AnsibleModule(
        argument_spec=dict(
            command=dict(type='str', choices=['config', 'flush', 'slave']),
            login_password=dict(type='str', no_log=True),
            login_host=dict(type='str', default='localhost'),
            login_port=dict(type='int', default=6379),
            master_host=dict(type='str'),
            master_port=dict(type='int'),
            slave_mode=dict(type='str', default='slave', choices=['master', 'slave']),
            db=dict(type='int'),
            flush_mode=dict(type='str', default='all', choices=['all', 'db']),
            name=dict(type='str'),
            value=dict(type='str')
        ),
        supports_check_mode=True,
    )
    # Fail early (with the captured import traceback) if redis-py is absent.
    if not redis_found:
        module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
    login_password = module.params['login_password']
    login_host = module.params['login_host']
    login_port = module.params['login_port']
    command = module.params['command']
    # Slave Command section -----------
    if command == "slave":
        master_host = module.params['master_host']
        master_port = module.params['master_port']
        mode = module.params['slave_mode']
        # Check if we have all the data
        if mode == "slave":  # Only need data if we want to be slave
            if not master_host:
                module.fail_json(msg='In slave mode master host must be provided')
            if not master_port:
                module.fail_json(msg='In slave mode master port must be provided')
        # Connect and check
        r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
        try:
            r.ping()
        except Exception as e:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
        # Check if we are already in the mode that we want
        info = r.info()
        if mode == "master" and info["role"] == "master":
            module.exit_json(changed=False, mode=mode)
        elif mode == "slave" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port:
            status = dict(
                status=mode,
                master_host=master_host,
                master_port=master_port,
            )
            module.exit_json(changed=False, mode=status)
        else:
            # Do the stuff
            # (Check Check_mode before commands so the commands aren't evaluated
            # if not necessary)
            if mode == "slave":
                if module.check_mode or\
                        set_slave_mode(r, master_host, master_port):
                    info = r.info()
                    status = {
                        'status': mode,
                        'master_host': master_host,
                        'master_port': master_port,
                    }
                    module.exit_json(changed=True, mode=status)
                else:
                    module.fail_json(msg='Unable to set slave mode')
            else:
                if module.check_mode or set_master_mode(r):
                    module.exit_json(changed=True, mode=mode)
                else:
                    module.fail_json(msg='Unable to set master mode')
    # flush Command section -----------
    elif command == "flush":
        db = module.params['db']
        mode = module.params['flush_mode']
        # Check if we have all the data
        if mode == "db":
            if db is None:
                module.fail_json(msg="In db mode the db number must be provided")
        # Connect and check
        r = redis.StrictRedis(host=login_host, port=login_port, password=login_password, db=db)
        try:
            r.ping()
        except Exception as e:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
        # Do the stuff
        # (Check Check_mode before commands so the commands aren't evaluated
        # if not necessary)
        if mode == "all":
            if module.check_mode or flush(r):
                module.exit_json(changed=True, flushed=True)
            else:  # Flush never fails :)
                module.fail_json(msg="Unable to flush all databases")
        else:
            if module.check_mode or flush(r, db):
                module.exit_json(changed=True, flushed=True, db=db)
            else:  # Flush never fails :)
                module.fail_json(msg="Unable to flush '%d' database" % db)
    elif command == 'config':
        name = module.params['name']
        value = module.params['value']
        r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
        try:
            r.ping()
        except Exception as e:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
        try:
            old_value = r.config_get(name)[name]
        except Exception as e:
            module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc())
        changed = old_value != value
        # Only write when the value actually differs and we are not in check mode.
        if module.check_mode or not changed:
            module.exit_json(changed=changed, name=name, value=value)
        else:
            try:
                r.config_set(name, value)
            except Exception as e:
                module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc())
            module.exit_json(changed=changed, name=name, value=value)
    else:
        module.fail_json(msg='A valid command must be provided')
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
{
"content_hash": "5051b7ad042711f75ea225d26d9e3887",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 135,
"avg_line_length": 32.66329966329966,
"alnum_prop": 0.5812802803834656,
"repo_name": "thaim/ansible",
"id": "0a27c4e1b17489833d8f2df5aadbd9ac938753e3",
"size": "9867",
"binary": false,
"copies": "41",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/database/misc/redis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
""" QiBuild """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
print("this is foo")
|
{
"content_hash": "4564fb968bd5174b446c010f87ca6025",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 39,
"avg_line_length": 25.833333333333332,
"alnum_prop": 0.7161290322580646,
"repo_name": "aldebaran/qibuild",
"id": "1dba1b80a62ac7991d3a492dbb0296b429eda678",
"size": "352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/qipy/test/projects/foomodules/foo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6892"
},
{
"name": "C++",
"bytes": "23130"
},
{
"name": "CMake",
"bytes": "292637"
},
{
"name": "Makefile",
"bytes": "755"
},
{
"name": "Nix",
"bytes": "563"
},
{
"name": "Python",
"bytes": "1581825"
},
{
"name": "SWIG",
"bytes": "306"
},
{
"name": "Shell",
"bytes": "888"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from math import floor
from django import template
from django.template.loader import get_template
from django.utils.encoding import force_text
from ..bootstrap import css_url, javascript_url, jquery_url, theme_url
from ..html import link_tag
from ..forms import render_button, render_field, render_field_and_label, render_form, render_form_group, render_formset, \
render_label
from ..icons import render_icon
from ..templates import handle_var, parse_token_contents
# Tag library instance; every template tag below is registered on it.
register = template.Library()
@register.simple_tag
def bootstrap_jquery_url():
    """
    Return the full url to the jQuery file to use.

    Default value: ``//code.jquery.com/jquery.min.js``;
    this value is configurable, see Settings section.

    **Tag name**::

        bootstrap_jquery_url

    **usage**::

        {% bootstrap_jquery_url %}
    """
    resolved_url = jquery_url()
    return resolved_url
@register.simple_tag
def bootstrap_javascript_url():
    """
    Return the full url to the Bootstrap JavaScript file.

    Default value: ``None``;
    this value is configurable, see Settings section.

    **Tag name**::

        bootstrap_javascript_url

    **usage**::

        {% bootstrap_javascript_url %}
    """
    resolved_url = javascript_url()
    return resolved_url
@register.simple_tag
def bootstrap_css_url():
    """
    Return the full url to the Bootstrap CSS file.

    Default value: ``None``;
    this value is configurable, see Settings section.

    **Tag name**::

        bootstrap_css_url

    **usage**::

        {% bootstrap_css_url %}
    """
    resolved_url = css_url()
    return resolved_url
@register.simple_tag
def bootstrap_theme_url():
    """
    Return the full url to the Bootstrap theme CSS file

    Default value: ``None``

    this value is configurable, see Settings section

    **Tag name**::

        bootstrap_theme_url

    **usage**::

        {% bootstrap_theme_url %}

    **example**::

        {% bootstrap_theme_url %}
    """
    return theme_url()
@register.simple_tag
def bootstrap_css():
    """
    Return HTML ``<link>`` tags for the configured Bootstrap CSS and theme.

    Adjust urls in settings. If no url is configured, nothing is rendered —
    this is intended behavior.

    **Tag name**::

        bootstrap_css

    **usage**::

        {% bootstrap_css %}
    """
    rendered = []
    for candidate_url in (bootstrap_css_url(), bootstrap_theme_url()):
        if candidate_url:
            rendered.append(link_tag(candidate_url, media='screen'))
    return ''.join(rendered)
@register.simple_tag
def bootstrap_javascript(jquery=False):
    """
    Return HTML ``<script>`` tags for Bootstrap JavaScript.

    Adjust url in settings. If no url is configured, nothing is rendered —
    this is intended behavior.

    **Tag name**::

        bootstrap_javascript

    **Parameters**:

        :jquery: truthy to also include the configured jQuery script first

    **usage**::

        {% bootstrap_javascript jquery=1 %}
    """
    # No async on scripts, not mature enough. See issue #52 and #56
    script_urls = []
    if jquery:
        script_urls.append(bootstrap_jquery_url())
    script_urls.append(bootstrap_javascript_url())
    return ''.join('<script src="{url}"></script>'.format(url=u)
                   for u in script_urls if u)
@register.simple_tag
def bootstrap_formset(*args, **kwargs):
    """
    Render a formset with Bootstrap markup.

    All positional and keyword arguments are forwarded unchanged to
    ``render_formset``.

    **Tag name**::

        bootstrap_formset

    **usage**::

        {% bootstrap_formset formset %}
    """
    return render_formset(*args, **kwargs)
@register.simple_tag
def bootstrap_form(*args, **kwargs):
    """
    Render a form with Bootstrap markup.

    All positional and keyword arguments are forwarded unchanged to
    ``render_form``.

    **Tag name**::

        bootstrap_form

    **usage**::

        {% bootstrap_form form %}
    """
    return render_form(*args, **kwargs)
@register.simple_tag
def bootstrap_field(*args, **kwargs):
    """
    Render a field

    All positional and keyword arguments are forwarded unchanged to
    ``render_field``.

    **Tag name**::

        bootstrap_field

    **Parameters**:

        :args: passed through to ``render_field``
        :kwargs: passed through to ``render_field``

    **usage**::

        {% bootstrap_field form_field %}

    **example**::

        {% bootstrap_field form_field %}
    """
    return render_field(*args, **kwargs)
@register.simple_tag()
def bootstrap_label(*args, **kwargs):
    """
    Render a label with Bootstrap markup.

    All positional and keyword arguments are forwarded unchanged to
    ``render_label``.

    **Tag name**::

        bootstrap_label

    **usage**::

        {% bootstrap_label content %}
    """
    return render_label(*args, **kwargs)
@register.simple_tag
def bootstrap_button(*args, **kwargs):
    """
    Render a button with Bootstrap markup.

    All positional and keyword arguments are forwarded unchanged to
    ``render_button``.

    **Tag name**::

        bootstrap_button

    **usage**::

        {% bootstrap_button content %}
    """
    return render_button(*args, **kwargs)
@register.simple_tag
def bootstrap_icon(icon):
    """
    Render a Bootstrap glyph icon.

    **Tag name**::

        bootstrap_icon

    **Parameters**:

        :icon: icon name, forwarded to ``render_icon``

    **example**::

        {% bootstrap_icon "star" %}
    """
    return render_icon(icon)
@register.tag('buttons')
def bootstrap_buttons(parser, token):
    """
    Render buttons for form

    Compiles a ``{% buttons %}...{% endbuttons %}`` block into a
    :class:`ButtonsNode`.

    **Tag name**::

        buttons

    **Parameters**:

        :parser: template parser, supplied by Django
        :token: tag token, supplied by Django

    **usage**::

        {% buttons %}...{% endbuttons %}
    """
    kwargs = parse_token_contents(parser, token)
    # Everything up to {% endbuttons %} becomes this node's child nodelist.
    kwargs['nodelist'] = parser.parse(('endbuttons', ))
    # Discard the {% endbuttons %} token itself so it is not re-parsed.
    parser.delete_first_token()
    return ButtonsNode(**kwargs)
class ButtonsNode(template.Node):
    """Template node that renders a form group holding submit/reset buttons."""

    def __init__(self, nodelist, args, kwargs, asvar, **kwargs2):
        # kwargs2 absorbs and ignores any extra keyword arguments.
        self.nodelist = nodelist
        self.args = args
        self.kwargs = kwargs
        self.asvar = asvar

    def render(self, context):
        # Resolve stored template variables against the current context.
        resolved_kwargs = {key: handle_var(value, context)
                           for key, value in self.kwargs.items()}
        buttons = []
        submit = resolved_kwargs.get('submit', None)
        reset = resolved_kwargs.get('reset', None)
        if submit:
            buttons.append(bootstrap_button(submit, 'submit'))
        if reset:
            buttons.append(bootstrap_button(reset, 'reset'))
        # Rendered child content follows the generated buttons.
        field_html = ' '.join(buttons) + self.nodelist.render(context)
        resolved_kwargs.update({
            'label': None,
            'field': field_html,
        })
        output = render_form_group(render_field_and_label(**resolved_kwargs))
        if self.asvar:
            # Store in context instead of emitting inline.
            context[self.asvar] = output
            return ''
        return output
@register.simple_tag(takes_context=True)
def bootstrap_messages(context, *args, **kwargs):
    """
    Show django.contrib.messages Messages in Bootstrap alert containers.

    Renders the ``bootstrap3/messages.html`` template with the current
    context; extra args/kwargs are accepted but unused.

    **Tag name**::

        bootstrap_messages

    **usage**::

        {% bootstrap_messages %}
    """
    messages_template = get_template('bootstrap3/messages.html')
    return messages_template.render(context)
@register.inclusion_tag('bootstrap3/pagination.html')
def bootstrap_pagination(page, **kwargs):
    """
    Render pagination for a page.

    Delegates to ``get_pagination_context``; all keyword arguments are
    forwarded, with the ``page`` argument always taking precedence over any
    ``page`` key already present in ``kwargs``.

    **Tag name**::

        bootstrap_pagination

    **usage**::

        {% bootstrap_pagination page %}
    """
    merged_kwargs = dict(kwargs, page=page)
    return get_pagination_context(**merged_kwargs)
def get_pagination_context(page, pages_to_show=11,
                           url=None, size=None, extra=None):
    """
    Generate Bootstrap pagination context from a page object.

    :param page: Page-like object exposing ``number`` and
        ``paginator.num_pages``.
    :param pages_to_show: positive int, width of the page-link window.
    :param url: base url; any existing ``page`` GET parameter is stripped
        and a trailing separator is appended.
    :param size: ``'small'`` or ``'large'`` selects the matching Bootstrap
        pagination size class.
    :param extra: extra query-string fragment appended to the url.
    :raises ValueError: if ``pages_to_show`` is less than 1.
    """
    pages_to_show = int(pages_to_show)
    if pages_to_show < 1:
        raise ValueError(
            "Pagination pages_to_show should be a positive "
            "integer, you specified {pages}".format(pages=pages_to_show))
    num_pages = page.paginator.num_pages
    current_page = page.number
    # Half the window width, minus one so the current page sits centred.
    half_page_num = max(int(floor(pages_to_show / 2)) - 1, 0)
    first_page = max(current_page - half_page_num, 1)
    pages_back = max(first_page - half_page_num, 1) if first_page > 1 else None
    last_page = first_page + pages_to_show - 1
    if pages_back is None:
        # No back-link: spend the spare slot on one more forward page.
        last_page += 1
    last_page = min(last_page, num_pages)
    if last_page < num_pages:
        pages_forward = min(last_page + half_page_num, num_pages)
    else:
        pages_forward = None
    if first_page > 1:
        first_page -= 1
    # A back-link that lands on page 1 is redundant; drop it.
    if pages_back is not None and pages_back > 1:
        pages_back -= 1
    else:
        pages_back = None
    pages_shown = list(range(first_page, last_page + 1))
    # Append proper character to url
    if url:
        # Remove existing page GET parameters
        url = force_text(url)
        url = re.sub(r'\?page\=[^\&]+', '?', url)
        url = re.sub(r'\&page\=[^\&]+', '', url)
        # Append proper separator
        url += '&' if '?' in url else '?'
    # Append extra string to url
    if extra:
        if not url:
            url = '?'
        url += force_text(extra) + '&'
    if url:
        url = url.replace('?&', '?')
    # Set CSS classes, see twitter.github.io/bootstrap/components.html#pagination
    pagination_css_classes = ['pagination']
    size_class = {'small': 'pagination-sm', 'large': 'pagination-lg'}.get(size)
    if size_class:
        pagination_css_classes.append(size_class)
    # Build context object
    return {
        'bootstrap_pagination_url': url,
        'num_pages': num_pages,
        'current_page': current_page,
        'first_page': first_page,
        'last_page': last_page,
        'pages_shown': pages_shown,
        'pages_back': pages_back,
        'pages_forward': pages_forward,
        'pagination_css_classes': ' '.join(pagination_css_classes),
    }
|
{
"content_hash": "6c35af5fec9b7677ba07e5c012f25a71",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 122,
"avg_line_length": 21.434535104364326,
"alnum_prop": 0.554621104815864,
"repo_name": "rumz/pis-system",
"id": "a13d65500e860fe3acfcf5b54bbef42d35089bc9",
"size": "11296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pis_system/bootstrap3/templatetags/bootstrap3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "954"
},
{
"name": "HTML",
"bytes": "13171"
},
{
"name": "JavaScript",
"bytes": "6045"
},
{
"name": "Python",
"bytes": "54395"
}
],
"symlink_target": ""
}
|
import os
import logging
import socket
from dessn.framework.fitter import Fitter
from dessn.framework.models.approx_model import ApproximateModel
from dessn.framework.simulations.snana_bulk import SNANABulkSimulation
from dessn.framework.simulations.selection_effects import lowz_sel, des_sel
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
plot_dir = os.path.dirname(os.path.abspath(__file__)) + "/plots/%s/" % os.path.basename(__file__)[:-3]
dir_name = plot_dir + "output/"
pfn = plot_dir + os.path.basename(__file__)[:-3]
file = os.path.abspath(__file__)
print(dir_name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
model = ApproximateModel()
# Turn off mass and skewness for easy test
simulation = [SNANABulkSimulation(152, sim="SHINTON_LOWZ_MATRIX_G10_SYMC_SYMX1", manual_selection=lowz_sel(), num_calib=50),
SNANABulkSimulation(208, sim="SHINTON_DES_MATRIX_G10_SYMC_SYMX1", manual_selection=des_sel(), num_calib=21)]
fitter = Fitter(dir_name)
fitter.set_models(model)
fitter.set_simulations(simulation)
fitter.set_num_cosmologies(120)
fitter.set_num_walkers(1)
fitter.set_max_steps(5000)
h = socket.gethostname()
if h != "smp-hk5pn72": # The hostname of my laptop. Only will work for me, ha!
fitter.fit(file)
else:
from chainconsumer import ChainConsumer
m, s, chain, truth, weight, old_weight, posterior = fitter.load()
c = ChainConsumer()
c.add_chain(chain, weights=weight, posterior=posterior, name="Approx")
c.configure(spacing=1.0)
parameters = [r"$\Omega_m$", r"$\alpha$", r"$\beta$", r"$\langle M_B \rangle$"]
print(c.analysis.get_latex_table(transpose=True))
c.plotter.plot(filename=pfn + ".png", truth=truth, parameters=parameters)
print("Plotting distributions")
c = ChainConsumer()
c.add_chain(chain, weights=weight, posterior=posterior, name="Approx")
c.configure(label_font_size=10, tick_font_size=10, diagonal_tick_labels=False)
c.plotter.plot_distributions(filename=pfn + "_dist.png", truth=truth, col_wrap=8)
|
{
"content_hash": "78634515e102d65685799936c6c76afc",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 128,
"avg_line_length": 42.01923076923077,
"alnum_prop": 0.6718535469107552,
"repo_name": "dessn/sn-bhm",
"id": "1c0c8e02edcad0b718dd139edf2affab99db515b",
"size": "2185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dessn/configurations/old/approximate_bulk_g10_gauss_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "316"
},
{
"name": "HTML",
"bytes": "140"
},
{
"name": "Python",
"bytes": "342893"
},
{
"name": "Shell",
"bytes": "2079"
},
{
"name": "Stan",
"bytes": "59737"
},
{
"name": "TeX",
"bytes": "595827"
}
],
"symlink_target": ""
}
|
import operator
from datetime import datetime, timedelta
import warnings
from itertools import product, starmap
import numpy as np
import pytest
import pytz
import pandas as pd
import pandas.util.testing as tm
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import PerformanceWarning, NullFrequencyError
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.core import ops
from pandas import (
Timestamp, Timedelta, Period, Series, date_range, NaT,
DatetimeIndex, TimedeltaIndex)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64DataFrameComparison(object):
    """Comparison ops between datetime64 DataFrames and scalars."""

    @pytest.mark.parametrize('timestamps', [
        [pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2,
        [pd.Timestamp('2012-01-01 13:00:00')] * 2])
    def test_tz_aware_scalar_comparison(self, timestamps):
        # GH#15966
        # Comparing a datetime column (tz-aware or naive) to an int scalar
        # should be all-False, not raise.
        df = pd.DataFrame({'test': timestamps})
        expected = pd.DataFrame({'test': [False, False]})
        tm.assert_frame_equal(df == -1, expected)

    def test_dt64_nat_comparison(self):
        # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
        ts = pd.Timestamp.now()
        df = pd.DataFrame([ts, pd.NaT])
        expected = pd.DataFrame([True, False])
        result = df == ts
        tm.assert_frame_equal(result, expected)
class TestDatetime64SeriesComparison(object):
    """Comparison ops between datetime64 Series and scalars/other Series."""
    # TODO: moved from tests.series.test_operators; needs cleanup

    def test_comparison_invalid(self):
        # GH#4968
        # invalid date/int comparisons
        ser = Series(range(5))
        ser2 = Series(pd.date_range('20010101', periods=5))

        # Both operand orders: eq/ne are all-False/all-True, ordered
        # comparisons raise TypeError.
        for (x, y) in [(ser, ser2), (ser2, ser)]:
            result = x == y
            expected = Series([False] * 5)
            tm.assert_series_equal(result, expected)

            result = x != y
            expected = Series([True] * 5)
            tm.assert_series_equal(result, expected)

            with pytest.raises(TypeError):
                x >= y
            with pytest.raises(TypeError):
                x > y
            with pytest.raises(TypeError):
                x < y
            with pytest.raises(TypeError):
                x <= y

    @pytest.mark.parametrize('data', [
        [Timestamp('2011-01-01'), NaT, Timestamp('2011-01-03')],
        [Timedelta('1 days'), NaT, Timedelta('3 days')],
        [Period('2011-01', freq='M'), NaT, Period('2011-03', freq='M')]
    ])
    @pytest.mark.parametrize('dtype', [None, object])
    def test_nat_comparisons_scalar(self, dtype, data):
        left = Series(data, dtype=dtype)

        # NaT compares unequal to everything, including itself.
        expected = Series([False, False, False])
        tm.assert_series_equal(left == NaT, expected)
        tm.assert_series_equal(NaT == left, expected)

        expected = Series([True, True, True])
        tm.assert_series_equal(left != NaT, expected)
        tm.assert_series_equal(NaT != left, expected)

        # Every ordered comparison against NaT is False.
        expected = Series([False, False, False])
        tm.assert_series_equal(left < NaT, expected)
        tm.assert_series_equal(NaT > left, expected)
        tm.assert_series_equal(left <= NaT, expected)
        tm.assert_series_equal(NaT >= left, expected)

        tm.assert_series_equal(left > NaT, expected)
        tm.assert_series_equal(NaT < left, expected)
        tm.assert_series_equal(left >= NaT, expected)
        tm.assert_series_equal(NaT <= left, expected)

    def test_series_comparison_scalars(self):
        series = Series(date_range('1/1/2000', periods=10))
        val = datetime(2000, 1, 4)
        result = series > val
        expected = Series([x > val for x in series])
        tm.assert_series_equal(result, expected)

        val = series[5]
        result = series > val
        expected = Series([x > val for x in series])
        tm.assert_series_equal(result, expected)

    def test_dt64_ser_cmp_date_warning(self):
        # https://github.com/pandas-dev/pandas/issues/21359
        # Remove this test and enable invalid test below
        ser = pd.Series(pd.date_range('20010101', periods=10), name='dates')
        date = ser.iloc[0].to_pydatetime().date()

        # Each comparison against a datetime.date should warn (deprecated
        # behavior) but still evaluate for now.
        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser == date
        expected = pd.Series([True] + [False] * 9, name='dates')
        tm.assert_series_equal(result, expected)
        assert "Comparing Series of datetimes " in str(m[0].message)
        assert "will not compare equal" in str(m[0].message)

        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser != date
        tm.assert_series_equal(result, ~expected)
        assert "will not compare equal" in str(m[0].message)

        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser <= date
        tm.assert_series_equal(result, expected)
        assert "a TypeError will be raised" in str(m[0].message)

        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser < date
        tm.assert_series_equal(result, pd.Series([False] * 10, name='dates'))
        assert "a TypeError will be raised" in str(m[0].message)

        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser >= date
        tm.assert_series_equal(result, pd.Series([True] * 10, name='dates'))
        assert "a TypeError will be raised" in str(m[0].message)

        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser > date
        tm.assert_series_equal(result, pd.Series([False] + [True] * 9,
                                                 name='dates'))
        assert "a TypeError will be raised" in str(m[0].message)

    @pytest.mark.skip(reason="GH#21359")
    def test_dt64ser_cmp_date_invalid(self):
        # GH#19800 datetime.date comparison raises to
        # match DatetimeIndex/Timestamp. This also matches the behavior
        # of stdlib datetime.datetime
        ser = pd.Series(pd.date_range('20010101', periods=10), name='dates')
        date = ser.iloc[0].to_pydatetime().date()
        assert not (ser == date).any()
        assert (ser != date).all()
        with pytest.raises(TypeError):
            ser > date
        with pytest.raises(TypeError):
            ser < date
        with pytest.raises(TypeError):
            ser >= date
        with pytest.raises(TypeError):
            ser <= date

    def test_dt64ser_cmp_period_scalar(self):
        ser = Series(pd.period_range('2000-01-01', periods=10, freq='D'))
        val = Period('2000-01-04', freq='D')
        result = ser > val
        expected = Series([x > val for x in ser])
        tm.assert_series_equal(result, expected)

        val = ser[5]
        result = ser > val
        expected = Series([x > val for x in ser])
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("left,right", [
        ("lt", "gt"),
        ("le", "ge"),
        ("eq", "eq"),
        ("ne", "ne"),
    ])
    def test_timestamp_compare_series(self, left, right):
        # see gh-4982
        # Make sure we can compare Timestamps on the right AND left hand side.
        ser = pd.Series(pd.date_range("20010101", periods=10), name="dates")
        s_nat = ser.copy(deep=True)

        ser[0] = pd.Timestamp("nat")
        ser[3] = pd.Timestamp("nat")

        # left/right are mirrored operator names (lt vs gt) so swapping the
        # operand order must give the same result.
        left_f = getattr(operator, left)
        right_f = getattr(operator, right)

        # No NaT
        expected = left_f(ser, pd.Timestamp("20010109"))
        result = right_f(pd.Timestamp("20010109"), ser)
        tm.assert_series_equal(result, expected)

        # NaT
        expected = left_f(ser, pd.Timestamp("nat"))
        result = right_f(pd.Timestamp("nat"), ser)
        tm.assert_series_equal(result, expected)

        # Compare to Timestamp with series containing NaT
        expected = left_f(s_nat, pd.Timestamp("20010109"))
        result = right_f(pd.Timestamp("20010109"), s_nat)
        tm.assert_series_equal(result, expected)

        # Compare to NaT with series containing NaT
        expected = left_f(s_nat, pd.Timestamp("nat"))
        result = right_f(pd.Timestamp("nat"), s_nat)
        tm.assert_series_equal(result, expected)

    def test_timestamp_equality(self):
        # GH#11034
        ser = pd.Series([pd.Timestamp('2000-01-29 01:59:00'), 'NaT'])
        result = ser != ser
        tm.assert_series_equal(result, pd.Series([False, True]))
        result = ser != ser[0]
        tm.assert_series_equal(result, pd.Series([False, True]))
        result = ser != ser[1]
        tm.assert_series_equal(result, pd.Series([True, True]))

        result = ser == ser
        tm.assert_series_equal(result, pd.Series([True, False]))
        result = ser == ser[0]
        tm.assert_series_equal(result, pd.Series([True, False]))
        result = ser == ser[1]
        tm.assert_series_equal(result, pd.Series([False, False]))
class TestDatetimeIndexComparisons(object):
    """Comparison semantics for DatetimeIndex vs scalars, NaT, and arrays."""

    @pytest.mark.parametrize('other', [datetime(2016, 1, 1),
                                       Timestamp('2016-01-01'),
                                       np.datetime64('2016-01-01')])
    def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        if tz is not None:
            if isinstance(other, np.datetime64):
                # no tzaware version available
                return
            other = localize_pydatetime(other, dti.tzinfo)

        result = dti == other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)

        result = dti > other
        expected = np.array([False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = dti >= other
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)

        result = dti < other
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)

        result = dti <= other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)

    # NOTE(review): missing "test_" prefix -- pytest never collects this
    # method, so it silently does not run; rename to
    # test_dti_cmp_non_datetime to activate it.
    def dti_cmp_non_datetime(self, tz_naive_fixture):
        # GH#19301 by convention datetime.date is not considered comparable
        # to Timestamp or DatetimeIndex. This may change in the future.
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)

        other = datetime(2016, 1, 1).date()
        assert not (dti == other).any()
        assert (dti != other).all()
        with pytest.raises(TypeError):
            dti < other
        with pytest.raises(TypeError):
            dti <= other
        with pytest.raises(TypeError):
            dti > other
        with pytest.raises(TypeError):
            dti >= other

    @pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
    def test_dti_eq_null_scalar(self, other, tz_naive_fixture):
        # GH#19301
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        assert not (dti == other).any()

    @pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
    def test_dti_ne_null_scalar(self, other, tz_naive_fixture):
        # GH#19301
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        assert (dti != other).all()

    @pytest.mark.parametrize('other', [None, np.nan])
    def test_dti_cmp_null_scalar_inequality(self, tz_naive_fixture, other):
        # GH#19301
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        with pytest.raises(TypeError):
            dti < other
        with pytest.raises(TypeError):
            dti <= other
        with pytest.raises(TypeError):
            dti > other
        with pytest.raises(TypeError):
            dti >= other

    def test_dti_cmp_nat(self):
        left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
                                 pd.Timestamp('2011-01-03')])
        right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])

        # Exercise both the datetime64 and object-dtype code paths.
        for lhs, rhs in [(left, right),
                         (left.astype(object), right.astype(object))]:
            result = rhs == lhs
            expected = np.array([False, False, True])
            tm.assert_numpy_array_equal(result, expected)

            result = lhs != rhs
            expected = np.array([True, True, False])
            tm.assert_numpy_array_equal(result, expected)

            expected = np.array([False, False, False])
            tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT == rhs, expected)

            expected = np.array([True, True, True])
            tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT != lhs, expected)

            expected = np.array([False, False, False])
            tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT > lhs, expected)

    def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
        fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
        fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])

        didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
                                  '2014-05-01', '2014-07-01'])
        didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
                                  '2014-06-01', '2014-07-01'])
        darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
                         np_datetime64_compat('2014-03-01 00:00Z'),
                         np_datetime64_compat('nat'), np.datetime64('nat'),
                         np_datetime64_compat('2014-06-01 00:00Z'),
                         np_datetime64_compat('2014-07-01 00:00Z')])

        cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]

        # Check pd.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, idx2 in cases:

                result = idx1 < idx2
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx2 > idx1
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= idx2
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx2 >= idx1
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == idx2
                expected = np.array([False, False, False, False, False, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != idx2
                expected = np.array([True, True, True, True, True, False])
                tm.assert_numpy_array_equal(result, expected)

        # Comparisons against the null scalar itself: only != is True.
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
                result = idx1 < val
                expected = np.array([False, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != val
                expected = np.array([True, True, True, True, True, True])
                tm.assert_numpy_array_equal(result, expected)

        # Check pd.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
                result = idx1 < val
                expected = np.array([True, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                expected = np.array([False, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= val
                expected = np.array([True, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                expected = np.array([False, False, True, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == val
                expected = np.array([False, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != val
                expected = np.array([True, True, False, True, True, True])
                tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize('op', [operator.eq, operator.ne,
                                    operator.gt, operator.ge,
                                    operator.lt, operator.le])
    def test_comparison_tzawareness_compat(self, op):
        # GH#18162
        dr = pd.date_range('2016-01-01', periods=6)
        dz = dr.tz_localize('US/Pacific')

        # Mixing aware and naive must raise, in either order.
        with pytest.raises(TypeError):
            op(dr, dz)
        with pytest.raises(TypeError):
            op(dr, list(dz))
        with pytest.raises(TypeError):
            op(dz, dr)
        with pytest.raises(TypeError):
            op(dz, list(dr))

        # Check that there isn't a problem aware-aware and naive-naive do not
        # raise
        assert (dr == dr).all()
        assert (dr == list(dr)).all()
        assert (dz == dz).all()
        assert (dz == list(dz)).all()

        # Check comparisons against scalar Timestamps
        ts = pd.Timestamp('2000-03-14 01:59')
        ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam')

        assert (dr > ts).all()
        with pytest.raises(TypeError):
            op(dr, ts_tz)

        assert (dz > ts_tz).all()
        with pytest.raises(TypeError):
            op(dz, ts)

        # GH#12601: Check comparison against Timestamps and DatetimeIndex
        with pytest.raises(TypeError):
            op(ts, dz)

    @pytest.mark.parametrize('op', [operator.eq, operator.ne,
                                    operator.gt, operator.ge,
                                    operator.lt, operator.le])
    @pytest.mark.parametrize('other', [datetime(2016, 1, 1),
                                       Timestamp('2016-01-01'),
                                       np.datetime64('2016-01-01')])
    def test_scalar_comparison_tzawareness(self, op, other, tz_aware_fixture):
        tz = tz_aware_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        with pytest.raises(TypeError):
            op(dti, other)
        with pytest.raises(TypeError):
            op(other, dti)

    @pytest.mark.parametrize('op', [operator.eq, operator.ne,
                                    operator.gt, operator.ge,
                                    operator.lt, operator.le])
    def test_nat_comparison_tzawareness(self, op):
        # GH#19276
        # tzaware DatetimeIndex should not raise when compared to NaT
        dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
                                '2014-05-01', '2014-07-01'])
        # Only the != operator yields True against NaT.
        expected = np.array([op == operator.ne] * len(dti))
        result = op(dti, pd.NaT)
        tm.assert_numpy_array_equal(result, expected)

        result = op(dti.tz_localize('US/Pacific'), pd.NaT)
        tm.assert_numpy_array_equal(result, expected)

    def test_dti_cmp_str(self, tz_naive_fixture):
        # GH#22074
        # regardless of tz, we expect these comparisons are valid
        tz = tz_naive_fixture
        rng = date_range('1/1/2000', periods=10, tz=tz)
        other = '1/1/2000'

        result = rng == other
        expected = np.array([True] + [False] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng != other
        expected = np.array([False] + [True] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng < other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)

        result = rng <= other
        expected = np.array([True] + [False] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng > other
        expected = np.array([False] + [True] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng >= other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize('other', ['foo', 99, 4.0,
                                       object(), timedelta(days=2)])
    def test_dti_cmp_scalar_invalid(self, other, tz_naive_fixture):
        # GH#22074
        tz = tz_naive_fixture
        rng = date_range('1/1/2000', periods=10, tz=tz)

        result = rng == other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)

        result = rng != other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)

        with pytest.raises(TypeError):
            rng < other
        with pytest.raises(TypeError):
            rng <= other
        with pytest.raises(TypeError):
            rng > other
        with pytest.raises(TypeError):
            rng >= other

    def test_dti_cmp_list(self):
        rng = date_range('1/1/2000', periods=10)

        result = rng == list(rng)
        expected = rng == rng
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize('other', [
        pd.timedelta_range('1D', periods=10),
        pd.timedelta_range('1D', periods=10).to_series(),
        pd.timedelta_range('1D', periods=10).asi8.view('m8[ns]')
    ], ids=lambda x: type(x).__name__)
    def test_dti_cmp_tdi_tzawareness(self, other):
        # GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
        # when comparing against TimedeltaIndex
        dti = date_range('2000-01-01', periods=10, tz='Asia/Tokyo')

        result = dti == other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)

        result = dti != other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)

        with pytest.raises(TypeError):
            dti < other
        with pytest.raises(TypeError):
            dti <= other
        with pytest.raises(TypeError):
            dti > other
        with pytest.raises(TypeError):
            dti >= other

    def test_dti_cmp_object_dtype(self):
        # GH#22074
        dti = date_range('2000-01-01', periods=10, tz='Asia/Tokyo')

        other = dti.astype('O')

        result = dti == other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)

        other = dti.tz_localize(None)
        with pytest.raises(TypeError):
            # tzawareness failure
            dti != other

        other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
        result = dti == other
        expected = np.array([True] * 5 + [False] * 5)
        tm.assert_numpy_array_equal(result, expected)

        with pytest.raises(TypeError):
            dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestFrameArithmetic(object):
    """Arithmetic between boxed datetime64 data and datetime scalars."""

    def test_dt64arr_sub_dtscalar(self, box):
        # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
        # NOTE(review): `box` is presumably the shared fixture boxing the
        # index into Index/Series/DataFrame -- confirm it is defined in
        # conftest, as it is not declared in this file's visible portion.
        idx = pd.date_range('2013-01-01', periods=3)
        idx = tm.box_expected(idx, box)

        ts = pd.Timestamp('2013-01-01')
        # TODO: parametrize over scalar types

        expected = pd.TimedeltaIndex(['0 Days', '1 Day', '2 Days'])
        expected = tm.box_expected(expected, box)

        result = idx - ts
        tm.assert_equal(result, expected)

    def test_df_sub_datetime64_not_ns(self):
        # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
        df = pd.DataFrame(pd.date_range('20130101', periods=3))
        dt64 = np.datetime64('2013-01-01')
        assert dt64.dtype == 'datetime64[D]'
        res = df - dt64
        expected = pd.DataFrame([pd.Timedelta(days=0), pd.Timedelta(days=1),
                                 pd.Timedelta(days=2)])
        tm.assert_frame_equal(res, expected)
class TestTimestampSeriesArithmetic(object):
    """Arithmetic between datetime64 Series and datetime/timedelta
    scalars, DateOffsets, and NaT."""

    def test_timestamp_sub_series(self):
        # Series - Timestamp gives a timedelta64 Series; the reflected
        # subtraction is the negation.
        ser = pd.Series(pd.date_range('2014-03-17', periods=2, freq='D',
                                      tz='US/Eastern'))
        ts = ser[0]

        delta_series = pd.Series([np.timedelta64(0, 'D'),
                                  np.timedelta64(1, 'D')])
        tm.assert_series_equal(ser - ts, delta_series)
        tm.assert_series_equal(ts - ser, -delta_series)

    def test_dt64ser_sub_datetime_dtype(self):
        # Subtracting a stdlib datetime yields timedelta64[ns] dtype.
        ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
        dt = datetime(1993, 6, 22, 13, 30)
        ser = Series([ts])
        result = pd.to_timedelta(np.abs(ser - dt))
        assert result.dtype == 'timedelta64[ns]'

    # -------------------------------------------------------------
    # TODO: This next block of tests came from tests.series.test_operators,
    # needs to be de-duplicated and parametrized over `box` classes

    @pytest.mark.parametrize('klass', [Series, pd.Index])
    def test_sub_datetime64_not_ns(self, klass):
        # GH#7996
        # non-nanosecond datetime64 scalar is upcast before subtraction
        dt64 = np.datetime64('2013-01-01')
        assert dt64.dtype == 'datetime64[D]'

        obj = klass(date_range('20130101', periods=3))
        res = obj - dt64
        expected = klass([Timedelta(days=0), Timedelta(days=1),
                          Timedelta(days=2)])
        tm.assert_equal(res, expected)

        res = dt64 - obj
        tm.assert_equal(res, -expected)

    def test_sub_single_tz(self):
        # GH12290
        # subtraction of two single-element tz-aware Series
        s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
        s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
        result = s1 - s2
        expected = Series([Timedelta('2days')])
        tm.assert_series_equal(result, expected)
        result = s2 - s1
        expected = Series([Timedelta('-2days')])
        tm.assert_series_equal(result, expected)

    def test_dt64tz_series_sub_dtitz(self):
        # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
        # (with same tz) raises, fixed by #19024
        dti = pd.date_range('1999-09-30', periods=10, tz='US/Pacific')
        ser = pd.Series(dti)
        expected = pd.Series(pd.TimedeltaIndex(['0days'] * 10))

        res = dti - ser
        tm.assert_series_equal(res, expected)
        res = ser - dti
        tm.assert_series_equal(res, expected)

    def test_sub_datetime_compat(self):
        # see gh-14088
        # subtracting a tz-aware scalar (datetime or Timestamp) from a
        # tz-aware Series containing NaT propagates NaT
        s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT])
        dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
        exp = Series([Timedelta('1 days'), pd.NaT])
        tm.assert_series_equal(s - dt, exp)
        tm.assert_series_equal(s - Timestamp(dt), exp)

    def test_dt64_series_addsub_timedelta(self):
        # scalar timedeltas/np.timedelta64 objects
        # operate with np.timedelta64 correctly
        s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])

        result = s + np.timedelta64(1, 's')
        result2 = np.timedelta64(1, 's') + s
        expected = Series([Timestamp('20130101 9:01:01'),
                           Timestamp('20130101 9:02:01')])
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result2, expected)

        result = s + np.timedelta64(5, 'ms')
        result2 = np.timedelta64(5, 'ms') + s
        expected = Series([Timestamp('20130101 9:01:00.005'),
                           Timestamp('20130101 9:02:00.005')])
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result2, expected)

    def test_dt64_series_add_tick_DateOffset(self):
        # GH 4532
        # operate with pd.offsets
        ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
        expected = Series([Timestamp('20130101 9:01:05'),
                           Timestamp('20130101 9:02:05')])

        result = ser + pd.offsets.Second(5)
        tm.assert_series_equal(result, expected)

        result2 = pd.offsets.Second(5) + ser
        tm.assert_series_equal(result2, expected)

    def test_dt64_series_sub_tick_DateOffset(self):
        # GH 4532
        # operate with pd.offsets; reflected subtraction is not defined
        ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
        expected = Series([Timestamp('20130101 9:00:55'),
                           Timestamp('20130101 9:01:55')])

        result = ser - pd.offsets.Second(5)
        tm.assert_series_equal(result, expected)

        result2 = -pd.offsets.Second(5) + ser
        tm.assert_series_equal(result2, expected)

        with pytest.raises(TypeError):
            pd.offsets.Second(5) - ser

    @pytest.mark.parametrize('cls_name', ['Day', 'Hour', 'Minute', 'Second',
                                          'Milli', 'Micro', 'Nano'])
    def test_dt64_series_add_tick_DateOffset_smoke(self, cls_name):
        # GH 4532
        # smoke tests for valid DateOffsets (no assertion: just must not
        # raise)
        ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
        offset_cls = getattr(pd.offsets, cls_name)
        ser + offset_cls(5)
        offset_cls(5) + ser

    def test_dt64_series_add_mixed_tick_DateOffset(self):
        # GH 4532
        # operate with pd.offsets, including chained mixed-resolution adds
        s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])

        result = s + pd.offsets.Milli(5)
        result2 = pd.offsets.Milli(5) + s
        expected = Series([Timestamp('20130101 9:01:00.005'),
                           Timestamp('20130101 9:02:00.005')])
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result2, expected)

        result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
        expected = Series([Timestamp('20130101 9:06:00.005'),
                           Timestamp('20130101 9:07:00.005')])
        tm.assert_series_equal(result, expected)

    def test_dt64_series_sub_NaT(self):
        # GH#18808
        # datetime64 Series minus NaT is all-NaT timedelta64, with or
        # without tz
        dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp('19900315')])
        ser = pd.Series(dti)
        res = ser - pd.NaT
        expected = pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]')
        tm.assert_series_equal(res, expected)

        dti_tz = dti.tz_localize('Asia/Tokyo')
        ser_tz = pd.Series(dti_tz)
        res = ser_tz - pd.NaT
        expected = pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]')
        tm.assert_series_equal(res, expected)

    def test_dt64_series_arith_overflow(self):
        # GH#12534, fixed by #19024
        # out-of-bounds results raise OverflowError; a NaT entry masks the
        # overflowing position so the remaining ops succeed
        dt = pd.Timestamp('1700-01-31')
        td = pd.Timedelta('20000 Days')
        dti = pd.date_range('1949-09-30', freq='100Y', periods=4)
        ser = pd.Series(dti)
        with pytest.raises(OverflowError):
            ser - dt
        with pytest.raises(OverflowError):
            dt - ser
        with pytest.raises(OverflowError):
            ser + td
        with pytest.raises(OverflowError):
            td + ser

        ser.iloc[-1] = pd.NaT
        expected = pd.Series(['2004-10-03', '2104-10-04', '2204-10-04', 'NaT'],
                             dtype='datetime64[ns]')
        res = ser + td
        tm.assert_series_equal(res, expected)
        res = td + ser
        tm.assert_series_equal(res, expected)

        ser.iloc[1:] = pd.NaT
        expected = pd.Series(['91279 Days', 'NaT', 'NaT', 'NaT'],
                             dtype='timedelta64[ns]')
        res = ser - dt
        tm.assert_series_equal(res, expected)
        res = dt - ser
        tm.assert_series_equal(res, -expected)

    def test_datetime64_ops_nat(self):
        # GH 11349
        # NaT propagation through datetime64 Series add/sub; a
        # single-element datetime64 Series on the other side raises
        datetime_series = Series([NaT, Timestamp('19900315')])
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
        single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')

        # subtraction
        tm.assert_series_equal(-NaT + datetime_series,
                               nat_series_dtype_timestamp)
        with pytest.raises(TypeError):
            -single_nat_dtype_datetime + datetime_series

        tm.assert_series_equal(-NaT + nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)
        with pytest.raises(TypeError):
            -single_nat_dtype_datetime + nat_series_dtype_timestamp

        # addition
        # NOTE(review): the next two pairs of assertions are duplicated
        # verbatim — possibly a copy/paste leftover; confirm intent.
        tm.assert_series_equal(nat_series_dtype_timestamp + NaT,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(NaT + nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)

        tm.assert_series_equal(nat_series_dtype_timestamp + NaT,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(NaT + nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)

    # -------------------------------------------------------------
    # Invalid Operations
    # TODO: this block also needs to be de-duplicated and parametrized

    @pytest.mark.parametrize('dt64_series', [
        Series([Timestamp('19900315'), Timestamp('19900315')]),
        Series([pd.NaT, Timestamp('19900315')]),
        Series([pd.NaT, pd.NaT], dtype='datetime64[ns]')])
    @pytest.mark.parametrize('one', [1, 1.0, np.array(1)])
    def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
        # datetime64 Series cannot be multiplied or divided by numerics
        # multiplication
        with pytest.raises(TypeError):
            dt64_series * one
        with pytest.raises(TypeError):
            one * dt64_series

        # division
        with pytest.raises(TypeError):
            dt64_series / one
        with pytest.raises(TypeError):
            one / dt64_series

    @pytest.mark.parametrize('op', ['__add__', '__radd__',
                                    '__sub__', '__rsub__'])
    @pytest.mark.parametrize('tz', [None, 'Asia/Tokyo'])
    def test_dt64_series_add_intlike(self, tz, op):
        # GH#19123
        # int-like scalars/arrays cannot be added to datetime64 Series
        dti = pd.DatetimeIndex(['2016-01-02', '2016-02-03', 'NaT'], tz=tz)
        ser = Series(dti)

        other = Series([20, 30, 40], dtype='uint8')

        pytest.raises(TypeError, getattr(ser, op), 1)
        pytest.raises(TypeError, getattr(ser, op), other)
        pytest.raises(TypeError, getattr(ser, op), other.values)
        pytest.raises(TypeError, getattr(ser, op), pd.Index(other))

    # -------------------------------------------------------------
    # Timezone-Centric Tests

    def test_operators_datetimelike_with_timezones(self):
        # tz-aware datetime64 +/- timedelta behaves like the tz-naive
        # operation re-localized; reflected subtraction raises
        tz = 'US/Eastern'
        dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
                                tz=tz), name='foo')
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan

        td1 = Series(pd.timedelta_range('1 days 1 min', periods=5, freq='H'))
        td2 = td1.copy()
        td2.iloc[1] = np.nan

        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        # odd numpy behavior with scalar timedeltas
        result = td1[0] + dt1
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = td2[0] + dt2
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        with pytest.raises(TypeError):
            td1[0] - dt1

        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        with pytest.raises(TypeError):
            td2[0] - dt2

        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        with pytest.raises(TypeError):
            td1 - dt1
        with pytest.raises(TypeError):
            td2 - dt2
class TestDatetimeIndexArithmetic(object):
# -------------------------------------------------------------
# Invalid Operations
    @pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
    @pytest.mark.parametrize('op', [operator.add, ops.radd,
                                    operator.sub, ops.rsub])
    def test_dti_add_sub_float(self, op, other):
        # float scalars/arrays cannot be added to or subtracted from a
        # DatetimeIndex in either operand order
        dti = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
        with pytest.raises(TypeError):
            op(dti, other)
    def test_dti_add_timestamp_raises(self, box):
        # GH#22163 ensure DataFrame doesn't cast Timestamp to i8
        # (datetime + datetime is undefined)
        idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
        idx = tm.box_expected(idx, box)
        msg = "cannot add"
        with tm.assert_raises_regex(TypeError, msg):
            idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
    def test_dti_add_int(self, tz_naive_fixture, one):
        # Variants of `one` for #19012: adding an integer to a
        # freq-carrying DatetimeIndex shifts by one frequency unit
        tz = tz_naive_fixture
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        result = rng + one
        expected = pd.date_range('2000-01-01 10:00', freq='H',
                                 periods=10, tz=tz)
        tm.assert_index_equal(result, expected)
    def test_dti_iadd_int(self, tz_naive_fixture, one):
        # in-place integer addition shifts by one frequency unit
        tz = tz_naive_fixture
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        expected = pd.date_range('2000-01-01 10:00', freq='H',
                                 periods=10, tz=tz)
        rng += one
        tm.assert_index_equal(rng, expected)
    def test_dti_sub_int(self, tz_naive_fixture, one):
        # integer subtraction shifts back by one frequency unit
        tz = tz_naive_fixture
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        result = rng - one
        expected = pd.date_range('2000-01-01 08:00', freq='H',
                                 periods=10, tz=tz)
        tm.assert_index_equal(result, expected)
    def test_dti_isub_int(self, tz_naive_fixture, one):
        # in-place integer subtraction shifts back by one frequency unit
        tz = tz_naive_fixture
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        expected = pd.date_range('2000-01-01 08:00', freq='H',
                                 periods=10, tz=tz)
        rng -= one
        tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
    @pytest.mark.parametrize('freq', ['H', 'D'])
    @pytest.mark.parametrize('box', [np.array, pd.Index])
    def test_dti_add_intarray_tick(self, box, freq):
        # GH#19959
        # adding an integer array to a tick-frequency DatetimeIndex shifts
        # elementwise by that many frequency units (both operand orders)
        dti = pd.date_range('2016-01-01', periods=2, freq=freq)
        other = box([4, -1])
        expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))])
        result = dti + other
        tm.assert_index_equal(result, expected)
        result = other + dti
        tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize('freq', ['W', 'M', 'MS', 'Q'])
    @pytest.mark.parametrize('box', [np.array, pd.Index])
    def test_dti_add_intarray_non_tick(self, box, freq):
        # GH#19959
        # non-tick frequencies take the slow elementwise path, which is
        # expected to emit a PerformanceWarning
        dti = pd.date_range('2016-01-01', periods=2, freq=freq)
        other = box([4, -1])
        expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))])
        with tm.assert_produces_warning(PerformanceWarning):
            result = dti + other
        tm.assert_index_equal(result, expected)
        with tm.assert_produces_warning(PerformanceWarning):
            result = other + dti
        tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize('box', [np.array, pd.Index])
    def test_dti_add_intarray_no_freq(self, box):
        # GH#19959
        # without a freq there is no unit to multiply the integers by,
        # so integer-array ops raise NullFrequencyError (TypeError for
        # the reflected subtraction)
        dti = pd.DatetimeIndex(['2016-01-01', 'NaT', '2017-04-05 06:07:08'])
        other = box([9, 4, -1])
        with pytest.raises(NullFrequencyError):
            dti + other
        with pytest.raises(NullFrequencyError):
            other + dti
        with pytest.raises(NullFrequencyError):
            dti - other
        with pytest.raises(TypeError):
            other - dti
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
    def test_dti_add_timedeltalike(self, tz_naive_fixture, two_hours, box):
        # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
        # when adding a timedelta-like scalar
        tz = tz_naive_fixture
        rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
        rng = tm.box_expected(rng, box)

        result = rng + two_hours
        expected = pd.date_range('2000-01-01 02:00',
                                 '2000-02-01 02:00', tz=tz)
        expected = tm.box_expected(expected, box)
        tm.assert_equal(result, expected)
    def test_dti_iadd_timedeltalike(self, tz_naive_fixture, two_hours):
        # in-place addition of a timedelta-like scalar
        tz = tz_naive_fixture
        rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
        expected = pd.date_range('2000-01-01 02:00',
                                 '2000-02-01 02:00', tz=tz)
        rng += two_hours
        tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz_naive_fixture, two_hours):
tz = tz_naive_fixture
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - two_hours
tm.assert_index_equal(result, expected)
    def test_dti_isub_timedeltalike(self, tz_naive_fixture, two_hours):
        # in-place subtraction of a timedelta-like scalar
        tz = tz_naive_fixture
        rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
        expected = pd.date_range('1999-12-31 22:00',
                                 '2000-01-31 22:00', tz=tz)
        rng -= two_hours
        tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
    def test_dti_add_tdi(self, tz_naive_fixture):
        # GH#17558
        # DatetimeIndex + TimedeltaIndex (and timedelta64 array), both
        # operand orders
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        tdi = pd.timedelta_range('0 days', periods=10)
        expected = pd.date_range('2017-01-01', periods=10, tz=tz)

        # add with TimedeltaIndex
        result = dti + tdi
        tm.assert_index_equal(result, expected)

        result = tdi + dti
        tm.assert_index_equal(result, expected)

        # add with timedelta64 array
        result = dti + tdi.values
        tm.assert_index_equal(result, expected)

        result = tdi.values + dti
        tm.assert_index_equal(result, expected)
    def test_dti_iadd_tdi(self, tz_naive_fixture):
        # GH#17558
        # in-place addition of TimedeltaIndex / timedelta64 array
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        tdi = pd.timedelta_range('0 days', periods=10)
        expected = pd.date_range('2017-01-01', periods=10, tz=tz)

        # iadd with TimedeltaIndex
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result += tdi
        tm.assert_index_equal(result, expected)

        result = pd.timedelta_range('0 days', periods=10)
        result += dti
        tm.assert_index_equal(result, expected)

        # iadd with timedelta64 array
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result += tdi.values
        tm.assert_index_equal(result, expected)

        # NOTE(review): this stanza repeats the TimedeltaIndex case above
        # verbatim; presumably one side was meant to use .values — confirm.
        result = pd.timedelta_range('0 days', periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
    def test_dti_sub_tdi(self, tz_naive_fixture):
        # GH#17558
        # DatetimeIndex - TimedeltaIndex works; the reflected subtraction
        # (timedelta - datetime) raises
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        tdi = pd.timedelta_range('0 days', periods=10)
        expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')

        # sub with TimedeltaIndex
        result = dti - tdi
        tm.assert_index_equal(result, expected)

        msg = 'cannot subtract .*TimedeltaIndex'
        with tm.assert_raises_regex(TypeError, msg):
            tdi - dti

        # sub with timedelta64 array
        result = dti - tdi.values
        tm.assert_index_equal(result, expected)

        msg = 'cannot subtract DatetimeIndex from'
        with tm.assert_raises_regex(TypeError, msg):
            tdi.values - dti
    def test_dti_isub_tdi(self, tz_naive_fixture):
        # GH#17558
        # in-place subtraction of TimedeltaIndex / timedelta64 array;
        # reflected in-place forms raise
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        tdi = pd.timedelta_range('0 days', periods=10)
        expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')

        # isub with TimedeltaIndex
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result -= tdi
        tm.assert_index_equal(result, expected)

        msg = 'cannot subtract .*TimedeltaIndex'
        with tm.assert_raises_regex(TypeError, msg):
            tdi -= dti

        # isub with timedelta64 array
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result -= tdi.values
        tm.assert_index_equal(result, expected)

        # exact message differs by numpy/pandas path, so accept any of them
        msg = '|'.join(['cannot perform __neg__ with this index type:',
                        'ufunc subtract cannot use operands with types',
                        'cannot subtract DatetimeIndex from'])
        with tm.assert_raises_regex(TypeError, msg):
            tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
    @pytest.mark.parametrize('addend', [
        datetime(2011, 1, 1),
        DatetimeIndex(['2011-01-01', '2011-01-02']),
        DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
        np.datetime64('2011-01-01'),
        Timestamp('2011-01-01')
    ], ids=lambda x: type(x).__name__)
    @pytest.mark.parametrize('tz', [None, 'US/Eastern'])
    def test_add_datetimelike_and_dti(self, addend, tz):
        # GH#9631
        # datetime-like + DatetimeIndex is undefined in either order
        dti = DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize(tz)
        msg = 'cannot add DatetimeIndex and {0}'.format(type(addend).__name__)
        with tm.assert_raises_regex(TypeError, msg):
            dti + addend
        with tm.assert_raises_regex(TypeError, msg):
            addend + dti
# -------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_dti_add_dt64_array_raises(self, tz_naive_fixture):
tz = tz_naive_fixture
dti = pd.date_range('2016-01-01', periods=3, tz=tz)
dtarr = dti.values
with pytest.raises(TypeError):
dti + dtarr
with pytest.raises(TypeError):
dtarr + dti
    def test_dti_sub_dt64_array_naive(self):
        # naive DatetimeIndex minus ndarray[datetime64] (either order)
        # behaves like dti - dti
        dti = pd.date_range('2016-01-01', periods=3, tz=None)
        dtarr = dti.values

        expected = dti - dti
        result = dti - dtarr
        tm.assert_index_equal(result, expected)
        result = dtarr - dti
        tm.assert_index_equal(result, expected)
def test_dti_sub_dt64_array_aware_raises(self, tz_naive_fixture):
tz = tz_naive_fixture
if tz is None:
return
dti = pd.date_range('2016-01-01', periods=3, tz=tz)
dtarr = dti.values
with pytest.raises(TypeError):
dti - dtarr
with pytest.raises(TypeError):
dtarr - dti
    def test_dti_add_td64_array(self, tz_naive_fixture):
        # DatetimeIndex + ndarray[timedelta64] matches + TimedeltaIndex,
        # both operand orders
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=3, tz=tz)
        tdi = pd.TimedeltaIndex(['-1 Day', '-1 Day', '-1 Day'])
        tdarr = tdi.values

        expected = dti + tdi
        result = dti + tdarr
        tm.assert_index_equal(result, expected)
        result = tdarr + dti
        tm.assert_index_equal(result, expected)
    def test_dti_sub_td64_array(self, tz_naive_fixture):
        # DatetimeIndex - ndarray[timedelta64] matches - TimedeltaIndex;
        # the reflected subtraction raises
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=3, tz=tz)
        tdi = pd.TimedeltaIndex(['-1 Day', '-1 Day', '-1 Day'])
        tdarr = tdi.values

        expected = dti - tdi
        result = dti - tdarr
        tm.assert_index_equal(result, expected)

        with pytest.raises(TypeError):
            tdarr - dti
# -------------------------------------------------------------
    def test_sub_dti_dti(self):
        # previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimeDeltaIndex (GH ...)
        dti = date_range('20130101', periods=3)
        dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
        dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
        expected = TimedeltaIndex([0, 0, 0])

        result = dti - dti
        tm.assert_index_equal(result, expected)
        result = dti_tz - dti_tz
        tm.assert_index_equal(result, expected)

        # mixing tz-aware and naive (or two different tzs) raises
        with pytest.raises(TypeError):
            dti_tz - dti
        with pytest.raises(TypeError):
            dti - dti_tz
        with pytest.raises(TypeError):
            dti_tz - dti_tz2

        # isub
        dti -= dti
        tm.assert_index_equal(dti, expected)

        # different length raises ValueError
        dti1 = date_range('20130101', periods=3)
        dti2 = date_range('20130101', periods=4)
        with pytest.raises(ValueError):
            dti1 - dti2

        # NaN propagation
        dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
        dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
        expected = TimedeltaIndex(['1 days', np.nan, np.nan])
        result = dti2 - dti1
        tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize('freq', [None, 'D'])
    def test_sub_period(self, freq, box):
        # GH#13078
        # datetime64 minus Period is not supported, check TypeError in
        # either operand order
        p = pd.Period('2011-01-01', freq='D')
        idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
        idx = tm.box_expected(idx, box)

        with pytest.raises(TypeError):
            idx - p
        with pytest.raises(TypeError):
            p - idx
    @pytest.mark.parametrize('op', [operator.add, ops.radd,
                                    operator.sub, ops.rsub])
    @pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
    @pytest.mark.parametrize('dti_freq', [None, 'D'])
    def test_dti_sub_pi(self, dti_freq, pi_freq, op, box_df_broadcast_failure):
        # GH#20049 subtracting PeriodIndex should raise TypeError
        box = box_df_broadcast_failure

        dti = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=dti_freq)
        pi = dti.to_period(pi_freq)

        dti = tm.box_expected(dti, box)
        # TODO: Also box pi?
        with pytest.raises(TypeError):
            op(dti, pi)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
    @pytest.mark.parametrize('op', [operator.add, operator.sub])
    def test_timedelta64_equal_timedelta_supported_ops(self, op):
        # adding/subtracting an np.timedelta64 composed of D/h/m/s/us
        # components matches the equivalent datetime.timedelta
        ser = Series([Timestamp('20130301'),
                      Timestamp('20130228 23:00:00'),
                      Timestamp('20130228 22:00:00'),
                      Timestamp('20130228 21:00:00')])

        intervals = ['D', 'h', 'm', 's', 'us']

        # TODO: unused
        # npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
        #                   'h': 60 * 60 * 1000000,
        #                   'm': 60 * 1000000,
        #                   's': 1000000,
        #                   'us': 1}

        def timedelta64(*args):
            # build a single np.timedelta64 from per-unit magnitudes
            return sum(starmap(np.timedelta64, zip(args, intervals)))

        for d, h, m, s, us in product(*([range(2)] * 5)):
            nptd = timedelta64(d, h, m, s, us)
            pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
                             microseconds=us)
            lhs = op(ser, nptd)
            rhs = op(ser, pytd)

            tm.assert_series_equal(lhs, rhs)
    def test_ops_nat_mixed_datetime64_timedelta64(self):
        # GH#11349
        # NaT propagation when mixing datetime64 and timedelta64 Series
        timedelta_series = Series([NaT, Timedelta('1s')])
        datetime_series = Series([NaT, Timestamp('19900315')])
        nat_series_dtype_timedelta = Series([NaT, NaT],
                                            dtype='timedelta64[ns]')
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
        single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
        single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')

        # subtraction
        tm.assert_series_equal(datetime_series - single_nat_dtype_datetime,
                               nat_series_dtype_timedelta)

        tm.assert_series_equal(datetime_series - single_nat_dtype_timedelta,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
                               nat_series_dtype_timestamp)

        # without a Series wrapping the NaT, it is ambiguous
        # whether it is a datetime64 or timedelta64
        # defaults to interpreting it as timedelta64
        tm.assert_series_equal(nat_series_dtype_timestamp -
                               single_nat_dtype_datetime,
                               nat_series_dtype_timedelta)

        tm.assert_series_equal(nat_series_dtype_timestamp -
                               single_nat_dtype_timedelta,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(-single_nat_dtype_timedelta +
                               nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)

        # timedelta - datetime is undefined
        with pytest.raises(TypeError):
            timedelta_series - single_nat_dtype_datetime

        # addition
        # NOTE(review): the first addition pair below is repeated
        # verbatim — possibly a copy/paste leftover; confirm intent.
        tm.assert_series_equal(nat_series_dtype_timestamp +
                               single_nat_dtype_timedelta,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(single_nat_dtype_timedelta +
                               nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)

        tm.assert_series_equal(nat_series_dtype_timestamp +
                               single_nat_dtype_timedelta,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(single_nat_dtype_timedelta +
                               nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)

        tm.assert_series_equal(nat_series_dtype_timedelta +
                               single_nat_dtype_datetime,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(single_nat_dtype_datetime +
                               nat_series_dtype_timedelta,
                               nat_series_dtype_timestamp)
    def test_ufunc_coercions(self):
        # np.add / np.subtract ufuncs on DatetimeIndex behave like the
        # operators and preserve/infer freq
        idx = date_range('2011-01-01', periods=3, freq='2D', name='x')

        delta = np.timedelta64(1, 'D')
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '2D'

        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '2D'

        # elementwise array of deltas changes the inferred freq
        delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
                          np.timedelta64(3, 'D')])
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
                                freq='3D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '3D'

        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
                                freq='D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == 'D'
    def test_datetimeindex_sub_timestamp_overflow(self):
        # subtraction near Timestamp.min/max must raise OverflowError when
        # the result is out of bounds, for every scalar representation
        dtimax = pd.to_datetime(['now', pd.Timestamp.max])
        dtimin = pd.to_datetime(['now', pd.Timestamp.min])

        tsneg = Timestamp('1950-01-01')
        ts_neg_variants = [tsneg,
                           tsneg.to_pydatetime(),
                           tsneg.to_datetime64().astype('datetime64[ns]'),
                           tsneg.to_datetime64().astype('datetime64[D]')]

        tspos = Timestamp('1980-01-01')
        ts_pos_variants = [tspos,
                           tspos.to_pydatetime(),
                           tspos.to_datetime64().astype('datetime64[ns]'),
                           tspos.to_datetime64().astype('datetime64[D]')]

        for variant in ts_neg_variants:
            with pytest.raises(OverflowError):
                dtimax - variant

        expected = pd.Timestamp.max.value - tspos.value
        for variant in ts_pos_variants:
            res = dtimax - variant
            assert res[1].value == expected

        expected = pd.Timestamp.min.value - tsneg.value
        for variant in ts_neg_variants:
            res = dtimin - variant
            assert res[1].value == expected

        for variant in ts_pos_variants:
            with pytest.raises(OverflowError):
                dtimin - variant
    def test_datetimeindex_sub_datetimeindex_overflow(self):
        # GH#22492, GH#22508
        # DatetimeIndex - DatetimeIndex overflow checks
        dtimax = pd.to_datetime(['now', pd.Timestamp.max])
        dtimin = pd.to_datetime(['now', pd.Timestamp.min])

        ts_neg = pd.to_datetime(['1950-01-01', '1950-01-01'])
        ts_pos = pd.to_datetime(['1980-01-01', '1980-01-01'])

        # General tests
        expected = pd.Timestamp.max.value - ts_pos[1].value
        result = dtimax - ts_pos
        assert result[1].value == expected

        expected = pd.Timestamp.min.value - ts_neg[1].value
        result = dtimin - ts_neg
        assert result[1].value == expected

        with pytest.raises(OverflowError):
            dtimax - ts_neg

        with pytest.raises(OverflowError):
            dtimin - ts_pos

        # Edge cases: results one microsecond past Timedelta.max/min
        tmin = pd.to_datetime([pd.Timestamp.min])
        t1 = tmin + pd.Timedelta.max + pd.Timedelta('1us')
        with pytest.raises(OverflowError):
            t1 - tmin

        tmax = pd.to_datetime([pd.Timestamp.max])
        t2 = tmax + pd.Timedelta.min - pd.Timedelta('1us')
        with pytest.raises(OverflowError):
            tmax - t2
    @pytest.mark.parametrize('names', [('foo', None, None),
                                       ('baz', 'bar', None),
                                       ('bar', 'bar', 'bar')])
    @pytest.mark.parametrize('tz', [None, 'America/Chicago'])
    def test_dti_add_series(self, tz, names):
        # GH#13905
        # DatetimeIndex + Series[timedelta] returns a Series with the
        # expected name-propagation rules
        index = DatetimeIndex(['2016-06-28 05:30', '2016-06-28 05:31'],
                              tz=tz, name=names[0])
        ser = Series([Timedelta(seconds=5)] * 2,
                     index=index, name=names[1])
        expected = Series(index + Timedelta(seconds=5),
                          index=index, name=names[2])

        # passing name arg isn't enough when names[2] is None
        expected.name = names[2]
        assert expected.dtype == index.dtype
        result = ser + index
        tm.assert_series_equal(result, expected)
        result2 = index + ser
        tm.assert_series_equal(result2, expected)

        # raw ndarray of timedeltas gives back an Index instead
        expected = index + Timedelta(seconds=5)
        result3 = ser.values + index
        tm.assert_index_equal(result3, expected)
        result4 = index + ser.values
        tm.assert_index_equal(result4, expected)
    def test_dti_add_offset_array(self, tz_naive_fixture):
        # GH#18849
        # adding an ndarray of DateOffsets goes elementwise with a
        # PerformanceWarning, both operand orders
        tz = tz_naive_fixture
        dti = pd.date_range('2017-01-01', periods=2, tz=tz)
        other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])

        with tm.assert_produces_warning(PerformanceWarning):
            res = dti + other
        expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
                                 name=dti.name, freq='infer')
        tm.assert_index_equal(res, expected)

        with tm.assert_produces_warning(PerformanceWarning):
            res2 = other + dti
        tm.assert_index_equal(res2, expected)
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_dti_add_offset_index(self, tz_naive_fixture, names):
        # GH#18849, GH#19744
        # adding an Index of DateOffsets: elementwise, with name
        # propagation and a PerformanceWarning
        tz = tz_naive_fixture
        dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
        other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
                         name=names[1])

        with tm.assert_produces_warning(PerformanceWarning):
            res = dti + other
        expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
                                 name=names[2], freq='infer')
        tm.assert_index_equal(res, expected)

        with tm.assert_produces_warning(PerformanceWarning):
            res2 = other + dti
        tm.assert_index_equal(res2, expected)
    def test_dti_sub_offset_array(self, tz_naive_fixture):
        # GH#18824
        # subtracting an ndarray of DateOffsets goes elementwise with a
        # PerformanceWarning
        tz = tz_naive_fixture
        dti = pd.date_range('2017-01-01', periods=2, tz=tz)
        other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])

        with tm.assert_produces_warning(PerformanceWarning):
            res = dti - other
        expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
                                 name=dti.name, freq='infer')
        tm.assert_index_equal(res, expected)
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_dti_sub_offset_index(self, tz_naive_fixture, names):
        # GH#18824, GH#19744
        # subtracting an Index of DateOffsets: elementwise, with name
        # propagation and a PerformanceWarning
        tz = tz_naive_fixture
        dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
        other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
                         name=names[1])

        with tm.assert_produces_warning(PerformanceWarning):
            res = dti - other
        expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
                                 name=names[2], freq='infer')
        tm.assert_index_equal(res, expected)
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_dti_with_offset_series(self, tz_naive_fixture, names):
        # GH#18849
        # DatetimeIndex +/- Series of DateOffsets returns a Series,
        # elementwise, with name propagation and a PerformanceWarning
        tz = tz_naive_fixture
        dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
        other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
                       name=names[1])

        expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
                              name=names[2])

        with tm.assert_produces_warning(PerformanceWarning):
            res = dti + other
        tm.assert_series_equal(res, expected_add)

        with tm.assert_produces_warning(PerformanceWarning):
            res2 = other + dti
        tm.assert_series_equal(res2, expected_add)

        expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
                              name=names[2])

        with tm.assert_produces_warning(PerformanceWarning):
            res3 = dti - other
        tm.assert_series_equal(res3, expected_sub)
def test_dti_add_offset_tzaware(self, tz_aware_fixture, box):
    # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
    timezone = tz_aware_fixture
    if timezone == 'US/Pacific':
        # Extra sanity check for this tz: scalar addition on a single
        # element must agree with the vectorized result.
        dates = date_range('2012-11-01', periods=3, tz=timezone)
        offset = dates + pd.offsets.Hour(5)
        assert dates[0] + pd.offsets.Hour(5) == offset[0]
    dates = date_range('2010-11-01 00:00',
                       periods=3, tz=timezone, freq='H')
    expected = DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00',
                              '2010-11-01 07:00'], freq='H', tz=timezone)
    # Box both operands as the parametrized container (Index/Series/
    # DataFrame) so the dtype check covers each of them.
    dates = tm.box_expected(dates, box)
    expected = tm.box_expected(expected, box)
    # TODO: parametrize over the scalar being added?  radd?  sub?
    # Three equivalent 5-hour shifts must all produce the same result.
    offset = dates + pd.offsets.Hour(5)
    tm.assert_equal(offset, expected)
    offset = dates + np.timedelta64(5, 'h')
    tm.assert_equal(offset, expected)
    offset = dates + timedelta(hours=5)
    tm.assert_equal(offset, expected)
@pytest.mark.parametrize('klass', [Series, DatetimeIndex])
def test_dt64_with_offset_array(klass):
    # GH#10699
    # Adding a box (Series or Index) of offsets applies each offset
    # elementwise; the non-vectorized path emits a PerformanceWarning.
    box = Series if klass is Series else pd.Index
    dti = DatetimeIndex([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
    obj = klass(dti)
    with tm.assert_produces_warning(PerformanceWarning):
        # two different offsets
        result = obj + box([pd.offsets.DateOffset(years=1),
                            pd.offsets.MonthEnd()])
        expected = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')])
        tm.assert_equal(result, expected)

        # the same offset repeated
        result = obj + box([pd.offsets.DateOffset(years=1),
                            pd.offsets.DateOffset(years=1)])
        expected = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
        tm.assert_equal(result, expected)
@pytest.mark.parametrize('klass', [Series, DatetimeIndex])
def test_dt64_with_DateOffsets_relativedelta(klass):
    # GH#10699
    # DateOffset kwargs that hit the relativedelta fastpath must match
    # the result of applying the offset to each Timestamp individually.
    vec = klass([Timestamp('2000-01-05 00:15:00'),
                 Timestamp('2000-01-31 00:23:00'),
                 Timestamp('2000-01-01'),
                 Timestamp('2000-03-31'),
                 Timestamp('2000-02-29'),
                 Timestamp('2000-12-31'),
                 Timestamp('2000-05-15'),
                 Timestamp('2001-06-15')])
    relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
                       ('hours', 5), ('minutes', 10), ('seconds', 2),
                       ('microseconds', 5)]
    for idx, (unit, amount) in enumerate(relative_kwargs):
        # a single relativedelta component at a time
        op = pd.DateOffset(**{unit: amount})
        tm.assert_equal(klass([ts + op for ts in vec]), vec + op)
        tm.assert_equal(klass([ts - op for ts in vec]), vec - op)
        # all components seen so far combined into one offset
        op = pd.DateOffset(**dict(relative_kwargs[:idx + 1]))
        tm.assert_equal(klass([ts + op for ts in vec]), vec + op)
        tm.assert_equal(klass([ts - op for ts in vec]), vec - op)
@pytest.mark.parametrize('cls_and_kwargs', [
    'YearBegin', ('YearBegin', {'month': 5}),
    'YearEnd', ('YearEnd', {'month': 5}),
    'MonthBegin', 'MonthEnd',
    'SemiMonthEnd', 'SemiMonthBegin',
    'Week', ('Week', {'weekday': 3}),
    'Week', ('Week', {'weekday': 6}),
    'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin',
    'CustomBusinessDay', 'CDay', 'CBMonthEnd',
    'CBMonthBegin', 'BMonthBegin', 'BMonthEnd',
    'BusinessHour', 'BYearBegin', 'BYearEnd',
    'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}),
    ('FY5253Quarter', {'qtr_with_extra_week': 1,
                       'startingMonth': 1,
                       'weekday': 2,
                       'variation': 'nearest'}),
    ('FY5253', {'weekday': 0, 'startingMonth': 2, 'variation': 'nearest'}),
    ('WeekOfMonth', {'weekday': 2, 'week': 2}),
    'Easter', ('DateOffset', {'day': 4}),
    ('DateOffset', {'month': 5})])
@pytest.mark.parametrize('normalize', [True, False])
@pytest.mark.parametrize('klass', [Series, DatetimeIndex])
def test_dt64_with_DateOffsets(klass, normalize, cls_and_kwargs):
    # GH#10699
    # assert these are equal on a piecewise basis
    # Vectorized offset arithmetic must agree with applying the same
    # offset to each Timestamp one at a time.
    vec = klass([Timestamp('2000-01-05 00:15:00'),
                 Timestamp('2000-01-31 00:23:00'),
                 Timestamp('2000-01-01'),
                 Timestamp('2000-03-31'),
                 Timestamp('2000-02-29'),
                 Timestamp('2000-12-31'),
                 Timestamp('2000-05-15'),
                 Timestamp('2001-06-15')])
    if isinstance(cls_and_kwargs, tuple):
        # If cls_name param is a tuple, then 2nd entry is kwargs for
        # the offset constructor
        cls_name, kwargs = cls_and_kwargs
    else:
        cls_name = cls_and_kwargs
        kwargs = {}
    offset_cls = getattr(pd.offsets, cls_name)
    with warnings.catch_warnings(record=True):
        # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
        # applied to Series or DatetimeIndex
        # we aren't testing that here, so ignore.
        warnings.simplefilter("ignore", PerformanceWarning)
        for n in [0, 5]:
            if (cls_name in ['WeekOfMonth', 'LastWeekOfMonth',
                             'FY5253Quarter', 'FY5253'] and n == 0):
                # passing n = 0 is invalid for these offset classes
                continue
            offset = offset_cls(n, normalize=normalize, **kwargs)
            # Check add, sub and radd against the piecewise results.
            tm.assert_equal(klass([x + offset for x in vec]), vec + offset)
            tm.assert_equal(klass([x - offset for x in vec]), vec - offset)
            tm.assert_equal(klass([offset + x for x in vec]), offset + vec)
@pytest.mark.parametrize('klass', [Series, DatetimeIndex])
def test_datetime64_with_DateOffset(klass):
    # GH#10699
    # Scalar DateOffset arithmetic (add, radd, sub) on datetime64 boxes.
    obj = klass(date_range('2000-01-01', '2000-01-31'), name='a')
    expected = klass(date_range('2001-01-01', '2001-01-31'), name='a')
    tm.assert_equal(obj + pd.DateOffset(years=1), expected)
    tm.assert_equal(pd.DateOffset(years=1) + obj, expected)

    expected = klass(date_range('1999-01-01', '1999-01-31'), name='a')
    tm.assert_equal(obj - pd.DateOffset(years=1), expected)

    # tz-aware values with a Tick offset
    obj = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
                 pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
    expected = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
                      Timestamp('2000-02-16', tz='US/Central')], name='a')
    tm.assert_equal(obj + pd.offsets.Day(), expected)
    tm.assert_equal(pd.offsets.Day() + obj, expected)

    # tz-aware values with a non-Tick offset
    obj = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
                 pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
    expected = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
                      Timestamp('2000-02-29', tz='US/Central')], name='a')
    tm.assert_equal(obj + pd.offsets.MonthEnd(), expected)
    tm.assert_equal(pd.offsets.MonthEnd() + obj, expected)
@pytest.mark.parametrize('years', [-1, 0, 1])
@pytest.mark.parametrize('months', [-2, 0, 2])
def test_shift_months(years, months):
    # shift_months operates on the underlying i8 values and must agree
    # with applying DateOffset(years=..., months=...) per Timestamp.
    dti = DatetimeIndex([Timestamp('2000-01-05 00:15:00'),
                         Timestamp('2000-01-31 00:23:00'),
                         Timestamp('2000-01-01'),
                         Timestamp('2000-02-29'),
                         Timestamp('2000-12-31')])
    offset = pd.offsets.DateOffset(years=years, months=months)
    expected = DatetimeIndex([ts + offset for ts in dti])
    actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months))
    tm.assert_index_equal(actual, expected)
|
{
"content_hash": "b75568a75c5218830f1a2473aa6bc80f",
"timestamp": "",
"source": "github",
"line_count": 1864,
"max_line_length": 79,
"avg_line_length": 39.43562231759657,
"alnum_prop": 0.5547967568155847,
"repo_name": "cython-testbed/pandas",
"id": "36bb0aca066fbffda83e038d300115d493081fb5",
"size": "73679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/arithmetic/test_datetime64.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4907"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14136208"
},
{
"name": "Shell",
"bytes": "27731"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
"""Tests for Vanderbilt SPC component."""
from unittest.mock import patch, PropertyMock, Mock
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.spc import DATA_API
from homeassistant.const import (STATE_ALARM_ARMED_AWAY, STATE_ALARM_DISARMED)
from tests.common import mock_coro
async def test_valid_device_config(hass, monkeypatch):
    """Test that a config providing both URLs sets the component up."""
    config = {
        'spc': {
            'api_url': 'http://localhost/',
            'ws_url': 'ws://localhost/'
        }
    }
    # Stub out the gateway's network call so setup succeeds offline.
    with patch('pyspcwebgw.SpcWebGateway.async_load_parameters',
               return_value=mock_coro(True)):
        result = await async_setup_component(hass, 'spc', config)
    assert result is True
async def test_invalid_device_config(hass, monkeypatch):
    """Test that an invalid device config (missing ws_url) fails setup."""
    config = {
        'spc': {
            'api_url': 'http://localhost/'
        }
    }
    with patch('pyspcwebgw.SpcWebGateway.async_load_parameters',
               return_value=mock_coro(True)):
        # Setup must be rejected before the gateway is ever contacted.
        assert await async_setup_component(hass, 'spc', config) is False
async def test_update_alarm_device(hass):
    """Test that alarm panel state changes on incoming websocket data."""
    import pyspcwebgw
    from pyspcwebgw.const import AreaMode
    config = {
        'spc': {
            'api_url': 'http://localhost/',
            'ws_url': 'ws://localhost/'
        }
    }
    # Mock a single SPC area that starts fully armed.
    area_mock = Mock(spec=pyspcwebgw.area.Area, id='1',
                     mode=AreaMode.FULL_SET, last_changed_by='Sven')
    # 'name' is a reserved Mock() kwarg, so it must be assigned afterwards.
    area_mock.name = 'House'
    area_mock.verified_alarm = False
    with patch('pyspcwebgw.SpcWebGateway.areas',
               new_callable=PropertyMock) as mock_areas:
        mock_areas.return_value = {'1': area_mock}
        with patch('pyspcwebgw.SpcWebGateway.async_load_parameters',
                   return_value=mock_coro(True)):
            assert await async_setup_component(hass, 'spc', config) is True
        await hass.async_block_till_done()
    # Initial state mirrors the mocked area: armed away, changed by Sven.
    entity_id = 'alarm_control_panel.house'
    assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
    assert hass.states.get(entity_id).attributes['changed_by'] == 'Sven'
    # Simulate an incoming websocket update: the area is disarmed by Anna.
    area_mock.mode = AreaMode.UNSET
    area_mock.last_changed_by = 'Anna'
    await hass.data[DATA_API]._async_callback(area_mock)
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
    assert hass.states.get(entity_id).attributes['changed_by'] == 'Anna'
|
{
"content_hash": "35e661e02b9e78aedc53dfc743cb903f",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 33.78378378378378,
"alnum_prop": 0.6424,
"repo_name": "persandstrom/home-assistant",
"id": "d4bedda4e967e7b1739785b433ba1e43fc2c5305",
"size": "2500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/components/test_spc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
}
|
import pymongo
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy import log
class MongoDBPipeline(object):
    """Scrapy item pipeline that stores scraped items in MongoDB.

    Connection parameters are read from the Scrapy settings:
    MONGODB_SERVER, MONGODB_PORT, MONGODB_DB and MONGODB_COLLECTION.
    Items are de-duplicated on their ``imdb_id`` field.
    """

    def __init__(self):
        connection = pymongo.MongoClient(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT']
        )
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        """Insert *item* unless a document with the same imdb_id exists.

        The item is returned either way so downstream pipelines still run.
        """
        if not self.collection.find_one({'imdb_id': item['imdb_id']}):
            # insert_one replaces Collection.insert, which was deprecated in
            # pymongo 3.0 and removed in pymongo 4. Wrapping in dict() also
            # avoids insert's side effect of mutating the item with an _id.
            self.collection.insert_one(dict(item))
        return item
|
{
"content_hash": "9800e2ac8391b7b94596d6a974657d98",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 27.714285714285715,
"alnum_prop": 0.6408934707903781,
"repo_name": "Nathx/parental_advisory_ml",
"id": "7f01262fa3a09481d58c1eca876f8fd73c1d3d96",
"size": "776",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scraping/imdb/imdb/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60644"
},
{
"name": "Shell",
"bytes": "1928"
}
],
"symlink_target": ""
}
|
"""This library contains classes for launching graphs and executing operations.
The [basic usage](../../get_started/index.md#basic-usage) guide has
examples of how a graph is launched in a [`tf.Session`](#Session).
## Session management
@@Session
@@InteractiveSession
@@get_default_session
## Error classes
@@OpError
@@CancelledError
@@UnknownError
@@InvalidArgumentError
@@DeadlineExceededError
@@NotFoundError
@@AlreadyExistsError
@@PermissionDeniedError
@@UnauthenticatedError
@@ResourceExhaustedError
@@FailedPreconditionError
@@AbortedError
@@OutOfRangeError
@@UnimplementedError
@@InternalError
@@UnavailableError
@@DataLossError
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# NOTE(mrry): Support for `tf.GrpcServer` is currently experimental.
from tensorflow.core.protobuf.tensorflow_server_pb2 import ClusterDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import JobDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python.client.server_lib import ClusterSpec
from tensorflow.python.client.server_lib import GrpcServer
from tensorflow.python.client.session import InteractiveSession
from tensorflow.python.client.session import Session
from tensorflow.python.framework import errors
from tensorflow.python.framework.errors import OpError
from tensorflow.python.framework.ops import get_default_session
|
{
"content_hash": "b059965facf48466c914b432cce0effd",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 28.15686274509804,
"alnum_prop": 0.8182451253481894,
"repo_name": "awni/tensorflow",
"id": "37f8fe29946bd811440e55305563d0406791a160",
"size": "2146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/client/client_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156098"
},
{
"name": "C++",
"bytes": "7765982"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "684124"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "7188"
},
{
"name": "Jupyter Notebook",
"bytes": "1771787"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "103762"
},
{
"name": "Python",
"bytes": "4675299"
},
{
"name": "Shell",
"bytes": "126103"
},
{
"name": "TypeScript",
"bytes": "342627"
}
],
"symlink_target": ""
}
|
class AgentGlobals(object):
    """Process-wide state shared across the whole Agent."""

    # All-zeros GUID used as the initial/unset container id value.
    GUID_ZERO = "00000000-0000-0000-0000-000000000000"

    # Some modules (e.g. telemetry) require an up-to-date container ID;
    # this is refreshed each time the goal state is fetched.
    _container_id = GUID_ZERO

    @staticmethod
    def get_container_id():
        """Return the container ID from the most recent goal state."""
        return AgentGlobals._container_id

    @staticmethod
    def update_container_id(container_id):
        """Record *container_id* as the current container ID."""
        AgentGlobals._container_id = container_id
|
{
"content_hash": "f342e9371e91cb8f09d51af708d5896b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 108,
"avg_line_length": 28.65,
"alnum_prop": 0.6736474694589878,
"repo_name": "Azure/WALinuxAgent",
"id": "dbfda92d9dcfa019126afca1edf603da9c48f3c0",
"size": "1234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azurelinuxagent/common/AgentGlobals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3073264"
},
{
"name": "Shell",
"bytes": "19249"
}
],
"symlink_target": ""
}
|
from msrest.paging import Paged
class DataMaskingRulePaged(Paged):
    """
    A paging container for iterating over a list of :class:`DataMaskingRule <azure.mgmt.sql.models.DataMaskingRule>` objects
    """

    # msrest deserialization map: 'nextLink' holds the continuation URL,
    # 'value' holds the DataMaskingRule objects of the current page.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[DataMaskingRule]'}
    }

    def __init__(self, *args, **kwargs):
        # No extra state; all paging behavior comes from msrest's Paged.
        super(DataMaskingRulePaged, self).__init__(*args, **kwargs)
|
{
"content_hash": "8807295f0af4e1b91b7a7e82567a93e3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 123,
"avg_line_length": 29.8125,
"alnum_prop": 0.6184486373165619,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "4e1eed7b9f516b90aea2317dc18ce3f9b5552290",
"size": "951",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-sql/azure/mgmt/sql/models/data_masking_rule_paged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
"""
Created on May 17, 2013
@author: tanel
"""
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
GObject.threads_init()
Gst.init(None)
import logging
import thread
import os
logger = logging.getLogger(__name__)
import pdb
class DecoderPipeline2(object):
    """GStreamer pipeline around the 'kaldinnet2onlinedecoder' ASR element.

    Audio pushed in through process_data() flows:
    appsrc -> decodebin -> audioconvert -> audioresample -> tee,
    with one tee branch dumping raw audio to a filesink and the other
    feeding the Kaldi decoder. Recognition results and pipeline events are
    delivered through the handler callbacks registered by the caller.
    """

    def __init__(self, conf=None):
        # Bug fix: a mutable default argument ({}) would be shared between
        # calls; accept None and normalize to a fresh dict instead.
        if conf is None:
            conf = {}
        logger.info("Creating decoder using conf: %s" % conf)
        self.create_pipeline(conf)
        self.outdir = conf.get("out-dir", None)
        if self.outdir:
            # Bug fix: only touch the filesystem when an output dir was
            # configured -- os.path.exists(None) raises TypeError, and the
            # rest of the class already treats self.outdir as optional.
            if not os.path.exists(self.outdir):
                os.makedirs(self.outdir)
            elif not os.path.isdir(self.outdir):
                raise Exception("Output directory %s already exists as a file" % self.outdir)

        # Callbacks, registered later via the set_*_handler() methods.
        self.result_handler = None
        self.full_result_handler = None
        self.eos_handler = None
        self.error_handler = None
        self.request_id = "<undefined>"

    def create_pipeline(self, conf):
        """Build, link and prepare all GStreamer elements and bus signals."""
        self.appsrc = Gst.ElementFactory.make("appsrc", "appsrc")
        self.decodebin = Gst.ElementFactory.make("decodebin", "decodebin")
        self.audioconvert = Gst.ElementFactory.make("audioconvert", "audioconvert")
        self.audioresample = Gst.ElementFactory.make("audioresample", "audioresample")
        self.tee = Gst.ElementFactory.make("tee", "tee")
        self.queue1 = Gst.ElementFactory.make("queue", "queue1")
        self.filesink = Gst.ElementFactory.make("filesink", "filesink")
        self.queue2 = Gst.ElementFactory.make("queue", "queue2")
        self.asr = Gst.ElementFactory.make("kaldinnet2onlinedecoder", "asr")
        self.fakesink = Gst.ElementFactory.make("fakesink", "fakesink")

        # This needs to be set first.
        # Bug fix: use conf.get("decoder", {}) like the loop below so a conf
        # without a "decoder" section does not raise KeyError.
        if "use-threaded-decoder" in conf.get("decoder", {}):
            self.asr.set_property("use-threaded-decoder", conf["decoder"]["use-threaded-decoder"])

        for (key, val) in conf.get("decoder", {}).iteritems():
            if key != "use-threaded-decoder":
                logger.info("Setting decoder property: %s = %s" % (key, val))
                self.asr.set_property(key, val)

        self.appsrc.set_property("is-live", True)
        # The raw-audio dump is redirected to a per-request file in
        # init_request(); default to /dev/null.
        self.filesink.set_property("location", "/dev/null")
        logger.info('Created GStreamer elements')

        self.pipeline = Gst.Pipeline()
        for element in [self.appsrc, self.decodebin, self.audioconvert, self.audioresample, self.tee,
                        self.queue1, self.filesink,
                        self.queue2, self.asr, self.fakesink]:
            logger.debug("Adding %s to the pipeline" % element)
            self.pipeline.add(element)

        logger.info('Linking GStreamer elements')
        self.appsrc.link(self.decodebin)
        # decodebin creates its source pads dynamically, so the link to
        # audioconvert happens in _connect_decoder when a pad appears.
        self.decodebin.connect('pad-added', self._connect_decoder)
        self.audioconvert.link(self.audioresample)
        self.audioresample.link(self.tee)
        self.tee.link(self.queue1)
        self.queue1.link(self.filesink)
        self.tee.link(self.queue2)
        self.queue2.link(self.asr)
        self.asr.link(self.fakesink)

        # Create bus and connect several handlers
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.enable_sync_message_emission()
        self.bus.connect('message::eos', self._on_eos)
        self.bus.connect('message::error', self._on_error)

        # Decoder result signals.
        self.asr.connect('partial-result', self._on_partial_result)
        self.asr.connect('final-result', self._on_final_result)
        self.asr.connect('full-final-result', self._on_full_final_result)

        logger.info("Setting pipeline to READY")
        self.pipeline.set_state(Gst.State.READY)
        logger.info("Set pipeline to READY")

    def _connect_decoder(self, element, pad):
        # Called by decodebin when it exposes a decoded audio pad.
        logger.info("%s: Connecting audio decoder" % self.request_id)
        pad.link(self.audioconvert.get_static_pad("sink"))
        logger.info("%s: Connected audio decoder" % self.request_id)

    def _on_partial_result(self, asr, hyp):
        # Intermediate hypothesis; may still change.
        logger.info("%s: Got partial result: %s" % (self.request_id, hyp.decode('utf8')))
        if self.result_handler:
            self.result_handler(hyp, False)

    def _on_final_result(self, asr, hyp):
        # Final hypothesis for the current segment.
        logger.info("%s: Got final result: %s" % (self.request_id, hyp.decode('utf8')))
        if self.result_handler:
            self.result_handler(hyp, True)

    def _on_full_final_result(self, asr, result_json):
        # Full result (JSON) including alternatives/alignment from the decoder.
        logger.info("%s: Got full final result: %s" % (self.request_id, result_json.decode('utf8')))
        if self.full_result_handler:
            self.full_result_handler(result_json)

    def _on_error(self, bus, msg):
        # GStreamer bus error: remember it, reset, and notify the caller.
        self.error = msg.parse_error()
        logger.error(self.error)
        self.finish_request()
        if self.error_handler:
            self.error_handler(self.error[0].message)

    def _on_eos(self, bus, msg):
        logger.info('%s: Pipeline received eos signal' % self.request_id)
        self.finish_request()
        if self.eos_handler:
            self.eos_handler[0](self.eos_handler[1])

    def get_adaptation_state(self):
        """Return the decoder's current speaker-adaptation state."""
        return self.asr.get_property("adaptation-state")

    def set_adaptation_state(self, adaptation_state):
        """Sets the adaptation state to a certain value, previously retrieved using get_adaptation_state()

        Should be called after init_request(..)
        """
        return self.asr.set_property("adaptation-state", adaptation_state)

    def finish_request(self):
        """Reset the pipeline after a request ends (EOS, error or cancel)."""
        logger.info("%s: Resetting decoder state" % self.request_id)
        if self.outdir:
            # Point the raw-audio dump back at /dev/null.
            self.filesink.set_state(Gst.State.NULL)
            self.filesink.set_property('location', "/dev/null")
            self.filesink.set_state(Gst.State.PLAYING)
        self.pipeline.set_state(Gst.State.NULL)
        self.request_id = "<undefined>"

    def init_request(self, id, caps_str):
        """Prepare the pipeline for a new request.

        id       -- identifier used for logging and the audio dump filename
        caps_str -- GStreamer caps string of the incoming audio, or empty
                    to let decodebin auto-detect the format
        """
        self.request_id = id
        logger.info("%s: Initializing request" % (self.request_id))
        if caps_str and len(caps_str) > 0:
            logger.info("%s: Setting caps to %s" % (self.request_id, caps_str))
            caps = Gst.caps_from_string(caps_str)
            self.appsrc.set_property("caps", caps)
        else:
            # No caps given: let decodebin figure out the stream format.
            self.appsrc.set_property("caps", None)
        if self.outdir:
            # Redirect the raw-audio dump to a per-request file.
            self.pipeline.set_state(Gst.State.PAUSED)
            self.filesink.set_state(Gst.State.NULL)
            self.filesink.set_property('location', "%s/%s.raw" % (self.outdir, id))
            self.filesink.set_state(Gst.State.PLAYING)
        self.pipeline.set_state(Gst.State.PLAYING)
        self.filesink.set_state(Gst.State.PLAYING)
        # reset adaptation state
        self.set_adaptation_state("")

    def process_data(self, data):
        """Push a chunk of raw audio bytes into the pipeline."""
        logger.debug('%s: Pushing buffer of size %d to pipeline' % (self.request_id, len(data)))
        buf = Gst.Buffer.new_allocate(None, len(data), None)
        buf.fill(0, data)
        self.appsrc.emit("push-buffer", buf)
        logger.debug('%s: Pushing buffer done' % self.request_id)

    def end_request(self):
        """Signal that no more audio will arrive for the current request."""
        logger.info("%s: Pushing EOS to pipeline" % self.request_id)
        self.appsrc.emit("end-of-stream")

    def set_result_handler(self, handler):
        # handler(hyp, is_final) -- called for partial and final results.
        self.result_handler = handler

    def set_full_result_handler(self, handler):
        # handler(result_json) -- called with the decoder's full JSON result.
        self.full_result_handler = handler

    def set_eos_handler(self, handler, user_data=None):
        # handler(user_data) -- called when the pipeline reaches EOS.
        self.eos_handler = (handler, user_data)

    def set_error_handler(self, handler):
        # handler(message) -- called with the GStreamer error message text.
        self.error_handler = handler

    def cancel(self):
        """Abort the current request by sending EOS through appsrc."""
        logger.info("%s: Sending EOS to pipeline in order to cancel processing" % self.request_id)
        self.appsrc.emit("end-of-stream")
        logger.info("%s: Cancelled pipeline" % self.request_id)
|
{
"content_hash": "0b80ca9cb171e8c4201e67690ce3fb62",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 106,
"avg_line_length": 38.18141592920354,
"alnum_prop": 0.6256808436667053,
"repo_name": "claritylab/kaldi-gstreamer-server",
"id": "f70fa4716a8ad9de69464eb03d1b8acc667bcf25",
"size": "8629",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "kaldigstserver/decoder2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3214"
},
{
"name": "HTML",
"bytes": "8835"
},
{
"name": "JavaScript",
"bytes": "33511"
},
{
"name": "Python",
"bytes": "70764"
},
{
"name": "Shell",
"bytes": "2151"
},
{
"name": "Thrift",
"bytes": "931"
}
],
"symlink_target": ""
}
|
from argus.backends import base as base_backend
from argus.backends import windows as windows_backend
from argus import config as argus_config
CONFIG = argus_config.CONFIG
class LocalBackend(windows_backend.WindowsBackendMixin,
                   windows_backend.BaseMetadataProviderMixin,
                   base_backend.BaseBackend):
    """Local Backend for testing Windows machines
    that are running, have git installed and winrm configured"""

    def __init__(self, name=None, userdata=None, metadata=None,
                 availability_zone=None):
        super(LocalBackend, self).__init__(name=name, userdata=userdata,
                                           metadata=metadata,
                                           availability_zone=availability_zone)
        # Credentials and address of the already-running machine come from
        # the [local] section of the argus configuration.
        self._username = CONFIG.local.username
        self._password = CONFIG.local.password
        self._ip = CONFIG.local.ip

    def get_remote_client(self, protocol='http', **kwargs):
        """Return a remote client for the configured machine.

        Bug fix: the parent's result was previously discarded, so this
        method always returned None.
        """
        return super(LocalBackend, self).get_remote_client(
            self._username, self._password, protocol, **kwargs)

    def setup_instance(self):
        # The machine is assumed to be provisioned already; nothing to do.
        pass

    def cleanup(self):
        # No resources are created by this backend, so nothing to release.
        pass

    def save_instance_output(self):
        # No console output is available for a pre-existing local machine.
        pass

    def get_password(self):
        # Returned as a callable to match the backend interface.
        return lambda: self._password

    def get_username(self):
        # Returned as a callable to match the backend interface.
        return lambda: self._username

    def floating_ip(self):
        """Return the IP address of the machine under test."""
        return self._ip
|
{
"content_hash": "e0d867072c6e9d3cf6ca4153c5d56ce3",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 34.74418604651163,
"alnum_prop": 0.5876840696117804,
"repo_name": "micumatei/cloudbase-init-ci",
"id": "673a0f9b7e99e695523e23f8683bdcb67b915743",
"size": "2133",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "argus/backends/local/local_backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "138"
},
{
"name": "PowerShell",
"bytes": "15716"
},
{
"name": "Python",
"bytes": "571424"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
import urllib
import gzip
import cStringIO
import subprocess
from debian import deb822
import argparse
# Default download settings; REPO, DIST and COMPONENT may be overridden
# below based on command-line flags (see parse_args()).
destdir="newpkg"
arches=["amd64", "i386"]
REPO="http://repo.steampowered.com/steamrt"
DIST="scout"
COMPONENT="main"
# The generated Nix expression is written incrementally by the helpers
# below; start it with a header and an open attrset.
out = open("runtime-generated.nix", "w");
out.write("# This file is autogenerated! Do not edit it yourself, use update-runtime.py for regeneration.\n")
out.write("{ fetchurl }:\n")
out.write("\n")
out.write("{\n")
def parse_args():
    """Parse the command-line options selecting which runtime to fetch."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--beta", help="build beta runtime",
                        action="store_true")
    parser.add_argument("-d", "--debug", help="build debug runtime",
                        action="store_true")
    parser.add_argument("--symbols", help="include debugging symbols",
                        action="store_true")
    # Defaults to the official Steam runtime repository.
    parser.add_argument("--repo", help="source repository", default=REPO)
    return parser.parse_args()
def download_file(file_base, file_name, file_url):
    """Prefetch one .deb into the Nix store and emit its Nix attrset."""
    file_shortname = file_base + ".deb"
    # nix-prefetch-url downloads the file and prints its md5 hash.
    md5 = subprocess.check_output(["nix-prefetch-url", "--type", "md5",
                                   "--name", file_shortname, file_url])
    out.writelines([
        "  rec {\n",
        "    name = \"%s\";\n" % file_name,
        "    md5 = \"%s\";\n" % md5.strip(),
        "    source = fetchurl {\n",
        "      url = \"%s\";\n" % file_url,
        "      inherit md5;\n",
        "      name = \"%s\";\n" % file_shortname,
        "    };\n",
        "  }\n",
    ])
def install_binaries(arch, binarylist):
    """Download every binary package from *binarylist* for *arch*.

    Reads the repository's Packages index to map package names to file
    URLs, and reports any requested package missing from the index.
    """
    remaining = binarylist.copy()
    # Load the Packages file so we can find the location of each binary
    # package.
    packages_url = "%s/dists/%s/%s/binary-%s/Packages" % (REPO, DIST, COMPONENT, arch)
    print("Downloading %s binaries from %s" % (arch, packages_url))
    for stanza in deb822.Packages.iter_paragraphs(urllib.urlopen(packages_url)):
        pkg = stanza['Package']
        if pkg not in remaining:
            continue
        print("DOWNLOADING BINARY: %s" % pkg)
        # Fetch the package and emit its Nix snippet.
        file_url = "%s/%s" % (REPO, stanza['Filename'])
        download_file(pkg,
                      os.path.splitext(os.path.basename(stanza['Filename']))[0],
                      file_url)
        remaining.remove(pkg)
    for pkg in remaining:
        # Anything left over was requested but absent from the index.
        e = "ERROR: Package %s not found in Packages file %s\n" % (pkg, packages_url)
        sys.stderr.write(e)
def install_symbols(arch, binarylist):
    """Download the -dbgsym packages matching the binaries in *binarylist*."""
    #
    # Load the Packages file to find the location of each symbol package
    #
    packages_url = "%s/dists/%s/%s/debug/binary-%s/Packages" % (REPO, DIST, COMPONENT, arch)
    print("Downloading %s symbols from %s" % (arch, packages_url))
    for stanza in deb822.Packages.iter_paragraphs(urllib.urlopen(packages_url)):
        p = stanza['Package']
        # Raw string for the regex: '\w', '\-' and '\.' inside a normal
        # string literal are invalid escape sequences (a DeprecationWarning
        # on Python >= 3.6); '-' needs no escaping outside a class anyway.
        m = re.match(r'([\w\-\.]+)-dbgsym', p)
        if m and m.group(1) in binarylist:
            print("DOWNLOADING SYMBOLS: %s" % p)
            #
            # Download the package and emit its Nix snippet
            #
            file_url = "%s/%s" % (REPO, stanza['Filename'])
            download_file(p, os.path.splitext(os.path.basename(stanza['Filename']))[0], file_url)
# Apply the command-line overrides to the module-level repo settings.
args = parse_args()
REPO=args.repo
if args.beta:
    DIST="steam_beta"
if args.debug:
    COMPONENT = "debug"
# Process packages.txt to get the list of source and binary packages
source_pkgs = set()
binary_pkgs = set()
print ("Creating runtime-generated.nix")
pkgs_list = urllib.urlopen("https://raw.githubusercontent.com/ValveSoftware/steam-runtime/master/packages.txt").readlines()
# Each non-comment line is: <source-package> <binary-package>...
for line in pkgs_list:
    if line[0] != '#':
        toks = line.split()
        if len(toks) > 1:
            source_pkgs.add(toks[0])
            binary_pkgs.update(toks[1:])
# remove development packages for end-user runtime
if not args.debug:
    binary_pkgs -= {x for x in binary_pkgs if re.search('-dbg$|-dev$|-multidev$',x)}
# Emit one Nix list per architecture, each entry produced by download_file.
for arch in arches:
    out.write("  %s = [\n" % arch)
    install_binaries(arch, binary_pkgs)
    if args.symbols:
        install_symbols(arch, binary_pkgs)
    out.write("  ];\n");
# Close the top-level attrset opened in the file header.
out.write("}\n")
# vi: set noexpandtab:
|
{
"content_hash": "96814fda9c3132abf9fff89c2e4d960a",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 123,
"avg_line_length": 29.383458646616543,
"alnum_prop": 0.6678607983623337,
"repo_name": "triton/triton",
"id": "3244297084906e7fa19f33ab8154b61cc568c90f",
"size": "4097",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pkgs/all-pkgs/s/steam/update-runtime.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "19566"
},
{
"name": "C++",
"bytes": "654"
},
{
"name": "CMake",
"bytes": "1035"
},
{
"name": "CSS",
"bytes": "1837"
},
{
"name": "Dockerfile",
"bytes": "553"
},
{
"name": "Emacs Lisp",
"bytes": "673"
},
{
"name": "Go",
"bytes": "373"
},
{
"name": "JavaScript",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "6740090"
},
{
"name": "Perl",
"bytes": "166382"
},
{
"name": "Python",
"bytes": "36406"
},
{
"name": "Ruby",
"bytes": "6523"
},
{
"name": "Shell",
"bytes": "469375"
},
{
"name": "XSLT",
"bytes": "6371"
},
{
"name": "sed",
"bytes": "794"
}
],
"symlink_target": ""
}
|
from google.cloud import retail_v2
async def sample_create_control():
    """Create a retail Control via the async client and print the result."""
    client = retail_v2.ControlServiceAsyncClient()

    # Build the Control resource and the request wrapping it.
    new_control = retail_v2.Control()
    new_control.display_name = "display_name_value"
    new_control.solution_types = "SOLUTION_TYPE_SEARCH"
    create_request = retail_v2.CreateControlRequest(
        parent="parent_value",
        control=new_control,
        control_id="control_id_value",
    )

    # Issue the RPC and show the created resource.
    response = await client.create_control(request=create_request)
    print(response)

# [END retail_v2_generated_ControlService_CreateControl_async]
|
{
"content_hash": "976cc18150173ec9b650e34b07a9b8ce",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 62,
"avg_line_length": 26.4,
"alnum_prop": 0.6954545454545454,
"repo_name": "googleapis/python-retail",
"id": "57c27d004ab84376c487972ad987bf6f1bf7c223",
"size": "2045",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/retail_v2_generated_control_service_create_control_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "7420556"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
with_statement)
from Crypto.PublicKey import RSA
import datetime
from podship.models import UserBase
from firenado.config import load_yaml_config_file
from firenado import service
from firenado.util import random_string
from passlib.hash import bcrypt
from sqlalchemy.orm.exc import NoResultFound
import os
class UserService(service.FirenadoService):
    """Service-layer operations for user accounts: creation, lookup,
    password verification and RSA key generation."""

    def __init__(self, handler, data_source=None):
        super(UserService, self).__init__(handler, data_source)
        #self.security = load_yaml_config_file()
        # Project root is one directory above this module's package.
        self.project_root = os.path.abspath(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
        # Security settings (e.g. the password pepper) live in
        # conf/security.yml under the project root.
        self.security_conf = load_yaml_config_file(
            os.path.join(self.project_root, 'conf', 'security.yml'))

    def create(self, user_data, created_utc=None, db_session=None):
        """Build and persist a new UserBase row.

        ``user_data`` must provide 'user_name', 'password' and 'email'.
        When *db_session* is omitted, a session is taken from the
        'diasporapy' data source and committed here; a caller-supplied
        session is NOT committed (the caller owns the transaction).
        Returns the UserBase instance.
        """
        if not created_utc:
            created_utc = datetime.datetime.utcnow()
        user = UserBase()
        user.user_name = user_data['user_name']
        # TODO: Generate the serialized private key
        user.serialized_private_key = self.generate_key(user_data['password'])
        user.getting_started = True
        user.disable_mail = False
        # TODO: Handle language
        user.language = 'en'
        user.email = user_data['email']
        # Password is peppered (see get_peppered_password) then bcrypt-hashed.
        user.encrypted_password = bcrypt.encrypt(
            self.get_peppered_password(user_data['password']))
        # Not used
        user.invitation_token = None
        user.invitation_sent_at = None
        user.reset_password_sent_at = None
        user.sign_in_count = 0
        user.current_sign_in_at = None
        user.last_sign_in_at = None
        user.current_sign_in_ip = None
        user.last_sign_in_ip = None
        user.created_at = created_utc
        user.updated_at = created_utc
        user.invitation_service = None
        user.invitation_identifier = None
        user.invitation_limit = None
        user.invited_by_id = None
        user.invited_by_type = None
        user.authentication_token = None
        user.unconfirmed_email = None
        user.confirm_email_token = None
        user.locked_at = None
        # TODO: This should be set based on an application settings
        user.show_community_spotlight_in_stream = True
        user.auto_follow_back = False
        user.auto_follow_back_aspect_id = None
        user.hidden_shareables = None
        user.reset_password_sent_at = None
        user.last_seen = None
        user.remove_after = None
        user.export = None
        user.exported_at = None
        user.exporting = False
        user.strip_exif = True
        user.exported_photos_file = None
        user.exported_photos_at = None
        user.exporting_photos = False
        commit = False
        if not db_session:
            db_session = self.get_data_source(
                'diasporapy').get_connection()['session']
            commit = True
        db_session.add(user)
        if commit:
            db_session.commit()
        return user

    def get_by_user_name(self, user_name, db_session=None):
        """Return the UserBase with the given user_name, or None."""
        if not db_session:
            db_session = self.get_data_source(
                'diasporapy').get_connection()['session']
        auth_user = None
        try:
            auth_user = db_session.query(UserBase).filter(
                UserBase.user_name == user_name).one()
        except NoResultFound:
            pass
        return auth_user

    def is_password_valid(self, challenge, encrypted_password):
        """True when the peppered *challenge* matches the stored bcrypt hash."""
        return bcrypt.verify(
            self.get_peppered_password(challenge), encrypted_password)

    def get_peppered_password(self, password):
        """Append the configured pepper to *password* before hashing."""
        return '%s%s' % (password, self.security_conf['password']['pepper'])

    def generate_key(self, passphrase):
        """ FROM pyraspora: pyaspora.user.models
        Generate a 2048-bit RSA key. The key will be stored in the User
        object. The private key will be protected with password <passphrase>,
        which is usually the user password.
        """
        # TODO: I don't know if this is the way diaspora is handling the key
        # Let's keep this way by now
        # TODO: looks like this method is candidate to be part of some security
        # toolkit
        RSAkey = RSA.generate(4096)
        # Fix: removed a stray bare ``print`` statement left over from
        # debugging (a no-op expression under ``print_function``).
        # NOTE(review): the passphrase-protected private key is computed but
        # discarded, and the PUBLIC key is returned even though callers store
        # the result as ``serialized_private_key`` -- confirm intent before
        # changing; also the docstring says 2048-bit but 4096 is generated.
        private_key = RSAkey.exportKey(
            format='PEM',
            pkcs=1,
            passphrase=passphrase
        ).decode("ascii")
        return RSAkey.publickey().exportKey(
            format='PEM',
            pkcs=1
        ).decode("ascii")
|
{
"content_hash": "c94f222f230eb110496819dcedbf75ee",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 79,
"avg_line_length": 36.16279069767442,
"alnum_prop": 0.6113612004287245,
"repo_name": "piraz/diasporapy",
"id": "7006c1f50a06710414fff5e726d918af5c2b913c",
"size": "5327",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "podship/services/user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3151"
},
{
"name": "HTML",
"bytes": "13743"
},
{
"name": "JavaScript",
"bytes": "5895"
},
{
"name": "Python",
"bytes": "94094"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build and return the Building object for this POI template."""
    building = Building()
    building.template = "object/building/poi/shared_naboo_swamhunt_large2.iff"
    building.attribute_template_id = -1
    building.stfName("poi_n","base_poi_building")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return building
|
{
"content_hash": "91aa2ff4e0a38ab3c314f76d0bdb5f9b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 23.615384615384617,
"alnum_prop": 0.6938110749185668,
"repo_name": "obi-two/Rebelion",
"id": "52fdf87f216fdc1279055d7846c7c199ae4c339f",
"size": "452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/building/poi/shared_naboo_swamhunt_large2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
import unicodedata
__all__ = [
"EmojiData",
]
def get_unicode_data(emoji):
    """Return (names, categories) for every codepoint of *emoji*.

    Falls back to ([emoji], ["unicode_other"]) when any codepoint has no
    entry in the Unicode database (unicodedata.name raises ValueError).
    """
    try:
        names = []
        categories = []
        for char in emoji:
            names.append(unicodedata.name(char))
            categories.append(unicodedata.category(char))
    except ValueError:
        # Unnamed codepoint: represent the whole input as-is.
        names = [emoji]
        categories = ["unicode_other"]
    return names, categories
class EmojiData:
    """Uniform wrapper over either a unicode emoji (a ``str``) or a
    custom guild emoji object, exposing one schema for both."""

    __slots__ = (
        "raw",
        "id",
        "unicode",
        "custom",
        "managed",
        "name",
        "category",
        "roles",
        "guild",
    )

    def __init__(self, emoji):
        self.raw = emoji
        if isinstance(emoji, str):
            # Plain unicode emoji: derive name/category from unicodedata.
            names, categories = get_unicode_data(emoji)
            self.id = 0
            self.unicode = emoji
            self.custom = False
            self.managed = False
            self.name = names
            self.category = categories
            self.roles = []
            self.guild = None
            return
        # Custom emoji object: copy what it exposes, defaulting to None.
        self.id = emoji.id
        self.unicode = ""
        self.custom = True
        self.managed = getattr(emoji, "managed", None)
        self.name = [emoji.name]
        self.category = ["custom"]
        self.roles = getattr(emoji, "roles", None)
        self.guild = getattr(emoji, "guild", None)

    @property
    def mention(self):
        """Chat-embeddable form: ``<:name:id>`` for custom emoji, the
        raw character otherwise."""
        return f"<:{self.name[0]}:{self.id}>" if self.id else self.unicode

    @property
    def cache_id(self):
        """(id, unicode) pair usable as a cache key."""
        return (self.id, self.unicode)

    def values(self):
        """Flat dict of all fields, e.g. for a database insert."""
        role_ids = [role.id for role in (self.roles or [])]
        return {
            "emoji_id": self.id,
            "emoji_unicode": self.unicode,
            "is_custom": self.custom,
            "is_managed": self.managed,
            "is_deleted": False,
            "name": self.name,
            "category": self.category,
            "roles": role_ids,
            "guild_id": getattr(self.guild, "id", None),
        }

    def __str__(self):
        return str(self.id or self.unicode)

    def __repr__(self):
        return f"<EmojiData {self}>"
|
{
"content_hash": "5b3afa450fea9c41e18a0c555508af5d",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 65,
"avg_line_length": 24.576470588235296,
"alnum_prop": 0.49114408808042126,
"repo_name": "strinking/statbot",
"id": "280308ff5b41d8b8272202260178fd44fba3843e",
"size": "2465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statbot/emoji.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95550"
},
{
"name": "Shell",
"bytes": "862"
}
],
"symlink_target": ""
}
|
import logging
import threading
import time
import os
import Server
import websocket
import LightSchedule
import PCA9685
import PCA9685_dummy
import Settings
import Channel
from WeatherType import WeatherType
import json
from objdict import ObjDict
from outlet import outlet
DEBUG = True  # NOTE(review): not referenced in this chunk -- confirm use elsewhere
MAIN_LOOP_TIME = 5  # seconds slept between main-loop iterations
MAIN_LOOP_HEALTH_FREQ = 120  # iterations between health-report log lines
LED_MAX = 4095 # Max Brightness
LED_MIN = 0 # Min Brightness (off)
def makeLogger():
    '''Set up and return the module logger.

    Creates logs/ if needed and attaches two midnight-rotating file
    handlers: whet.log at INFO (7 backups) and whet-DEBUG.log at DEBUG
    (2 backups).
    '''
    # Bug fix: ``logging.handlers`` is a submodule that ``import logging``
    # alone does not load; it must be imported explicitly before using
    # TimedRotatingFileHandler below.
    import logging.handlers
    logging.raiseExceptions = True
    logdir = 'logs/'
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s:%(levelname)s: %(message)s')
    handler = logging.handlers.TimedRotatingFileHandler(logdir + "whet.log",
                                                        when='midnight',
                                                        interval=1,
                                                        backupCount=7)
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    debug_handler = logging.handlers.TimedRotatingFileHandler(logdir + "whet-DEBUG.log",
                                                              when='midnight',
                                                              interval=1,
                                                              backupCount=2)
    debug_handler.setLevel(logging.DEBUG)
    debug_handler.setFormatter(formatter)
    logger.addHandler(debug_handler)
    # consoleHandler = logging.StreamHandler()
    # consoleHandler.setLevel(logging.INFO)
    # consoleHandler.setFormatter(formatter)
    # logger.addHandler(consoleHandler)
    return logger
def main_loop():
    """Top-level controller loop.

    Keeps the tornado server and one Channel thread per configured
    channel alive, pushes channel status over a local websocket each
    iteration, and logs a health summary every MAIN_LOOP_HEALTH_FREQ
    iterations.  Runs until KeyboardInterrupt; always turns the LEDs
    off and stops the server on the way out.
    """
    logger = makeLogger()
    logger.info("Whet started")
    # counts for debug/ health report
    loops = 0
    dead_tornado_cnt = 0
    dead_channel_cnt = 0
    # Settings ----------------------------------------------------------------
    settings = Settings.Settings()
    # Server ------------------------------------------------------------------
    tornado_server = Server.Server()
    tornado_server.start()
    time.sleep(1)
    # connect to the server
    light_schedule = LightSchedule.LightSchedule()
    # Initialise the PCA9685, if cant use a dummy (for testing on machine that is not pi)
    try:
        pwm = PCA9685.PCA9685()
        # Alternatively specify a different address and/or bus:
        #pwm = Adafruit_PCA9685.PCA9685(address=0x41, busnum=2)
    except ImportError:
        msg = "UNABLE TO LOAD PCA9685... no pwm values will set!"
        logger.exception(msg)
        print(msg)
        pwm = PCA9685_dummy.PCA9685()
    # Set frequency to 1000hz... LEDS.
    pwm.set_pwm_freq(1000)
    pwm.set_all(LED_MIN)
    time.sleep(1)
    # Bug fix: pre-bind conn so the finally block cannot raise NameError
    # when an error occurs before the first websocket connection is made.
    conn = None
    try:
        channel_threads = []
        while True:
            settings.read_file()
            # untested
            if not tornado_server.is_alive():
                dead_tornado_cnt += 1
                logger.error("Tornado thread died %s", dead_tornado_cnt)
                tornado_server = Server.Server()
                tornado_server.start()
                time.sleep(1)
            # restart threads if they die, this should never happen
            for i, val in enumerate(channel_threads):
                if not val.is_alive():
                    dead_channel_cnt += 1
                    logger.error(
                        "THREAD %s IS DEAD: Dead thread count = %s", val.c_id, dead_channel_cnt)
                    channel_threads[i] = Channel.Channel(
                        val.c_id, pwm, light_schedule)
                    channel_threads[i].start()
            # Rebuild all channel threads when the configured count changes.
            if len(channel_threads) != light_schedule.get_number_of_channels():
                logger.info("Thread to channel mismatch config=%s threads=%s",
                            light_schedule.get_number_of_channels(), len(channel_threads))
                for i, val in enumerate(channel_threads):
                    channel_threads[i].cancel()
                    while channel_threads[i].is_alive():
                        logger.info("waiting for thread to die")
                        time.sleep(1)
                channel_threads = []  # reset list
                for i in range(light_schedule.get_number_of_channels()):
                    channel_obj = Channel.Channel(i, pwm, light_schedule)
                    channel_threads.append(channel_obj)
                    channel_obj.start()
            a_data = []
            conn = websocket.create_connection("ws://localhost:7999/chat/websocket?id=py", timeout=2)
            for i, val in enumerate(channel_threads):
                # Bug fix: is_alive is a method; the original tested the bound
                # method object (always truthy) instead of calling it.
                if val.is_alive():
                    a_data.append(val.broadcast())
            c_data = ObjDict()
            c_data.status = a_data
            conn.send(json.dumps(c_data, sort_keys=True, indent=4))
            conn.close(reason="whet.py loop finished", timeout=2)
            if loops >= MAIN_LOOP_HEALTH_FREQ:
                loops = 0
                logger.info("Dead Channels:%s | Dead Tornados:%s",
                            dead_channel_cnt, dead_tornado_cnt)
            time.sleep(MAIN_LOOP_TIME)
            if settings.__dict__.get('outlet_run', False): outlet.run()
            loops += 1
    except KeyboardInterrupt:
        logger.info('KeyboardInterrupt Quit')
        pwm.set_all(LED_MIN)
    finally:
        for i, val in enumerate(channel_threads):
            logger.info('Cancel channel %s', i)
            channel_threads[i].cancel()
        pwm.set_all(LED_MIN)
        logger.info('Killing server thread')
        if conn is not None:
            conn.close()
        tornado_server.stop()
# Script entry point: run the controller loop until interrupted.
if __name__ == "__main__":
    main_loop()
|
{
"content_hash": "64e34f4d6a3ee217bf9ea0269d519978",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 101,
"avg_line_length": 33.38505747126437,
"alnum_prop": 0.5407126872095025,
"repo_name": "mike-gracia/whet",
"id": "79dd8375ef5b4420613c6c108daae102d4c4ea4c",
"size": "5832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2469"
},
{
"name": "HTML",
"bytes": "11953"
},
{
"name": "JavaScript",
"bytes": "191313"
},
{
"name": "Python",
"bytes": "35197"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
import os
import sys
import tempfile
import numpy
from numpy.testing import TestCase, dec, assert_, run_module_suite
from scipy.weave import inline_tools,ext_tools
from scipy.weave.build_tools import msvc_exists, gcc_exists
from scipy.weave.catalog import unique_file
from scipy.weave.numpy_scalar_spec import numpy_complex_scalar_converter
def unique_mod(d, file_name):
    """Return the module name (basename without extension) of a unique
    file produced by ``unique_file`` inside directory *d*."""
    unique_path = unique_file(d, file_name)
    base = os.path.basename(unique_path)
    return os.path.splitext(base)[0]
#----------------------------------------------------------------------------
# Scalar conversion test classes
# int, float, complex
#----------------------------------------------------------------------------
class NumpyComplexScalarConverter(TestCase):
    # Exercises numpy_complex_scalar_converter: Python-level type matching
    # plus round-tripping numpy.complex128 values through freshly compiled
    # weave extension modules.  Relies on the module-global ``test_dir``
    # (created at import time) as the compile/output location.
    compiler = ''  # overridden by the generated Msvc/Unix/Gcc subclasses below
    def setUp(self):
        self.converter = numpy_complex_scalar_converter()
    @dec.slow
    def test_type_match_string(self):
        assert_(not self.converter.type_match('string'))
    @dec.slow
    def test_type_match_int(self):
        assert_(not self.converter.type_match(5))
    @dec.slow
    def test_type_match_float(self):
        assert_(not self.converter.type_match(5.))
    @dec.slow
    def test_type_match_complex128(self):
        assert_(self.converter.type_match(numpy.complex128(5.+1j)))
    @dec.slow
    def test_complex_var_in(self):
        # Build a tiny extension taking a complex argument, then check it
        # accepts complex128 and rejects float/str with TypeError.
        mod_name = sys._getframe().f_code.co_name + self.compiler
        mod_name = unique_mod(test_dir,mod_name)
        mod = ext_tools.ext_module(mod_name)
        a = numpy.complex(1.+1j)
        code = "a=std::complex<double>(2.,2.);"
        test = ext_tools.ext_function('test',code,['a'])
        mod.add_function(test)
        mod.compile(location=test_dir, compiler=self.compiler)
        exec('from ' + mod_name + ' import test')
        b = numpy.complex128(1.+1j)
        test(b)
        try:
            b = 1.
            test(b)
        except TypeError:
            pass
        try:
            b = 'abc'
            test(b)
        except TypeError:
            pass
    @dec.slow
    def test_complex_return(self):
        # Extension adds (2+2j) in C++ and returns the result to Python.
        mod_name = sys._getframe().f_code.co_name + self.compiler
        mod_name = unique_mod(test_dir,mod_name)
        mod = ext_tools.ext_module(mod_name)
        a = 1.+1j
        code = """
               a= a + std::complex<double>(2.,2.);
               return_val = PyComplex_FromDoubles(a.real(),a.imag());
               """
        test = ext_tools.ext_function('test',code,['a'])
        mod.add_function(test)
        mod.compile(location=test_dir, compiler=self.compiler)
        exec('from ' + mod_name + ' import test')
        b = 1.+1j
        c = test(b)
        assert_(c == 3.+3j)
    @dec.slow
    def test_inline(self):
        # Same conversion path via inline_tools instead of ext_tools.
        a = numpy.complex128(1+1j)
        result = inline_tools.inline("return_val=1.0/a;",['a'])
        assert_(result == .5-.5j)
# Generate compiler-specific TestCase subclasses for every *Converter class
# defined above (e.g. TestNumpyComplexScalarConverterGcc).  MSVC platforms
# get an Msvc variant; everywhere else gets a Unix variant plus a Gcc
# variant when gcc is available.
for _n in dir():
    if _n[-9:] == 'Converter':
        if msvc_exists():
            exec("class Test%sMsvc(%s):\n    compiler = 'msvc'" % (_n,_n))
        else:
            exec("class Test%sUnix(%s):\n    compiler = ''" % (_n,_n))
            if gcc_exists():
                exec("class Test%sGcc(%s):\n    compiler = 'gcc'" % (_n,_n))
def setup_test_location():
    """Create a scratch directory for compiled test modules and put it at
    the front of ``sys.path`` so they can be imported; return its path."""
    location = tempfile.mkdtemp()
    sys.path.insert(0, location)
    return location


# Shared compile/output directory used by the converter tests above.
test_dir = setup_test_location()
def teardown_test_location():
    """Drop the shared test directory from the head of ``sys.path`` when
    it is there, and return the path that was checked."""
    import tempfile
    location = os.path.join(tempfile.gettempdir(), 'test_files')
    if sys.path[0] == location:
        sys.path = sys.path[1:]
    return location
# Prune the generated test classes that cannot run on this platform:
# keep Msvc variants only when MSVC is available, Unix variants otherwise.
if not msvc_exists():
    for _n in dir():
        if _n[:8] == 'TestMsvc': exec('del '+_n)
else:
    for _n in dir():
        if _n[:8] == 'TestUnix': exec('del '+_n)
# Gcc variants are only meaningful when both compilers coexist on win32.
if not (gcc_exists() and msvc_exists() and sys.platform == 'win32'):
    for _n in dir():
        if _n[:7] == 'TestGcc': exec('del '+_n)
if __name__ == "__main__":
    run_module_suite()
|
{
"content_hash": "1a64037d5f5d22563ce0dfdb6a083b57",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 77,
"avg_line_length": 28.107142857142858,
"alnum_prop": 0.5519695044472681,
"repo_name": "RobertABT/heightmap",
"id": "2c19ce4f420cb886823caff0532e6014f7aba8a3",
"size": "3935",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "build/scipy/scipy/weave/tests/test_numpy_scalar_spec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25165856"
},
{
"name": "C++",
"bytes": "5251754"
},
{
"name": "CSS",
"bytes": "17123"
},
{
"name": "FORTRAN",
"bytes": "6353469"
},
{
"name": "JavaScript",
"bytes": "816504"
},
{
"name": "M",
"bytes": "66"
},
{
"name": "Matlab",
"bytes": "4280"
},
{
"name": "Objective-C",
"bytes": "284551"
},
{
"name": "Python",
"bytes": "13223936"
},
{
"name": "TeX",
"bytes": "37261"
}
],
"symlink_target": ""
}
|
from .elastic_first_recommender import (
article_recommendations_for_user,
article_search_for_user
)
|
{
"content_hash": "c0a89bbad7799d7199bf01fae6841e4a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 40,
"avg_line_length": 27.25,
"alnum_prop": 0.7522935779816514,
"repo_name": "mircealungu/Zeeguu-Core",
"id": "58e76dd9c449932674b14144731d75e96f02babf",
"size": "109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zeeguu_core/content_recommender/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "164762"
},
{
"name": "Shell",
"bytes": "366"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Packaging metadata for the ChannelWorm distribution.
setup(
    name='ChannelWorm',
    version='0.1',
    # All sub-packages shipped with the distribution.
    packages=[
        'channelworm',
        'channelworm.ion_channel',
        'channelworm.digitizer',
        'channelworm.account',
        'channelworm.web_app',
        'channelworm.fitter',
        'channelworm.predictor'
    ],
    long_description=open('README.md').read(),
    # Runtime/test dependencies installed alongside the package.
    install_requires=[
        'unicodecsv',
        'pillow',
        'pytest',
        'pytest-django',
        'django',
        'django-formtools',
        'django-sql-explorer',
    ]
)
|
{
"content_hash": "ea048f0c12ec6e5912bba221513e6288",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 46,
"avg_line_length": 22.2,
"alnum_prop": 0.5477477477477477,
"repo_name": "openworm/ChannelWorm",
"id": "624a8dcbf18fb23745f924d767c4e52d8b72f44e",
"size": "555",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11459"
},
{
"name": "HTML",
"bytes": "185500"
},
{
"name": "JavaScript",
"bytes": "598029"
},
{
"name": "Jupyter Notebook",
"bytes": "411940"
},
{
"name": "PLpgSQL",
"bytes": "2505"
},
{
"name": "Python",
"bytes": "252678"
},
{
"name": "Shell",
"bytes": "2962"
},
{
"name": "TeX",
"bytes": "13842"
}
],
"symlink_target": ""
}
|
"""
Report connected FTDI devices. This may be useful in obtaining
serial numbers to use as the device_id parameter of the Device()
constructor to communicate with a specific device when more than
one is present.
example usage:
$ python pylibftdi/examples/list_devices.py
FTDI:UB232R:FTAS1UN5
FTDI:UM232R USB <-> Serial:FTE4FFVQ
To open a device specifically to communicate with the second of
these devices, the following would be used:
>>> from pylibftdi import Device
>>> dev = Device(device_id="FTE4FFVQ")
>>>
Copyright (c) 2011-2014 Ben Bass <benbass@codedstructure.net>
All rights reserved.
"""
from pylibftdi import Driver
def get_ftdi_device_list():
    """
    return a list of lines, each a colon-separated
    vendor:product:serial summary of detected devices
    """
    summaries = []
    for raw_device in Driver().list_devices():
        # list_devices yields bytes; decode each field before formatting.
        # Each device entry is always the (vendor, product, serial) triple.
        vendor, product, serial = (field.decode('latin1') for field in raw_device)
        summaries.append("%s:%s:%s" % (vendor, product, serial))
    return summaries
# Script entry point: print one vendor:product:serial line per device.
if __name__ == '__main__':
    for device in get_ftdi_device_list():
        print(device)
|
{
"content_hash": "3b1c0dde3ad2bd4b71c91b0305a78d51",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 64,
"avg_line_length": 29.11627906976744,
"alnum_prop": 0.6821086261980831,
"repo_name": "claudyus/pylibftdi",
"id": "7fbd9ce518ca995c0a0fcb0fe744966a70f0883e",
"size": "1252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylibftdi/examples/list_devices.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71273"
}
],
"symlink_target": ""
}
|
"""
django_geopostcodes.managers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Django model managers for django-geopostcodes.
"""
from __future__ import absolute_import, print_function, unicode_literals
from django.db.models import QuerySet
from django.db.models.manager import Manager
from django.contrib.gis.db import models
class LocalityQuerySet(QuerySet):
    """
    Locality QuerySet with "match anything" helpers.

    Each ``anything_<lookup>`` method ORs a single lookup type across the
    address-like fields listed in ``anything_fields``.
    """
    anything_fields = ('country',
                       'region1', 'region2', 'region3', 'region4',
                       'locality', 'postcode', 'suburb')

    def anything(self, lookup_type, value, fields=anything_fields):
        """Filter rows where *value* matches ``<field>__<lookup_type>``
        for at least one field in *fields*."""
        q_objects = [models.Q(**{'%s__%s' % (field, lookup_type): value})
                     for field in fields]
        # OR all the Q objects together, starting from the last one.
        combined = q_objects.pop()
        for extra in q_objects:
            combined |= extra
        return self.filter(combined)

    def anything_icontains(self, value, fields=anything_fields):
        """Case-insensitive substring match across *fields*."""
        return self.anything('icontains', value, fields)

    def anything_contains(self, value, fields=anything_fields):
        """Case-sensitive substring match across *fields*."""
        return self.anything('contains', value, fields)

    def anything_exact(self, value, fields=anything_fields):
        """Case-sensitive exact match across *fields*."""
        return self.anything('exact', value, fields)

    def anything_iexact(self, value, fields=anything_fields):
        """Case-insensitive exact match across *fields*."""
        return self.anything('iexact', value, fields)

    def anything_startswith(self, value, fields=anything_fields):
        """Case-sensitive prefix match across *fields*."""
        return self.anything('startswith', value, fields)

    def anything_istartswith(self, value, fields=anything_fields):
        """Case-insensitive prefix match across *fields*."""
        return self.anything('istartswith', value, fields)

    def anything_endswith(self, value, fields=anything_fields):
        """Case-sensitive suffix match across *fields*."""
        return self.anything('endswith', value, fields)

    def anything_iendswith(self, value, fields=anything_fields):
        """Case-insensitive suffix match across *fields*."""
        return self.anything('iendswith', value, fields)
class LocalityManager(Manager.from_queryset(LocalityQuerySet)):
    "Overrides Manager to return Geographic QuerySets."
    # This manager should be used for queries on related fields
    # so that geometry columns on Oracle and MySQL are selected
    # properly.
    # NOTE(review): ``use_for_related_fields`` was deprecated in Django 1.10
    # and removed in 2.0 -- confirm the Django version this targets.
    use_for_related_fields = True
|
{
"content_hash": "2ccf4e7903d3b08ffb7399e176cdf092",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 92,
"avg_line_length": 34.12307692307692,
"alnum_prop": 0.6681695220919748,
"repo_name": "alexhayes/django-geopostcodes",
"id": "a0e0ad416c334b9107dbcad7f3984b03250da299",
"size": "2242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_geopostcodes/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2426"
},
{
"name": "Python",
"bytes": "26750"
}
],
"symlink_target": ""
}
|
import re
import json
from os import listdir
class WD:
    """Driver: parse every wavedrom JSON file in the working directory and
    emit the corresponding SystemVerilog include files plus a master
    ``wavedrom.svh`` that includes them all."""

    def __init__(self):
        self.method = []
        self.parse()
        self.writeOutput()

    def parse(self):
        """Collect a WDMethod for each *.json file in the current dir."""
        json_files = [f for f in listdir('.') if re.match('^[a-zA-Z_].*\.json$', f)]
        self.method = [WDMethod(json_file) for json_file in json_files]

    def writeOutput(self):
        """Write wavedrom.svh and each method's own include file."""
        master = open('wavedrom.svh', 'w')
        for method in self.method:
            master.write('`include "%s"\n' % method.ofile)
            method.writeOutput()
        master.close()
class WDMethod:
    # Translates one wavedrom JSON description into a SystemVerilog task
    # (written to <name>.svh).  Parsing is order-sensitive: signal 'data'
    # lists are consumed with pop(0) as cycles are emitted.
    def __init__(self, ifile):
        self.ifile = ifile
        self.name = ''
        self.clk = ''
        self.signal = []
        self.input = []
        self.output = []
        self.edge = []
        self.edgeTypes = '[<>~\-|]+'
        self.rawData = {}
        self.parse()
    def parse(self):
        """Load the JSON file and split signals into clock/data groups."""
        with open(self.ifile) as _input:
            self.rawData = json.load(_input)
        self.name = self.rawData['name']
        self.ofile = self.name + '.svh'
        # clk is anything that matches 'p' in the wave (but only 1 is valid hence the [0])
        self.clk = [ clk for clk in self.rawData['signal'] if re.match('p', clk['wave']) ][0]
        # signals are anything that don't match 'p' in the wave
        self.signal = [ signal for signal in self.rawData['signal'] if not re.match('p', signal['wave']) ]
        # 'input', 'output' and 'edge' sections are all optional.
        try:
            self.input = self.rawData['input']
        except KeyError:
            pass
        try:
            self.output = self.rawData['output']
        except KeyError:
            pass
        try:
            self.edge = self.rawData['edge']
        except KeyError:
            pass
    def writeOutput(self):
        """Emit the SystemVerilog task for this waveform into self.ofile."""
        cycles = []
        ofile = open(self.ofile, 'w')
        # header
        if len(self.input) + len(self.output) > 0:
            if len(self.input) > 0:
                io = [ "input %s %s" % (_input['type'], _input['name']) for _input in self.input ]
            if len(self.output) > 0:
                io += [ "output %s %s" % (_output['type'], _output['name']) for _output in self.output ]
            cycles.append('task %s(%s);\n' % (self.name, ', '.join(io)))
        else:
            cycles.append('task %s();\n' % self.name)
        # build each clock cycle
        for i in range( 0, len(self.clk['wave']) ):
            thisCycle = ''
            # At i == 0 the "last cycle" wraps to wave[-1] (the final char).
            waitThisCycle = self.isWait(self.clk['wave'][i])
            waitLastCycle = self.isWait(self.clk['wave'][i-1])
            # NOTE(review): the two names below are computed but never used.
            waitBothCycles = waitThisCycle and waitLastCycle
            waitNeitherCycle = not (waitThisCycle or waitLastCycle)
            if not (waitThisCycle or waitLastCycle):
                thisCycle += self.step()
                thisCycle += self.writeSignals(i)
            elif waitThisCycle and not waitLastCycle:
                thisCycle += self.getWaitFor(i)
            elif waitLastCycle and not waitThisCycle:
                thisCycle += self.writeSignals(i)
            elif waitLastCycle and waitThisCycle:
                thisCycle += self.writeSignals(i)
                thisCycle += self.getWaitFor(i)
            thisCycle += self.captureOutputs(i)
            if thisCycle != '':
                cycles.append(thisCycle)
        # footer
        cycles.append('endtask')
        ofile.write(''.join(cycles))
        ofile.close()
    def writeSignals(self, idx):
        """Return assignments for every driven signal at cycle *idx*."""
        _thisCycle = ''
        # if a signal has a new value for this cycle, assign it
        for s in self.signal:
            # NOTE(review): ``break`` aborts the whole loop at the first
            # signal flagged as an input -- presumably ``continue`` (skip
            # just that signal) was intended.  TODO confirm.
            if 'input' in s and s['input']:
                break
            else:
                if self.isBinary(s['wave'][idx]):
                    _thisCycle += "    %s = 'h%s;\n" % (s['name'], s['wave'][idx])
                elif self.isValue(s['wave'][idx]):
                    _thisCycle += "    %s = %s;\n" % (s['name'], s['data'].pop(0))
        return _thisCycle
    def captureOutputs(self, idx):
        """Return capture assignments for output signals valued at idx-1."""
        _thisCycle = ''
        for s in self.signal:
            if 'output' in s and s['output']:
                if self.isValue(s['wave'][idx-1]):
                    _thisCycle += "    %s = %s;\n" % (s['data'].pop(0), s['name'])
        return _thisCycle
    def isBinary(self, value):
        # Literal binary/unknown wave characters.
        return value in [ "0", "1", "x", "X" ]
    def isValue(self, value):
        # '=' marks a data value taken from the signal's 'data' list.
        return value in [ "=" ]
    def isWait(self, value):
        # '|' marks a wait/gap in the clock wave.
        return value in [ "|" ]
    def step(self, num='1', loop='repeat'):
        """Return step()/nextSamplePoint() calls, optionally wrapped in a
        repeat/while block when *num* is not the literal '1'."""
        step = ''
        step += '%sstep();\n' % (' ' * (1 + int(num != '1')))
        step += '%snextSamplePoint();\n' % (' ' * (1 + int(num != '1')))
        if num != '1':
            step = '  %s (%s) begin\n' % (loop, num) + step + '  end\n'
        return step
    def getWaitFor(self, nodeIdx):
        """Return the wait construct for the edge ending at node nodeIdx+1:
        a while-loop on the condition for '.' nodes, a random repeat otherwise."""
        cond = [ e for e in self.edge if re.match('.%s%s' % (self.edgeTypes, self.clk['node'][nodeIdx+1]), e) ][0]
        cond = re.sub('.* ', '', cond)
        if self.clk['node'][nodeIdx] == '.':
            return self.step("!(%s)" % cond, 'while')
        else:
            return self.step("$urandom_range(%s)" % cond)
# Script entry point: generate SVUnit include files from wavedrom JSON.
if __name__ == "__main__":
    print ("Info: Writing wavedrom output.")
    wd = WD()
|
{
"content_hash": "72f1bc97871549db2cdbd471c9f2f856",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 114,
"avg_line_length": 29.900584795321638,
"alnum_prop": 0.4971640915313906,
"repo_name": "nosnhojn/svunit-code",
"id": "3d2a444a2de55e55c80f9200e0c8ae4772fed81d",
"size": "5865",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bin/wavedromSVUnit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Coq",
"bytes": "63"
},
{
"name": "Filebench WML",
"bytes": "160"
},
{
"name": "Forth",
"bytes": "301"
},
{
"name": "Perl",
"bytes": "48525"
},
{
"name": "Python",
"bytes": "40075"
},
{
"name": "Shell",
"bytes": "3528"
},
{
"name": "SystemVerilog",
"bytes": "127033"
},
{
"name": "VHDL",
"bytes": "190"
}
],
"symlink_target": ""
}
|
"""
Presubmit script for the printing backend.
See https://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API.
"""
# Tell depot_tools' presubmit runner to execute this file with Python 3.
USE_PYTHON3 = True
def _CheckForStringViewFromNullableIppApi(input_api, output_api):
  """
  Looks for all affected lines in CL where one constructs either
  base::StringPiece or std::string_view from any ipp*() CUPS API call.
  Assumes over-broadly that all ipp*() calls can return NULL.
  Returns affected lines as a list of presubmit errors.
  """
  # Matches e.g. ``base::StringPiece foo = ippDoBar();`` and
  # ``std::string_view foo(ippDoBar());``.
  pattern = input_api.re.compile(
      r"^.+(base::StringPiece|std::string_view)\s+\w+( = |\()ipp[A-Z].+$")

  def line_is_clean(extension, line):
    # A line is acceptable unless it is C++ source and matches the pattern.
    return not (extension in ("cc", "h") and pattern.search(line))

  violations = input_api.canned_checks._FindNewViolationsOfRule(
      line_is_clean, input_api, None)
  bulleted_violations = [" * {}".format(entry) for entry in violations]
  if not bulleted_violations:
    return []
  message = ("Possible construction of base::StringPiece or std::string_view "
             "from CUPS IPP API (that can probably return NULL):\n{}").format(
                 "\n".join(bulleted_violations))
  return [output_api.PresubmitError(message)]
def _CommonChecks(input_api, output_api):
  """Actual implementation of presubmits for the printing backend."""
  # Currently a single check; returned as a fresh list so callers may
  # extend it freely.
  return list(_CheckForStringViewFromNullableIppApi(input_api, output_api))
def CheckChangeOnUpload(input_api, output_api):
  """Mandatory presubmit entry point."""
  # Upload-time and commit-time checks are identical for this directory.
  return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  """Mandatory presubmit entry point."""
  # Delegates to the same common checks as the upload hook.
  return _CommonChecks(input_api, output_api)
|
{
"content_hash": "4e2d3ee55416eff369aa2c3b6a90c57d",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 38.395833333333336,
"alnum_prop": 0.7037438958220293,
"repo_name": "ric2b/Vivaldi-browser",
"id": "4ef6bceedf30a869c3ea715ebdf95b0da599d0ce",
"size": "2006",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/printing/backend/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.template.loaders import app_directories, filesystem
from django.template.base import TemplateDoesNotExist
from django.template.loader import make_origin
from django.conf import settings
from rhinocloud.template.openoffice import OpenOfficeTemplate
import zipfile
def read_openoffice(filepath):
    """Return the raw ``content.xml`` bytes from the OpenOffice (zip)
    document at *filepath*; the archive is always closed afterwards."""
    archive = zipfile.ZipFile(filepath, 'r')
    try:
        return archive.read('content.xml')
    finally:
        archive.close()
class AppDirectoriesLoader(app_directories.Loader):
    # Variant of Django's app-directories loader; reads the candidate file
    # as plain text using settings.FILE_CHARSET.
    def load_template_source(self, template_name, template_dirs=None):
        """Return ``(source, filepath)`` for the first readable candidate,
        or raise TemplateDoesNotExist."""
        for filepath in self.get_template_sources(template_name, template_dirs):
            # NOTE(review): dead branch -- zip (OpenOffice) candidates are
            # detected but then fall through to the plain-text read below;
            # presumably this should use read_openoffice() as in
            # FileSystemLoader.  TODO confirm intent.
            if zipfile.is_zipfile(filepath):
                pass
            try:
                file = open(filepath)
                try:
                    return (file.read().decode(settings.FILE_CHARSET), filepath)
                finally:
                    file.close()
            except IOError:
                pass
        raise TemplateDoesNotExist(template_name)
class FileSystemLoader(filesystem.Loader):
    """Template loader that serves OpenOffice (zip) documents from
    TEMPLATE_DIRS, compiling them as OpenOfficeTemplate objects."""

    def load_template_source(self, template_name, template_dirs=None):
        """Return ``(source, filepath)`` for the first matching OpenOffice
        document.

        Only zip archives are considered; non-zip candidates (and zip
        candidates that fail to read) are recorded in ``tried`` and
        reported via TemplateDoesNotExist so Django can fall through to
        the next loader.
        """
        tried = []
        for filepath in self.get_template_sources(template_name, template_dirs):
            if zipfile.is_zipfile(filepath):
                # Bug fix: the original had ``finally: files.close()`` here,
                # which always raised NameError (``files`` is local to
                # read_openoffice, which already closes the archive itself).
                try:
                    return (read_openoffice(filepath), filepath)
                except IOError:
                    pass
            tried.append(filepath)
        if tried:
            error_msg = "Tried %s" % tried
        else:
            error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
        raise TemplateDoesNotExist(error_msg)

    def load_template(self, template_name, template_dirs=None):
        """Load and compile the named template into an OpenOfficeTemplate.

        Returns ``(template, None)`` on success, or ``(source,
        display_name)`` when compilation itself raises
        TemplateDoesNotExist (see comment below).
        """
        source, display_name = self.load_template_source(template_name, template_dirs)
        origin = make_origin(display_name, self.load_template_source, template_name, template_dirs)
        try:
            return OpenOfficeTemplate(source, origin, filepath=display_name), None
        except TemplateDoesNotExist:
            # If compiling the template we found raises TemplateDoesNotExist, back off to
            # returning the source and display name for the template we were asked to load.
            # This allows for correct identification (later) of the actual template that does
            # not exist.
            return source, display_name
|
{
"content_hash": "b4f1b30fc1aeeb514054d3c60ca85779",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 117,
"avg_line_length": 41.21666666666667,
"alnum_prop": 0.63040841083704,
"repo_name": "allanlei/rhinocloud-utils",
"id": "2a29aca4f35ce7f21b99707089983ef3939fb2df",
"size": "2473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rhinocloud/template/loaders/openoffice.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "104927"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
import os
import sys
from nose.tools import raises
from cutadapt.scripts import cutadapt
from .utils import run, files_equal, datapath, cutpath, redirect_stderr, temporary_path
def test_example():
run('-N -b ADAPTER', 'example.fa', 'example.fa')
def test_small():
run('-b TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq')
def test_empty():
'''empty input'''
run('-a TTAGACATATCTCCGTCG', 'empty.fastq', 'empty.fastq')
def test_newlines():
'''DOS/Windows newlines'''
run('-e 0.12 -b TTAGACATATCTCCGTCG', 'dos.fastq', 'dos.fastq')
def test_lowercase():
'''lowercase adapter'''
run('-b ttagacatatctccgtcg', 'lowercase.fastq', 'small.fastq')
def test_rest():
'''-r/--rest-file'''
with temporary_path('rest.tmp') as rest_tmp:
run(['-b', 'ADAPTER', '-N', '-r', rest_tmp], "rest.fa", "rest.fa")
assert files_equal(datapath('rest.txt'), rest_tmp)
def test_restfront():
with temporary_path("rest.txt") as path:
run(['-g', 'ADAPTER', '-N', '-r', path], "restfront.fa", "rest.fa")
assert files_equal(datapath('restfront.txt'), path)
def test_discard():
'''--discard'''
run("-b TTAGACATATCTCCGTCG --discard", "discard.fastq", "small.fastq")
def test_discard_untrimmed():
'''--discard-untrimmed'''
run('-b CAAGAT --discard-untrimmed', 'discard-untrimmed.fastq', 'small.fastq')
def test_plus():
'''test if sequence name after the "+" is retained'''
run("-e 0.12 -b TTAGACATATCTCCGTCG", "plus.fastq", "plus.fastq")
def test_extensiontxtgz():
'''automatic recognition of "_sequence.txt.gz" extension'''
run("-b TTAGACATATCTCCGTCG", "s_1_sequence.txt", "s_1_sequence.txt.gz")
def test_format():
'''the -f/--format parameter'''
run("-f fastq -b TTAGACATATCTCCGTCG", "small.fastq", "small.myownextension")
def test_minimum_length():
'''-m/--minimum-length'''
run("-c -m 5 -a 330201030313112312", "minlen.fa", "lengths.fa")
def test_too_short():
'''--too-short-output'''
run("-c -m 5 -a 330201030313112312 --too-short-output tooshort.tmp.fa", "minlen.fa", "lengths.fa")
assert files_equal(datapath('tooshort.fa'), "tooshort.tmp.fa")
os.remove('tooshort.tmp.fa')
def test_too_short_no_primer():
'''--too-short-output and --trim-primer'''
run("-c -m 5 -a 330201030313112312 --trim-primer --too-short-output tooshort.tmp.fa", "minlen.noprimer.fa", "lengths.fa")
assert files_equal(datapath('tooshort.noprimer.fa'), "tooshort.tmp.fa")
os.remove('tooshort.tmp.fa')
def test_maximum_length():
'''-M/--maximum-length'''
run("-c -M 5 -a 330201030313112312", "maxlen.fa", "lengths.fa")
def test_too_long():
'''--too-long-output'''
run("-c -M 5 --too-long-output toolong.tmp.fa -a 330201030313112312", "maxlen.fa", "lengths.fa")
assert files_equal(datapath('toolong.fa'), "toolong.tmp.fa")
os.remove('toolong.tmp.fa')
def test_length_tag():
'''454 data; -n and --length-tag'''
run("-n 3 -e 0.1 --length-tag length= " \
"-b TGAGACACGCAACAGGGGAAAGGCAAGGCACACAGGGGATAGG "\
"-b TCCATCTCATCCCTGCGTGTCCCATCTGTTCCCTCCCTGTCTCA", '454.fa', '454.fa')
def test_overlap_a():
'''-O/--overlap with -a (-c omitted on purpose)'''
run("-O 10 -a 330201030313112312 -e 0.0 -N", "overlapa.fa", "overlapa.fa")
def test_overlap_b():
'''-O/--overlap with -b'''
run("-O 10 -b TTAGACATATCTCCGTCG -N", "overlapb.fa", "overlapb.fa")
def test_qualtrim():
'''-q with low qualities'''
run("-q 10 -a XXXXXX", "lowqual.fastq", "lowqual.fastq")
def test_qualbase():
'''-q with low qualities, using ascii(quality+64) encoding'''
run("-q 10 --quality-base 64 -a XXXXXX", "illumina64.fastq", "illumina64.fastq")
def test_quality_trim_only():
'''only trim qualities, do not remove adapters'''
run("-q 10 --quality-base 64", "illumina64.fastq", "illumina64.fastq")
def test_twoadapters():
'''two adapters'''
run("-a AATTTCAGGAATT -a GTTCTCTAGTTCT", "twoadapters.fasta", "twoadapters.fasta")
def test_polya():
'''poly-A tails'''
run("-m 24 -O 10 -a AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "polya.fasta", "polya.fasta")
def test_polya_brace_notation():
'''poly-A tails'''
run("-m 24 -O 10 -a A{35}", "polya.fasta", "polya.fasta")
def test_mask_adapter():
'''mask adapter with N (reads maintain the same length)'''
run("-b CAAG -n 3 --mask-adapter", "anywhere_repeat.fastq", "anywhere_repeat.fastq")
def test_gz_multiblock():
'''compressed gz file with multiple blocks (created by concatenating two .gz files)'''
run("-b TTAGACATATCTCCGTCG", "small.fastq", "multiblock.fastq.gz")
def test_suffix():
'''-y/--suffix parameter, combined with _F3'''
run("-c -e 0.12 -a 1=330201030313112312 -y _my_suffix_{name} --strip-f3", "suffix.fastq", "solid.csfasta", 'solid.qual')
def test_read_wildcard():
'''test wildcards in reads'''
run("--match-read-wildcards -b ACGTACGT", "wildcard.fa", "wildcard.fa")
def test_adapter_wildcard():
    '''wildcards in adapter'''
    # Exercise N-wildcards both as a 3' adapter (-a) and as an
    # anywhere adapter (-b); each mode has its own expected output.
    cases = [('-a', 'wildcard_adapter.fa'),
             ('-b', 'wildcard_adapter_anywhere.fa')]
    for adapter_type, expected in cases:
        with temporary_path("wildcardtmp.txt") as wildcardtmp:
            run("--wildcard-file {0} {1} ACGTNNNACGT".format(wildcardtmp, adapter_type),
                expected, "wildcard_adapter.fa")
            with open(wildcardtmp) as wct:
                observed = [entry.strip() for entry in wct]
            assert observed == ['AAA 1', 'GGG 2', 'CCC 3b', 'TTT 4b']
def test_wildcard_N():
'''test 'N' wildcard matching with no allowed errors'''
run("-e 0 -a GGGGGGG --match-read-wildcards", "wildcardN.fa", "wildcardN.fa")
def test_illumina_adapter_wildcard():
run("-a VCCGAMCYUCKHRKDCUBBCNUWNSGHCGU", "illumina.fastq", "illumina.fastq.gz")
def test_adapter_front():
'''test adapter in front'''
run("--front ADAPTER -N", "examplefront.fa", "example.fa")
def test_literal_N():
'''test matching literal 'N's'''
run("-N -e 0.2 -a NNNNNNNNNNNNNN", "trimN3.fasta", "trimN3.fasta")
def test_literal_N2():
run("-N -O 1 -g NNNNNNNNNNNNNN", "trimN5.fasta", "trimN5.fasta")
def test_literal_N_brace_notation():
'''test matching literal 'N's'''
run("-N -e 0.2 -a N{14}", "trimN3.fasta", "trimN3.fasta")
def test_literal_N2_brace_notation():
run("-N -O 1 -g N{14}", "trimN5.fasta", "trimN5.fasta")
def test_anchored_front():
run("-g ^FRONTADAPT -N", "anchored.fasta", "anchored.fasta")
def test_anchored_front_ellipsis_notation():
run("-a FRONTADAPT... -N", "anchored.fasta", "anchored.fasta")
def test_anchored_back():
run("-a BACKADAPTER$ -N", "anchored-back.fasta", "anchored-back.fasta")
def test_anchored_back_no_indels():
run("-a BACKADAPTER$ -N --no-indels", "anchored-back.fasta", "anchored-back.fasta")
def test_no_indels():
run('-a TTAGACATAT -g GAGATTGCCA --no-indels', 'no_indels.fasta', 'no_indels.fasta')
def test_issue_46():
'''issue 46 - IndexError with --wildcard-file'''
with temporary_path("wildcardtmp.txt") as wildcardtmp:
run("--anywhere=AACGTN --wildcard-file={0}".format(wildcardtmp), "issue46.fasta", "issue46.fasta")
def test_strip_suffix():
run("--strip-suffix _sequence -a XXXXXXX", "stripped.fasta", "simple.fasta")
def test_info_file():
# The true adapter sequence in the illumina.fastq.gz data set is
# GCCTAACTTCTTAGACTGCCTTAAGGACGT (fourth base is different)
#
with temporary_path("infotmp.txt") as infotmp:
run(["--info-file", infotmp, '-a', 'adapt=GCCGAACTTCTTAGACTGCCTTAAGGACGT'], "illumina.fastq", "illumina.fastq.gz")
assert files_equal(cutpath('illumina.info.txt'), infotmp)
def test_info_file_times():
with temporary_path("infotmp.txt") as infotmp:
run(["--info-file", infotmp, '--times', '2', '-a', 'adapt=GCCGAACTTCTTA', '-a', 'adapt2=GACTGCCTTAAGGACGT'], "illumina5.fastq", "illumina5.fastq")
assert files_equal(cutpath('illumina5.info.txt'), infotmp)
def test_info_file_fasta():
with temporary_path("infotmp.txt") as infotmp:
# Just make sure that it runs
run(['--info-file', infotmp, '-a', 'TTAGACATAT', '-g', 'GAGATTGCCA', '--no-indels'], 'no_indels.fasta', 'no_indels.fasta')
def test_named_adapter():
run("-a MY_ADAPTER=GCCGAACTTCTTAGACTGCCTTAAGGACGT", "illumina.fastq", "illumina.fastq.gz")
def test_adapter_with_U():
run("-a GCCGAACUUCUUAGACUGCCUUAAGGACGU", "illumina.fastq", "illumina.fastq.gz")
def test_no_trim():
''' --no-trim '''
run("--no-trim --discard-untrimmed -a CCCTAGTTAAAC", 'no-trim.fastq', 'small.fastq')
def test_bzip2():
'''test bzip2 support'''
run('-b TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq.bz2')
# The xz test is only *defined* when the lzma module can be imported;
# on interpreters without lzma the test simply does not exist, so the
# test runner skips it implicitly rather than failing.
try:
import lzma
def test_xz():
'''test xz support'''
run('-b TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq.xz')
except ImportError:
pass
@raises(SystemExit)
def test_qualfile_only():
with redirect_stderr():
cutadapt.main(['file.qual'])
@raises(SystemExit)
def test_no_args():
with redirect_stderr():
cutadapt.main([])
@raises(SystemExit)
def test_two_fastqs():
with redirect_stderr():
cutadapt.main([datapath('paired.1.fastq'), datapath('paired.2.fastq')])
def test_anchored_no_indels():
'''anchored 5' adapter, mismatches only (no indels)'''
run('-g ^TTAGACATAT --no-indels -e 0.1', 'anchored_no_indels.fasta', 'anchored_no_indels.fasta')
def test_anchored_no_indels_wildcard_read():
'''anchored 5' adapter, mismatches only (no indels), but wildcards in the read count as matches'''
run('-g ^TTAGACATAT --match-read-wildcards --no-indels -e 0.1', 'anchored_no_indels_wildcard.fasta', 'anchored_no_indels.fasta')
def test_anchored_no_indels_wildcard_adapt():
'''anchored 5' adapter, mismatches only (no indels), but wildcards in the adapter count as matches'''
run('-g ^TTAGACANAT --no-indels -e 0.1', 'anchored_no_indels.fasta', 'anchored_no_indels.fasta')
def test_unconditional_cut_front():
run('-u 5', 'unconditional-front.fastq', 'small.fastq')
def test_unconditional_cut_back():
run('-u -5', 'unconditional-back.fastq', 'small.fastq')
def test_unconditional_cut_both():
run('-u -5 -u 5', 'unconditional-both.fastq', 'small.fastq')
def test_untrimmed_output():
with temporary_path('untrimmed.tmp.fastq') as tmp:
run(['-a', 'TTAGACATATCTCCGTCG', '--untrimmed-output', tmp], 'small.trimmed.fastq', 'small.fastq')
assert files_equal(cutpath('small.untrimmed.fastq'), tmp)
def test_adapter_file():
run('-a file:' + datapath('adapter.fasta'), 'illumina.fastq', 'illumina.fastq.gz')
def test_adapter_file_5p_anchored():
run('-N -g file:' + datapath('prefix-adapter.fasta'), 'anchored.fasta', 'anchored.fasta')
def test_adapter_file_3p_anchored():
run('-N -a file:' + datapath('suffix-adapter.fasta'), 'anchored-back.fasta', 'anchored-back.fasta')
def test_adapter_file_5p_anchored_no_indels():
run('-N --no-indels -g file:' + datapath('prefix-adapter.fasta'), 'anchored.fasta', 'anchored.fasta')
def test_adapter_file_3p_anchored_no_indels():
run('-N --no-indels -a file:' + datapath('suffix-adapter.fasta'), 'anchored-back.fasta', 'anchored-back.fasta')
def test_demultiplex():
    # Demultiplex by named adapter into per-adapter output files; the
    # {name} placeholder in -o selects the destination file.
    multiout = os.path.join(os.path.dirname(__file__), 'data', 'tmp-demulti.{name}.fasta')
    params = ['-a', 'first=AATTTCAGGAATT', '-a', 'second=GTTCTCTAGTTCT',
              '-o', multiout, datapath('twoadapters.fasta')]
    assert cutadapt.main(params) is None
    names = ('first', 'second', 'unknown')
    # Compare every demultiplexed file before removing any of them.
    for name in names:
        assert files_equal(cutpath('twoadapters.{0}.fasta'.format(name)),
                           multiout.format(name=name))
    for name in names:
        os.remove(multiout.format(name=name))
def test_max_n():
    # --max-n accepts an absolute count (0, 1, 2) or a fraction of the
    # read length (0.2, 0.4); each limit has its own expected output.
    for limit, expected in (('0', 'maxn0.fasta'),
                            ('1', 'maxn1.fasta'),
                            ('2', 'maxn2.fasta'),
                            ('0.2', 'maxn0.2.fasta'),
                            ('0.4', 'maxn0.4.fasta')):
        run('--max-n ' + limit, expected, 'maxn.fasta')
|
{
"content_hash": "a547280209226b70f87910a1aca0c79d",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 148,
"avg_line_length": 33.45014245014245,
"alnum_prop": 0.6816284813900009,
"repo_name": "Chris7/cutadapt",
"id": "34b3c1ab7af3ec9515ccdf8f3d771b1c53ccdee9",
"size": "11831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "178037"
}
],
"symlink_target": ""
}
|
import copy
import datetime
from django.contrib.auth.models import User
from django.db import models
from django.db.models.query import Q
from django.utils.datastructures import SortedDict
class RevisionableModel(models.Model):
# Self-referential FK: every revision points at the first revision in
# its chain; a brand-new chain points at itself (set during save()).
base = models.ForeignKey('self', null=True)
title = models.CharField(blank=True, max_length=255)
when = models.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return u"%s (%s, %s)" % (self.title, self.id, self.base.id)
def save(self, force_insert=False, force_update=False):
# First save assigns the primary key; if this row starts a new chain,
# point base at itself and save a second time to persist base_id.
super(RevisionableModel, self).save(force_insert, force_update)
if not self.base:
self.base = self
super(RevisionableModel, self).save()
def new_revision(self):
# Shallow copy with the pk cleared: saving the copy inserts a new row
# that keeps the same ``base`` as this revision.
new_revision = copy.copy(self)
new_revision.pk = None
return new_revision
class Order(models.Model):
# Minimal model for the extra() regression doctests below; the FK to
# auth User lets a test join against auth_user via extra(tables=...).
created_by = models.ForeignKey(User)
text = models.TextField()
__test__ = {"API_TESTS": """
# Regression tests for #7314 and #7372
>>> rm = RevisionableModel.objects.create(title='First Revision', when=datetime.datetime(2008, 9, 28, 10, 30, 0))
>>> rm.pk, rm.base.pk
(1, 1)
>>> rm2 = rm.new_revision()
>>> rm2.title = "Second Revision"
>>> rm.when = datetime.datetime(2008, 9, 28, 14, 25, 0)
>>> rm2.save()
>>> print u"%s of %s" % (rm2.title, rm2.base.title)
Second Revision of First Revision
>>> rm2.pk, rm2.base.pk
(2, 1)
Queryset to match most recent revision:
>>> qs = RevisionableModel.objects.extra(where=["%(table)s.id IN (SELECT MAX(rev.id) FROM %(table)s rev GROUP BY rev.base_id)" % {'table': RevisionableModel._meta.db_table,}],)
>>> qs
[<RevisionableModel: Second Revision (2, 1)>]
Queryset to search for string in title:
>>> qs2 = RevisionableModel.objects.filter(title__contains="Revision")
>>> qs2
[<RevisionableModel: First Revision (1, 1)>, <RevisionableModel: Second Revision (2, 1)>]
Following queryset should return the most recent revision:
>>> qs & qs2
[<RevisionableModel: Second Revision (2, 1)>]
>>> u = User.objects.create_user(username="fred", password="secret", email="fred@example.com")
# General regression tests: extra select parameters should stay tied to their
# corresponding select portions. Applies when portions are updated or otherwise
# moved around.
>>> qs = User.objects.extra(select=SortedDict((("alpha", "%s"), ("beta", "2"), ("gamma", "%s"))), select_params=(1, 3))
>>> qs = qs.extra(select={"beta": 4})
>>> qs = qs.extra(select={"alpha": "%s"}, select_params=[5])
>>> result = {'alpha': 5, 'beta': 4, 'gamma': 3}
>>> list(qs.filter(id=u.id).values('alpha', 'beta', 'gamma')) == [result]
True
# Regression test for #7957: Combining extra() calls should leave the
# corresponding parameters associated with the right extra() bit. I.e. internal
# dictionary must remain sorted.
>>> User.objects.extra(select={"alpha": "%s"}, select_params=(1,)).extra(select={"beta": "%s"}, select_params=(2,))[0].alpha
1
>>> User.objects.extra(select={"beta": "%s"}, select_params=(1,)).extra(select={"alpha": "%s"}, select_params=(2,))[0].alpha
2
# Regression test for #7961: When not using a portion of an extra(...) in a
# query, remove any corresponding parameters from the query as well.
>>> list(User.objects.extra(select={"alpha": "%s"}, select_params=(-6,)).filter(id=u.id).values_list('id', flat=True)) == [u.id]
True
# Regression test for #8063: limiting a query shouldn't discard any extra()
# bits.
>>> qs = User.objects.all().extra(where=['id=%s'], params=[u.id])
>>> qs
[<User: fred>]
>>> qs[:1]
[<User: fred>]
# Regression test for #8039: Ordering sometimes removed relevant tables from
# extra(). This test is the critical case: ordering uses a table, but then
# removes the reference because of an optimisation. The table should still be
# present because of the extra() call.
>>> Order.objects.extra(where=["username=%s"], params=["fred"], tables=["auth_user"]).order_by('created_by')
[]
# Regression test for #8819: Fields in the extra(select=...) list should be
# available to extra(order_by=...).
>>> User.objects.filter(pk=u.id).extra(select={'extra_field': 1}).distinct()
[<User: fred>]
>>> User.objects.filter(pk=u.id).extra(select={'extra_field': 1}, order_by=['extra_field'])
[<User: fred>]
>>> User.objects.filter(pk=u.id).extra(select={'extra_field': 1}, order_by=['extra_field']).distinct()
[<User: fred>]
# When calling the dates() method on a queryset with extra selection columns,
# we can (and should) ignore those columns. They don't change the result and
# cause incorrect SQL to be produced otherwise.
>>> RevisionableModel.objects.extra(select={"the_answer": 'id'}).dates('when', 'month')
[datetime.datetime(2008, 9, 1, 0, 0)]
"""}
|
{
"content_hash": "4a219004a6b365b59b8c85094b805241",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 176,
"avg_line_length": 39.88135593220339,
"alnum_prop": 0.6784955376115597,
"repo_name": "weigj/django-multidb",
"id": "680917b8ae5c11668642d1970fe7169a05d3e192",
"size": "4706",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/regressiontests/extra_regress/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "77034"
},
{
"name": "Python",
"bytes": "4173202"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
}
|
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
#!/usr/bin/env python
__author__ = 'Zsolt Lattmann'
__copyright__ = 'Copyright (C) 2013 Vanderbilt University'
__license__ = """
Copyright (C) 2013 Vanderbilt University
Permission is hereby granted, free of charge, to any person obtaining a
copy of this data, including any software or models in source or binary
form, as well as any drawings, specifications, and documentation
(collectively "the Data"), to deal in the Data without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Data, and to
permit persons to whom the Data is furnished to do so, subject to the
following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Data.
THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
"""
__status__ = "Prototype"
__maintainer__ = "https://openmodelica.org"
import os
import re
import math
import uuid
import json
import logging
import sys
from optparse import OptionParser
import svgwrite
import OMPython
# OpenModelica setup commands
OMC_SETUP_COMMANDS = ['setCommandLineOptions("+d=nogen,noevalfunc")']
# Bitmap
# extends GraphicItem
# extent
# fileName
# imageSource
# key=value pairs; the value is either a quoted string or a bare token
regex_equal_key_value = re.compile("([^ =]+) *= *(\"[^\"]*\"|[^ ]*)")
# NOTE(review): this binding of regex_points is dead code -- the name is
# rebound further down in the module before any use.
# NOTE(review): in these patterns the '.' inside '\d+(?:.\d+)?' is an
# unescaped any-character, not a literal decimal point; presumably
# harmless on well-formed OMC output, but worth confirming.
regex_points = re.compile("{([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}")
regex_type_value = re.compile("(\w+.\w+)*")
# Compile regular expressions ONLY once!
# example: {-100.0,-100.0,100.0,100.0,true,0.16,2.0,2.0, {...
regex_coordSys = re.compile('([+-]?\d+(?:.\d+)?),([+-]?\d+(?:.\d+)?),([+-]?\d+(?:.\d+)?),([+-]?\d+(?:.\d+)?),(\w+),([+-]?\d+(?:.\d+)?),([+-]?\d+(?:.\d+)?),([+-]?\d+(?:.\d+)?),')
# example: Rectangle(true, {35.0, 10.0}, 0, {0, 0, 0}, {255, 255, 255}, LinePattern.Solid, FillPattern.Solid, 0.25, BorderPattern.None, {{-15.0, -4.0}, {15.0, 4.0}}, 0
regex_rectangle = re.compile('Rectangle\((\w+), {([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}, ([+-]?\d+(?:.\d+)?), {(\d+), (\d+), (\d+)}, {(\d+), (\d+), (\d+)}, (\w+.\w+), (\w+.\w+), ([+-]?\d+(?:.\d+)?), (\w+.\w+), {{([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}, {([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}}, ([+-]?\d+(?:.\d+)?)')
# example: Line(true, {0.0, 0.0}, 0, {{-30, -120}, {-10, -100}}, {0, 0, 0}, LinePattern.Solid, 0.25, {Arrow.None, Arrow.None}, 3, Smooth.None
regex_line = re.compile('Line\((\w+), {([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}, ([+-]?\d+(?:.\d+)?), ({{[+-]?\d+(?:.\d+)?, [+-]?\d+(?:.\d+)?}(?:, {[+-]?\d+(?:.\d+)?, [+-]?\d+(?:.\d+)?})*}), {(\d+), (\d+), (\d+)}, (\w+.\w+), ([+-]?\d+(?:.\d+)?), {(\w+.\w+), (\w+.\w+)}, ([+-]?\d+(?:.\d+)?), (\w+.\w+)')
# example: Ellipse(true, {0.0, 0.0}, 0, {0, 0, 0}, {95, 95, 95}, LinePattern.Solid, FillPattern.Solid, 0.25, {{-100, 100}, {100, -100}}, 0, 360)}}
regex_ellipse = re.compile('Ellipse\((\w+), {([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}, ([+-]?\d+(?:.\d+)?), {(\d+), (\d+), (\d+)}, {(\d+), (\d+), (\d+)}, (\w+.\w+), (\w+.\w+), ([+-]?\d+(?:.\d+)?), {{([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}, {([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}}, ([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)')
# example: Text(true, {0.0, 0.0}, 0, {0, 0, 255}, {0, 0, 0}, LinePattern.Solid, FillPattern.None, 0.25, {{-150, 110}, {150, 70}}, "%name", 0, TextAlignment.Center
regex_text = re.compile('Text\((\w+), {([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}, ([+-]?\d+(?:.\d+)?), {(\d+), (\d+), (\d+)}, {(\d+), (\d+), (\d+)}, (\w+.\w+), (\w+.\w+), ([+-]?\d+(?:.\d+)?), {{([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}, {([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}}, ("[^"]*"), ([+-]?\d+(?:.\d+)?)(?:, ("[^"]*"))?(?:, {([^}]*)})?, (\w+.\w+)')
# example: Polygon(true, {0.0, 0.0}, 0, {0, 127, 255}, {0, 127, 255}, LinePattern.Solid, FillPattern.Solid, 0.25, {{-24, -34}, {-82, 40}, {-72, 46}, {-14, -26}, {-24, -34}}, Smooth.None
regex_polygon = re.compile('Polygon\((\w+), {([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}, ([+-]?\d+(?:.\d+)?), {(\d+), (\d+), (\d+)}, {(\d+), (\d+), (\d+)}, (\w+.\w+), (\w+.\w+), ([+-]?\d+(?:.\d+)?), ({{[+-]?\d+(?:.\d+)?, [+-]?\d+(?:.\d+)?}(?:, {[+-]?\d+(?:.\d+)?, [+-]?\d+(?:.\d+)?})*}), (\w+.\w+)')
# example: {{-100.0, -100.0}, {-100.0, -30.0}, {0.0, -30.0}, {0.0, 0.0}}
regex_points = re.compile('{([+-]?\d+(?:.\d+)?), ([+-]?\d+(?:.\d+)?)}')
# example: Bitmap(true, {0.0, 0.0}, 0, {{-98, 98}, {98, -98}}, "modelica://Modelica/Resources/Images/Mechanics/MultiBody/Visualizers/TorusIcon.png"
# TODO: where is the imageSource?
# def __ask_omc(question, opt=None, parsed=True):
# p = (question, opt, parsed)
# if p in omc_cache:
# return omc_cache[p]
#
# if opt:
# expression = question + '(' + opt + ')'
# else:
# expression = question
#
# logger.debug('ask_omc: {0} - parsed: {1}'.format(expression, parsed))
#
# try:
# if parsed:
# res = OMPython.execute(expression)
# else:
# res = OMPython.omc.sendExpression(expression)
# except Exception as e:
# logger.error("OMC failed: {0}, {1}, parsed={2}".format(question, opt, parsed))
# raise e
#
# omc_cache[p] = res
#
# return res
omc_cache = {}
graphics_cache = {}
class IconExporter(object):
def __init__(self, omc_session, icon_dir_name):
"""
Creates a new instance of IconExporter and passes in an OMCSession
"""
self.logger = logging.getLogger('py_modelica_exporter.IconExporter')
# NOTSET: the effective level is inherited from the parent/root logger.
self.logger.setLevel(logging.NOTSET)
self.logger.info('Initializing IconExporter()')
# start om session
self.omc = omc_session
# target directory name for exported icons; stored for later use.
self.icon_dir_name = icon_dir_name
# get graphics objects from annotation Icon
def get_graphics_for_class(self, modelica_class):
    """Parse the Icon annotation of ``modelica_class``.

    Returns a dict with:
      * ``'coordinateSystem'`` -- ``extent`` (default
        ``[[-100, -100], [100, 100]]``) plus, when the annotation carries
        one, ``preserveAspectRatio``, ``initialScale`` and ``grid``;
      * ``'graphics'`` -- one dict per recognized primitive (Line,
        Rectangle, Polygon, Text, Ellipse); entries matching no pattern
        get ``type == 'Unknown'``.

    The result is also stored in the module-level ``graphics_cache``.
    """
    # TODO: does not work if a port (same class) is being used multiple
    # times... (cache lookup is therefore disabled; we only write below).
    result = dict()
    result['graphics'] = []
    icon_annotation = self.omc.getIconAnnotation(modelica_class)
    result['coordinateSystem'] = {}
    result['coordinateSystem']['extent'] = [[-100, -100], [100, 100]]

    def point_list(text):
        # '{x, y}' pairs -> [[x, y], ...]
        return [[float(px), float(py)]
                for px, py in re.findall(regex_points, text)]

    r = regex_coordSys.search(icon_annotation)
    if r:
        g = r.groups()
        cs = result['coordinateSystem']
        cs['extent'] = [[float(g[0]), float(g[1])],
                        [float(g[2]), float(g[3])]]
        # BUG FIX: the original used bool(g[4]); g[4] is a string such as
        # 'true'/'false', and bool() of ANY non-empty string is True, so
        # preserveAspectRatio could never be False.  Compare the literal.
        cs['preserveAspectRatio'] = (g[4] == 'true')
        cs['initialScale'] = float(g[5])
        cs['grid'] = [float(g[6]), float(g[7])]
        # strip the coordinate-system prefix, keep the graphics list
        withOutCoordSys = icon_annotation[icon_annotation.find(',{'):]
    else:
        # no coordinate system found; parse the whole annotation
        withOutCoordSys = icon_annotation

    for icon_line in withOutCoordSys.split('),'):
        graphicsObj = {}

        # Each primitive regex is tried in turn; a later match may
        # overwrite fields set by an earlier one (original behavior).
        r = regex_line.search(icon_line)
        if r:
            g = r.groups()
            graphicsObj['type'] = 'Line'
            graphicsObj['visible'] = g[0]
            graphicsObj['origin'] = [float(g[1]), float(g[2])]
            graphicsObj['rotation'] = float(g[3])
            graphicsObj['points'] = point_list(g[4])
            graphicsObj['color'] = [int(g[5]), int(g[6]), int(g[7])]
            graphicsObj['pattern'] = g[8]
            graphicsObj['thickness'] = float(g[9])
            graphicsObj['arrow'] = [g[10], g[11]]
            graphicsObj['arrowSize'] = float(g[12])
            graphicsObj['smooth'] = g[13]

        r = regex_rectangle.search(icon_line)
        if r:
            g = r.groups()
            graphicsObj['type'] = 'Rectangle'
            graphicsObj['visible'] = g[0]
            graphicsObj['origin'] = [float(g[1]), float(g[2])]
            graphicsObj['rotation'] = float(g[3])
            graphicsObj['lineColor'] = [int(g[4]), int(g[5]), int(g[6])]
            graphicsObj['fillColor'] = [int(g[7]), int(g[8]), int(g[9])]
            graphicsObj['linePattern'] = g[10]
            graphicsObj['fillPattern'] = g[11]
            graphicsObj['lineThickness'] = float(g[12])
            graphicsObj['borderPattern'] = g[13]
            graphicsObj['extent'] = [[float(g[14]), float(g[15])],
                                     [float(g[16]), float(g[17])]]
            graphicsObj['radius'] = float(g[18])

        r = regex_polygon.search(icon_line)
        if r:
            g = r.groups()
            graphicsObj['icon_line'] = icon_line  # raw text kept for debugging
            graphicsObj['type'] = 'Polygon'
            graphicsObj['visible'] = g[0]
            graphicsObj['origin'] = [float(g[1]), float(g[2])]
            graphicsObj['rotation'] = float(g[3])
            graphicsObj['lineColor'] = [int(g[4]), int(g[5]), int(g[6])]
            graphicsObj['fillColor'] = [int(g[7]), int(g[8]), int(g[9])]
            graphicsObj['linePattern'] = g[10]
            graphicsObj['fillPattern'] = g[11]
            graphicsObj['lineThickness'] = float(g[12])
            points = point_list(g[13])
            graphicsObj['points'] = points
            # Bounding box of the polygon.  The extremes are seeded with
            # +/-100 exactly as the original running min/max loop did.
            xs = [p[0] for p in points]
            ys = [p[1] for p in points]
            graphicsObj['extent'] = [[min([100] + xs), min([100] + ys)],
                                     [max([-100] + xs), max([-100] + ys)]]
            graphicsObj['smooth'] = g[14]

        r = regex_text.search(icon_line)
        if r:
            g = r.groups()
            graphicsObj['type'] = 'Text'
            graphicsObj['visible'] = g[0]
            graphicsObj['origin'] = [float(g[1]), float(g[2])]
            graphicsObj['rotation'] = float(g[3])
            graphicsObj['lineColor'] = [int(g[4]), int(g[5]), int(g[6])]
            graphicsObj['fillColor'] = [int(g[7]), int(g[8]), int(g[9])]
            graphicsObj['linePattern'] = g[10]
            graphicsObj['fillPattern'] = g[11]
            graphicsObj['lineThickness'] = float(g[12])
            graphicsObj['extent'] = [[float(g[13]), float(g[14])],
                                     [float(g[15]), float(g[16])]]
            graphicsObj['textString'] = g[17].strip('"')
            graphicsObj['fontSize'] = float(g[18])
            graphicsObj['fontName'] = g[19]
            if graphicsObj['fontName']:
                graphicsObj['fontName'] = graphicsObj['fontName'].strip('"')
            graphicsObj['textStyle'] = []
            if g[20]:
                # textStyle can hold a variable number of style flags
                graphicsObj['textStyle'] = regex_type_value.findall(g[20])
            graphicsObj['horizontalAlignment'] = g[21]

        r = regex_ellipse.search(icon_line)
        if r:
            g = r.groups()
            graphicsObj['type'] = 'Ellipse'
            graphicsObj['visible'] = g[0]
            graphicsObj['origin'] = [float(g[1]), float(g[2])]
            graphicsObj['rotation'] = float(g[3])
            graphicsObj['lineColor'] = [int(g[4]), int(g[5]), int(g[6])]
            graphicsObj['fillColor'] = [int(g[7]), int(g[8]), int(g[9])]
            graphicsObj['linePattern'] = g[10]
            graphicsObj['fillPattern'] = g[11]
            graphicsObj['lineThickness'] = float(g[12])
            graphicsObj['extent'] = [[float(g[13]), float(g[14])],
                                     [float(g[15]), float(g[16])]]
            graphicsObj['startAngle'] = float(g[17])
            graphicsObj['endAngle'] = float(g[18])

        if 'type' not in graphicsObj:
            graphicsObj['type'] = 'Unknown'
        result['graphics'].append(graphicsObj)

    graphics_cache[modelica_class] = result
    return result
def get_graphics_with_ports_for_class(self, modelica_class):
    """Return the graphics dict of *modelica_class* extended with its connector ports.

    Builds on get_graphics_for_class() and appends one entry per connector
    component to graphics['ports'].  Each port entry carries the merged icon
    graphics of the connector class (base-class graphics first, own graphics
    on top), descriptive metadata and the component placement transformation.

    :param modelica_class: fully qualified Modelica class name
    :return: graphics dict with added 'className' and 'ports' keys
    """
    graphics = self.get_graphics_for_class(modelica_class)
    graphics['className'] = modelica_class
    graphics['ports'] = []
    # answer_full = ask_omc('getComponents', modelicaClass, parsed=False)
    answer_full = self.omc.getComponents(modelica_class, parsed=False)
    comp_id = 0
    # the unparsed answer looks like "{{a,b,...},{...}}"; split into one
    # record per component (the leading "{{" is dropped by [2:])
    for answer in answer_full[2:].split('},{'):
        #print answer
        comp_id += 1
        # first comma-separated field is the component's class name,
        # the second field is the component (instance) name
        class_name = answer[0:answer.find(',')]
        component_name = answer[answer.find(',') + 1:][0:answer[answer.find(',') + 1:].find(',')]
        # only connector components become ports
        # if ask_omc('isConnector', class_name):
        if self.omc.isConnector(class_name):
            try:
                comp_annotation = self.omc.getNthComponentAnnotation(modelica_class, comp_id)
                # comp_annotation = ask_omc('getNthComponentAnnotation', modelicaClass + ', ' + str(comp_id))['SET2']['Set1']
            except KeyError as ex:
                # NOTE(review): ex.message is Python 2 only
                self.logger.error('KeyError: {0} componentName: {1} {2}'.format(modelica_class, component_name, ex.message))
                continue
            # base class graphics for ports
            g_base = []
            base_classes = []
            self.get_base_classes(class_name, base_classes)
            for base_class in base_classes:
                graphics_base = self.get_graphics_for_class(base_class)
                g_base.append(graphics_base)
            g = self.get_graphics_for_class(class_name)
            # merge: base-class graphics first, then the class's own graphics
            # on top, so the class's drawing order wins visually
            g_this = g['graphics']
            g['graphics'] = []
            for g_b in g_base:
                for g_i in g_b['graphics']:
                    g['graphics'].append(g_i)
            for g_b in g_this:
                g['graphics'].append(g_b)
            g['id'] = component_name
            g['className'] = class_name
            desc = self.omc.getComponentComment(modelica_class + ', ' + component_name )
            # desc = ask_omc('getComponentComment', modelicaClass + ', ' + component_name)
            if type(desc) is dict:
                # a dict answer stands in for "no comment string available"
                g['desc'] = ''
            else:
                g['desc'] = desc.strip().strip('"')
            g['classDesc'] = self.omc.getClassComment(class_name).strip().strip('"')
            # g['classDesc'] = ask_omc('getClassComment', class_name).strip().strip('"')
            # grow the coordinate-system extent until every merged graphics
            # item (extent shifted by its origin) fits inside it
            minX = g['coordinateSystem']['extent'][0][0]
            minY = g['coordinateSystem']['extent'][0][1]
            maxX = g['coordinateSystem']['extent'][1][0]
            maxY = g['coordinateSystem']['extent'][1][1]
            for gs in g['graphics']:
                # use default values if it is not there
                if not 'extent' in gs:
                    gs['extent'] = [[-100, -100], [100, 100]]
                if not 'origin' in gs:
                    gs['origin'] = [0, 0]
                if minX > gs['extent'][0][0] + gs['origin'][0]:
                    minX = gs['extent'][0][0] + gs['origin'][0]
                if minX > gs['extent'][1][0] + gs['origin'][0]:
                    minX = gs['extent'][1][0] + gs['origin'][0]
                if minY > gs['extent'][0][1] + gs['origin'][1]:
                    minY = gs['extent'][0][1] + gs['origin'][1]
                if minY > gs['extent'][1][1] + gs['origin'][1]:
                    minY = gs['extent'][1][1] + gs['origin'][1]
                if maxX < gs['extent'][1][0] + gs['origin'][0]:
                    maxX = gs['extent'][1][0] + gs['origin'][0]
                if maxX < gs['extent'][0][0] + gs['origin'][0]:
                    maxX = gs['extent'][0][0] + gs['origin'][0]
                if maxY < gs['extent'][1][1] + gs['origin'][1]:
                    maxY = gs['extent'][1][1] + gs['origin'][1]
                if maxY < gs['extent'][0][1] + gs['origin'][1]:
                    maxY = gs['extent'][0][1] + gs['origin'][1]
            g['coordinateSystem']['extent'] = [[minX, minY], [maxX, maxY]]
            #print comp_annotation
            # NOTE(review): offsets 8..14 of the annotation appear to hold the
            # icon placement, 1..7 the diagram placement; "-" at index 10
            # seems to flag a missing icon placement — confirm against the
            # getNthComponentAnnotation answer format
            index_delta = 7
            if comp_annotation[10] == "-":
                # fallback to diagram annotations
                index_delta = 0
            origin_x = comp_annotation[1 + index_delta]
            origin_y = comp_annotation[2 + index_delta]
            x0 = comp_annotation[3 + index_delta]
            y0 = comp_annotation[4 + index_delta]
            x1 = comp_annotation[5 + index_delta]
            y1 = comp_annotation[6 + index_delta]
            rotation = comp_annotation[7 + index_delta]
            g['transformation'] = {}
            g['transformation']['origin'] = [origin_x, origin_y]
            g['transformation']['extent'] = [[x0, y0], [x1, y1]]
            g['transformation']['rotation'] = rotation
            graphics['ports'].append(g)
    return graphics
def get_gradient_colors(self, start_color, stop_color, mid_points):
    """Linearly interpolate between two RGB colors.

    :param start_color: sequence of three numbers (red, green, blue)
    :param stop_color: sequence of three numbers (red, green, blue)
    :param mid_points: number of interpolated colors to insert between them
    :return: list of ``mid_points + 2`` (r, g, b) integer tuples, starting
        with *start_color* and ending with *stop_color*
    """
    start_red = int(start_color[0])
    start_green = int(start_color[1])
    start_blue = int(start_color[2])
    stop_red = int(stop_color[0])
    stop_green = int(stop_color[1])
    stop_blue = int(stop_color[2])
    # Use floor division so the components stay integers on Python 3 as well;
    # plain '/' would yield floats and produce invalid 'rgb(12.5, ...)' SVG
    # values. On Python 2 '//' matches the old int '/' behavior exactly.
    steps = mid_points + 1
    r_delta = (stop_red - start_red) // steps
    g_delta = (stop_green - start_green) // steps
    b_delta = (stop_blue - start_blue) // steps
    result = [(start_red, start_green, start_blue)]
    for i in range(1, mid_points + 1):
        result.append((start_red + i * r_delta,
                       start_green + i * g_delta,
                       start_blue + i * b_delta))
    result.append((stop_red, stop_green, stop_blue))
    return result
def get_coordinates(self, xy, graphics, min_x, max_y, transformation, coordinate_system):
    """Map a point from Modelica model coordinates to SVG canvas coordinates.

    Applies the graphics item's own origin/rotation, then (for component
    ports) the component placement *transformation* relative to the
    connector class's *coordinate_system*, and finally shifts/flips into
    the SVG frame (y axis pointing down).

    :param xy: (x, y) point in the graphics item's coordinate system
    :param graphics: graphics dict providing 'origin' and 'rotation'
    :param min_x: left edge of the drawing, subtracted to shift into view
    :param max_y: top edge of the drawing, used to flip the y axis
    :param transformation: optional component placement (origin/extent/rotation)
    :param coordinate_system: optional coordinate system of the placed class
    :return: (x, y) tuple in SVG canvas coordinates
    """
    x = xy[0] + graphics['origin'][0]
    y = xy[1] + graphics['origin'][1]
    # rotation for the icon, about the graphics item's origin
    # (use math.pi and a float divisor: '/ 180' with an int angle truncated
    # to 0 under Python 2 integer division)
    s = math.sin(graphics['rotation'] / 180.0 * math.pi)
    c = math.cos(graphics['rotation'] / 180.0 * math.pi)
    x -= graphics['origin'][0]
    y -= graphics['origin'][1]
    xnew = x * c - y * s
    ynew = x * s + y * c
    x = xnew + graphics['origin'][0]
    y = ynew + graphics['origin'][1]
    if transformation and coordinate_system:
        try:
            # size of the placement target and of the source coordinate system
            t_width = abs(max(transformation['extent'][1][0], transformation['extent'][0][0]) - min(transformation['extent'][1][0], transformation['extent'][0][0]))
            t_height = abs(max(transformation['extent'][1][1], transformation['extent'][0][1]) - min(transformation['extent'][1][1], transformation['extent'][0][1]))
            # fixed: the width must be computed from x coordinates only —
            # previously the min() term mixed in the y coordinates
            o_width = abs(max(coordinate_system['extent'][1][0], coordinate_system['extent'][0][0]) - min(coordinate_system['extent'][1][0], coordinate_system['extent'][0][0]))
            o_height = abs(max(coordinate_system['extent'][1][1], coordinate_system['extent'][0][1]) - min(coordinate_system['extent'][1][1], coordinate_system['extent'][0][1]))
            if 'extent' in transformation and transformation['extent'][1][0] < transformation['extent'][0][0]:
                # horizontal flip
                x = (-xy[0] + graphics['origin'][0]) / o_width * t_width + transformation['origin'][0] + transformation['extent'][1][0] + t_width / 2
            else:
                x = (xy[0] + graphics['origin'][0]) / o_width * t_width + transformation['origin'][0] + transformation['extent'][0][0] + t_width / 2
            if 'extent' in transformation and transformation['extent'][1][1] < transformation['extent'][0][1]:
                # vertical flip
                y = (-xy[1] + graphics['origin'][1]) / o_height * t_height + transformation['origin'][1] + min(transformation['extent'][1][1], transformation['extent'][0][1]) + t_height / 2
            else:
                # (original had min(e[0][1], e[0][1]), which is just e[0][1])
                y = (xy[1] + graphics['origin'][1]) / o_height * t_height + transformation['origin'][1] + transformation['extent'][0][1] + t_height / 2
            # rotate about the placement origin
            s = math.sin(transformation['rotation'] / 180.0 * math.pi)
            c = math.cos(transformation['rotation'] / 180.0 * math.pi)
            x -= transformation['origin'][0]
            y -= transformation['origin'][1]
            xnew = x * c - y * s
            ynew = x * s + y * c
            x = xnew + transformation['origin'][0]
            y = ynew + transformation['origin'][1]
        except KeyError as ex:
            # logging uses lazy %s placeholders, not str.format-style {0};
            # also avoid Python-2-only ex.message
            self.logger.error('Component position transformation failed: %s', ex)
            self.logger.error(graphics)
    # shift into view and flip the y axis for SVG (origin at top-left)
    x -= min_x
    y = max_y - y
    return x, y
# get svg object from modelica graphics object
def get_svg_from_graphics(self, dwg, graphics, min_x, max_y, transformation=None, coordinate_system=None):
    """Translate one Modelica graphics primitive into an svgwrite shape.

    Supported primitive types: Rectangle, Ellipse, Text, Line, Polygon.

    :param dwg: svgwrite.Drawing used as shape factory
    :param graphics: graphics dict parsed from the Modelica annotation
    :param min_x: left edge of the drawing (passed to get_coordinates)
    :param max_y: top edge of the drawing (passed to get_coordinates)
    :param transformation: optional component placement for port shapes
    :param coordinate_system: optional source coordinate system for port shapes
    :return: (shape, definitions) tuple, where *definitions* is an SVG <defs>
        container holding any markers/filters/patterns/gradients the shape
        references, or None when the primitive is not handled
    """
    shape = None
    definitions = svgwrite.container.Defs()
    origin = None
    if not 'origin' in graphics:
        graphics['origin'] = (0, 0)
    origin = graphics['origin']
    # extent-based primitives share the two transformed corner points
    if graphics['type'] == 'Rectangle' or graphics['type'] == 'Ellipse' or graphics['type'] == 'Text':
        (x0, y0) = self.get_coordinates(graphics['extent'][0], graphics, min_x, max_y, transformation, coordinate_system)
        (x1, y1) = self.get_coordinates(graphics['extent'][1], graphics, min_x, max_y, transformation, coordinate_system)
    if graphics['type'] == 'Rectangle' or graphics['type'] == 'Ellipse' or graphics['type'] == 'Polygon':
        if not 'fillPattern' in graphics:
            graphics['fillPattern'] = 'FillPattern.None'
    if graphics['type'] == 'Rectangle':
        shape = dwg.rect((min(x0, x1), min(y0, y1)), (abs(x1 - x0), abs(y1 - y0)), graphics['radius'], graphics['radius'])
    elif graphics['type'] == 'Line':
        if 'points' in graphics:
            if graphics['smooth'] == 'Smooth.Bezier' and len(graphics['points']) > 2:
                # TODO: Optimize this part!!!
                # approximate the smooth polyline with cubic segments through
                # the midpoints of consecutive point pairs
                shape = svgwrite.path.Path()
                x_0, y_0 = self.get_coordinates([graphics['points'][0][0], graphics['points'][0][1]], graphics, min_x, max_y, transformation, coordinate_system)
                shape.push('M', x_0, y_0, 'C')
                for i in range(1, len(graphics['points']) - 1):
                    x_0, y_0 = self.get_coordinates([graphics['points'][i-1][0], graphics['points'][i-1][1]], graphics, min_x, max_y, transformation, coordinate_system)
                    x_1, y_1 = self.get_coordinates([graphics['points'][i][0], graphics['points'][i][1]], graphics, min_x, max_y, transformation, coordinate_system)
                    x_2, y_2 = self.get_coordinates([graphics['points'][i+1][0], graphics['points'][i+1][1]], graphics, min_x, max_y, transformation, coordinate_system)
                    x_01 = (x_1 + x_0) / 2
                    y_01 = (y_1 + y_0) / 2
                    x_12 = (x_2 + x_1) / 2
                    y_12 = (y_2 + y_1) / 2
                    shape.push(x_01, y_01, x_1, y_1, x_12, y_12)
                x_n, y_n = self.get_coordinates([graphics['points'][len(graphics['points']) - 1][0], graphics['points'][len(graphics['points']) - 1][1]], graphics, min_x, max_y, transformation, coordinate_system)
                shape.push(x_12, y_12, x_n, y_n, x_n, y_n)
            else:
                shape = dwg.polyline([self.get_coordinates([x, y], graphics, min_x, max_y, transformation, coordinate_system) for (x, y) in graphics['points']])
            shape.fill('none', opacity=0)
            # markers
            if graphics['arrow'][0] != 'Arrow.None':
                # unique id so multiple arrows in one document do not collide
                url_id_start = graphics['arrow'][0] + '_start' + str(uuid.uuid4())
                marker = svgwrite.container.Marker(insert=(10, 5), size=(4, 3), orient='auto', id=url_id_start, viewBox="0 0 10 10")
                p = svgwrite.path.Path(d="M 10 0 L 0 5 L 10 10 z")
                p.fill("rgb(" + ','.join([str(v) for v in graphics['color']]) + ")")
                marker.add(p)
                definitions.add(marker)
                shape['marker-start'] = marker.get_funciri()
            if graphics['arrow'][1] != 'Arrow.None':
                url_id_end = graphics['arrow'][1] + '_end' + str(uuid.uuid4())
                marker = svgwrite.container.Marker(insert=(0, 5), size=(4, 3), orient='auto', id=url_id_end, viewBox="0 0 10 10")
                p = svgwrite.path.Path(d="M 0 0 L 10 5 L 0 10 z")
                p.fill("rgb(" + ','.join([str(v) for v in graphics['color']]) + ")")
                marker.add(p)
                definitions.add(marker)
                shape['marker-end'] = marker.get_funciri()
        else:
            self.logger.error('Not handled: {0}'.format(graphics))
            return None
    elif graphics['type'] == 'Polygon':
        if 'points' in graphics:
            if graphics['smooth'] == 'Smooth.Bezier' and len(graphics['points']) > 2:
                # TODO: Optimize this part!!!
                # same midpoint-based bezier approximation as the Line branch
                shape = svgwrite.path.Path()
                x_0, y_0 = self.get_coordinates([graphics['points'][0][0], graphics['points'][0][1]], graphics, min_x, max_y, transformation, coordinate_system)
                shape.push('M', x_0, y_0, 'C')
                for i in range(1, len(graphics['points']) - 1):
                    x_0, y_0 = self.get_coordinates([graphics['points'][i-1][0], graphics['points'][i-1][1]], graphics, min_x, max_y, transformation, coordinate_system)
                    x_1, y_1 = self.get_coordinates([graphics['points'][i][0], graphics['points'][i][1]], graphics, min_x, max_y, transformation, coordinate_system)
                    x_2, y_2 = self.get_coordinates([graphics['points'][i+1][0], graphics['points'][i+1][1]], graphics, min_x, max_y, transformation, coordinate_system)
                    x_01 = (x_1 + x_0) / 2
                    y_01 = (y_1 + y_0) / 2
                    x_12 = (x_2 + x_1) / 2
                    y_12 = (y_2 + y_1) / 2
                    shape.push(x_01, y_01, x_1, y_1, x_12, y_12)
                x_n, y_n = self.get_coordinates([graphics['points'][len(graphics['points']) - 1][0], graphics['points'][len(graphics['points']) - 1][1]], graphics, min_x, max_y, transformation, coordinate_system)
                shape.push(x_12, y_12, x_n, y_n, x_n, y_n)
            else:
                shape = dwg.polygon([self.get_coordinates([x, y], graphics, min_x, max_y, transformation, coordinate_system) for (x, y) in graphics['points']])
            shape.fill('none', opacity=0)
        else:
            self.logger.error('Not handled: {0}'.format(graphics))
            return None
    elif graphics['type'] == 'Ellipse':
        # center plus the two half-axis radii
        shape = dwg.ellipse(((x0 + x1) / 2, (y0 + y1) / 2), (abs((x1 - x0) / 2), abs((y1 - y0) / 2)))
    elif graphics['type'] == 'Text':
        extra = {}
        x = (x0 + x1) / 2
        y = (y0 + y1) / 2
        extra['font_family'] = graphics['fontName'] or "Verdana"
        if graphics['fontSize'] == 0:
            # fontSize 0 means "unspecified" in the annotation
            extra['font_size'] = "18"
        else:
            extra['font_size'] = graphics['fontSize']
        for style in graphics['textStyle']:
            if style == "TextStyle.Bold":
                extra['font-weight'] = 'bold'
            elif style == "TextStyle.Italic":
                extra['font-style'] = 'italic'
            elif style == "TextStyle.UnderLine":
                extra['text-decoration'] = 'underline'
        extra['alignment_baseline'] = "middle"
        # left/right alignment anchors the text at the extent corner
        if graphics['horizontalAlignment'] == "TextAlignment.Left":
            extra['text_anchor'] = "start"
            if x0 < x1:
                x = x0
            else:
                x = x1
            if y0 < y1:
                y = y0
            else:
                y = y1
        elif graphics['horizontalAlignment'] == "TextAlignment.Center":
            extra['text_anchor'] = "middle"
        elif graphics['horizontalAlignment'] == "TextAlignment.Right":
            extra['text_anchor'] = "end"
            if x0 < x1:
                x = x1
            else:
                x = x0
            if y0 < y1:
                y = y1
            else:
                y = y0
        shape = dwg.text(graphics['textString'].replace('%', ''), None, [x], [y], **extra)
        if graphics['textString'].find('%') != -1:
            # NOTE(review): '%' marks a substitution placeholder; the hidden
            # tspans below appear to carry the bounding box and the raw text
            # for client-side substitution — confirm against the consumer
            extra = {'class': "bbox", 'display': "none"}
            xmin = x0
            ymin = y0
            xmax = x1
            ymax = y1
            if x0 > x1:
                xmin = x1
                xmax = x0
            if y0 > y1:
                ymin = y1
                ymax = y0
            shape.add(svgwrite.text.TSpan(("{0} {1} {2} {3}".format(xmin, ymin, xmax, ymax)), **extra))
            extra = {'class': "data-bind", 'display': "none"}
            shape.add(svgwrite.text.TSpan(graphics['textString'], **extra))
    else:
        self.logger.error('Not handled: {0}'.format(graphics))
        return None
    # base dash geometry, scaled below by the line thickness
    dot_size = 4
    dash_size = 16
    space_size = 8
    # stroke styling for extent-based shapes ('linePattern'/'lineColor')
    if 'linePattern' in graphics:
        dot_size *= graphics['lineThickness']
        dash_size *= graphics['lineThickness']
        space_size *= graphics['lineThickness']
        if graphics['linePattern'] == 'LinePattern.None' or graphics['type'] == 'Text':
            pass
        elif graphics['linePattern'] == 'LinePattern.Solid':
            shape.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width='{0}mm'.format(graphics['lineThickness']))
        elif graphics['linePattern'] == 'LinePattern.Dash':
            shape.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width='{0}mm'.format(graphics['lineThickness']))
            shape.dasharray([dash_size, space_size])
        elif graphics['linePattern'] == 'LinePattern.Dot':
            shape.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width='{0}mm'.format(graphics['lineThickness']))
            shape.dasharray([dot_size, space_size])
        elif graphics['linePattern'] == 'LinePattern.DashDot':
            shape.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width='{0}mm'.format(graphics['lineThickness']))
            shape.dasharray([dash_size, space_size, dot_size, space_size])
        elif graphics['linePattern'] == 'LinePattern.DashDotDot':
            shape.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width='{0}mm'.format(graphics['lineThickness']))
            shape.dasharray([dash_size, space_size, dot_size, space_size, dot_size, space_size])
    # border relief effects (Rectangle only)
    if graphics['type'] == 'Rectangle':
        if graphics['borderPattern'] == 'BorderPattern.None':
            pass
        elif graphics['borderPattern'] == 'BorderPattern.Raised':
            url_id = graphics['borderPattern'] + '_' + str(uuid.uuid4())
            shape['filter'] = 'url(#' + url_id + ')'
            filter = svgwrite.filters.Filter(id=url_id, filterUnits="objectBoundingBox", x="-0.1", y="-0.1", width="1.2", height="1.2")
            filter.feGaussianBlur("SourceAlpha", stdDeviation="5", result="alpha_blur")
            feSL = filter.feSpecularLighting("alpha_blur", surfaceScale="5", specularConstant="1", specularExponent="20", lighting_color="#FFFFFF", result="spec_light")
            feSL.fePointLight((-5000, -10000, 10000))
            filter.feComposite("spec_light", in2="SourceAlpha", operator="in", result="spec_light")
            filter.feComposite("SourceGraphic", in2="spec_light", operator="out", result="spec_light_fill")
            definitions.add(filter)
        elif graphics['borderPattern'] == 'BorderPattern.Sunken':
            self.logger.warning('Not supported: {0}'.format(graphics['borderPattern']))
        elif graphics['borderPattern'] == 'BorderPattern.Engraved':
            self.logger.warning('Not supported: {0}'.format(graphics['borderPattern']))
    # stroke styling for point-based shapes ('color'/'pattern'/'thickness')
    if 'color' in graphics:
        try:
            shape.stroke("rgb(" + ','.join([str(v) for v in graphics['color']]) + ")", width='{0}mm'.format(graphics['thickness']))
        except TypeError as ex:
            # NOTE(review): ex.message is Python 2 only
            self.logger.error('{0} {1}'.format(graphics['color'], ex.message))
    if 'pattern' in graphics:
        dot_size *= graphics['thickness']
        dash_size *= graphics['thickness']
        space_size *= graphics['thickness']
        if graphics['pattern'] == 'LinePattern.None' or graphics['type'] == 'Text':
            pass
        elif graphics['pattern'] == 'LinePattern.Solid':
            shape.stroke("rgb(" + ','.join([str(v) for v in graphics['color']]) + ")", width='{0}mm'.format(graphics['thickness']))
        elif graphics['pattern'] == 'LinePattern.Dash':
            shape.stroke("rgb(" + ','.join([str(v) for v in graphics['color']]) + ")", width='{0}mm'.format(graphics['thickness']))
            shape.dasharray([dash_size, space_size])
        elif graphics['pattern'] == 'LinePattern.Dot':
            shape.stroke("rgb(" + ','.join([str(v) for v in graphics['color']]) + ")", width='{0}mm'.format(graphics['thickness']))
            shape.dasharray([dot_size, space_size])
        elif graphics['pattern'] == 'LinePattern.DashDot':
            shape.stroke("rgb(" + ','.join([str(v) for v in graphics['color']]) + ")", width='{0}mm'.format(graphics['thickness']))
            shape.dasharray([dash_size, space_size, dot_size, space_size])
        elif graphics['pattern'] == 'LinePattern.DashDotDot':
            shape.stroke("rgb(" + ','.join([str(v) for v in graphics['color']]) + ")", width='{0}mm'.format(graphics['thickness']))
            shape.dasharray([dash_size, space_size, dot_size, space_size, dot_size, space_size])
    # fill styling; hatch fills become <pattern> defs, cylinder/sphere fills
    # become gradient defs, all referenced through a unique url id
    if 'fillPattern' in graphics:
        if graphics['fillPattern'] == 'FillPattern.None':
            if graphics['type'] == 'Text':
                shape.fill("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")")
            else:
                shape.fill('none', opacity=0)
        elif graphics['fillPattern'] == 'FillPattern.Solid':
            shape.fill("rgb(" + ','.join([str(v) for v in graphics['fillColor']]) + ")")
        elif graphics['fillPattern'] == 'FillPattern.Horizontal':
            url_id = str(uuid.uuid4())
            shape.fill('url(#' + url_id + ')')
            pattern = svgwrite.pattern.Pattern(id=url_id, insert=(0, 0), size=(5, 5), patternUnits='userSpaceOnUse')
            rect = svgwrite.shapes.Rect(insert=(0, 0), size=(5, 5))
            rect.fill("rgb(" + ','.join([str(v) for v in graphics['fillColor']]) + ")")
            pattern.add(rect)
            svg_path = svgwrite.path.Path(d="M0,0 L5,0")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=2)
            pattern.add(svg_path)
            definitions.add(pattern)
        elif graphics['fillPattern'] == 'FillPattern.Vertical':
            url_id = str(uuid.uuid4())
            shape.fill('url(#' + url_id + ')')
            pattern = svgwrite.pattern.Pattern(id=url_id, insert=(0, 0), size=(5, 5), patternUnits='userSpaceOnUse')
            rect = svgwrite.shapes.Rect(insert=(0, 0), size=(5, 5))
            rect.fill("rgb(" + ','.join([str(v) for v in graphics['fillColor']]) + ")")
            pattern.add(rect)
            svg_path = svgwrite.path.Path(d="M0,0 L0,5")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=2)
            pattern.add(svg_path)
            definitions.add(pattern)
        elif graphics['fillPattern'] == 'FillPattern.Cross':
            url_id = str(uuid.uuid4())
            shape.fill('url(#' + url_id + ')')
            pattern = svgwrite.pattern.Pattern(id=url_id, insert=(0, 0), size=(5, 5), patternUnits='userSpaceOnUse')
            rect = svgwrite.shapes.Rect(insert=(0, 0), size=(5, 5))
            rect.fill("rgb(" + ','.join([str(v) for v in graphics['fillColor']]) + ")")
            pattern.add(rect)
            svg_path = svgwrite.path.Path(d="M0,0 L5,0")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=2)
            pattern.add(svg_path)
            svg_path = svgwrite.path.Path(d="M0,0 L0,5")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=2)
            pattern.add(svg_path)
            definitions.add(pattern)
        elif graphics['fillPattern'] == 'FillPattern.Forward':
            url_id = str(uuid.uuid4())
            shape.fill('url(#' + url_id + ')')
            pattern = svgwrite.pattern.Pattern(id=url_id, insert=(0, 0), size=(7, 7), patternUnits='userSpaceOnUse')
            rect = svgwrite.shapes.Rect(insert=(0, 0), size=(7, 7))
            rect.fill("rgb(" + ','.join([str(v) for v in graphics['fillColor']]) + ")")
            pattern.add(rect)
            svg_path = svgwrite.path.Path(d="M0,0 l7,7")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=1)
            pattern.add(svg_path)
            svg_path = svgwrite.path.Path(d="M6,-1 l3,3")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=1)
            pattern.add(svg_path)
            svg_path = svgwrite.path.Path(d="M-1,6 l3,3")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=1)
            pattern.add(svg_path)
            definitions.add(pattern)
        elif graphics['fillPattern'] == 'FillPattern.Backward':
            url_id = str(uuid.uuid4())
            shape.fill('url(#' + url_id + ')')
            pattern = svgwrite.pattern.Pattern(id=url_id, insert=(0, 0), size=(7, 7), patternUnits='userSpaceOnUse')
            rect = svgwrite.shapes.Rect(insert=(0, 0), size=(7, 7))
            rect.fill("rgb(" + ','.join([str(v) for v in graphics['fillColor']]) + ")")
            pattern.add(rect)
            svg_path = svgwrite.path.Path(d="M7,0 l-7,7")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=1)
            pattern.add(svg_path)
            svg_path = svgwrite.path.Path(d="M1,-1 l-7,7")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=1)
            pattern.add(svg_path)
            svg_path = svgwrite.path.Path(d="M8,6 l-7,7")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=1)
            pattern.add(svg_path)
            definitions.add(pattern)
        elif graphics['fillPattern'] == 'FillPattern.CrossDiag':
            url_id = str(uuid.uuid4())
            shape.fill('url(#' + url_id + ')')
            pattern = svgwrite.pattern.Pattern(id=url_id, insert=(0, 0), size=(8, 8), patternUnits='userSpaceOnUse')
            rect = svgwrite.shapes.Rect(insert=(0, 0), size=(8, 8))
            rect.fill("rgb(" + ','.join([str(v) for v in graphics['fillColor']]) + ")")
            pattern.add(rect)
            svg_path = svgwrite.path.Path(d="M0,0 l8,8")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=1)
            pattern.add(svg_path)
            svg_path = svgwrite.path.Path(d="M8,0 l-8,8")
            svg_path.stroke("rgb(" + ','.join([str(v) for v in graphics['lineColor']]) + ")", width=1)
            pattern.add(svg_path)
            definitions.add(pattern)
        elif graphics['fillPattern'] == 'FillPattern.HorizontalCylinder':
            url_id = str(uuid.uuid4())
            shape.fill('url(#' + url_id + ')')
            lineColor = graphics['lineColor']
            fillColor = graphics['fillColor']
            # NOTE(review): these fallbacks are color-name strings, but
            # get_gradient_colors() indexes the components with int(...) —
            # this path would raise; confirm the colors are always RGB lists
            if not lineColor:
                lineColor = 'black'
            if not fillColor:
                fillColor = 'white'
            gradient = svgwrite.gradients.LinearGradient(id=url_id, x1="0%", y1="0%", x2="0%", y2="100%")
            colors = self.get_gradient_colors(lineColor, fillColor, 0)
            stopValues = [
                (0, 0),
                (0.3, 1),
                (0.7, 1),
                (1, 0)
            ]
            for (stopValue, idx) in stopValues:
                gradient.add_stop_color(offset=stopValue, color='rgb({0}, {1}, {2})'.format(colors[idx][0], colors[idx][1], colors[idx][2]), opacity=1)
            definitions.add(gradient)
        elif graphics['fillPattern'] == 'FillPattern.VerticalCylinder':
            url_id = str(uuid.uuid4())
            shape.fill('url(#' + url_id + ')')
            lineColor = graphics['lineColor']
            fillColor = graphics['fillColor']
            if not lineColor:
                lineColor = 'black'
            if not fillColor:
                fillColor = 'white'
            gradient = svgwrite.gradients.LinearGradient(id=url_id, x1="0%", y1="0%", x2="100%", y2="0%")
            colors = self.get_gradient_colors(lineColor, fillColor, 0)
            stopValues = [
                (0, 0),
                (0.3, 1),
                (0.7, 1),
                (1, 0)
            ]
            for (stopValue, idx) in stopValues:
                gradient.add_stop_color(offset=stopValue, color='rgb({0}, {1}, {2})'.format(colors[idx][0], colors[idx][1], colors[idx][2]), opacity=1)
            definitions.add(gradient)
        elif graphics['fillPattern'] == 'FillPattern.Sphere':
            if graphics['type'] == 'Ellipse':
                url_id = str(uuid.uuid4())
                shape.fill('url(#' + url_id + ')')
                lineColor = graphics['lineColor']
                fillColor = graphics['fillColor']
                if not lineColor:
                    lineColor = 'black'
                if not fillColor:
                    fillColor = 'white'
                gradient = svgwrite.gradients.RadialGradient(id=url_id, cx="50%", cy="50%", r="55%", fx="50%", fy="50%")
                colors = self.get_gradient_colors(lineColor, fillColor, 9)
                stopValues = [
                    (0, 10),
                    (0.45, 8),
                    (0.7, 6),
                    (1, 0)
                ]
                for (stopValue, idx) in stopValues:
                    gradient.add_stop_color(offset=stopValue, color='rgb({0}, {1}, {2})'.format(colors[idx][0], colors[idx][1], colors[idx][2]), opacity=1)
                definitions.add(gradient)
            elif graphics['type'] == 'Rectangle':
                url_id = str(uuid.uuid4())
                shape.fill('url(#' + url_id + ')')
                lineColor = graphics['lineColor']
                fillColor = graphics['fillColor']
                if not lineColor:
                    lineColor = 'black'
                if not fillColor:
                    fillColor = 'white'
                gradient = svgwrite.gradients.RadialGradient(id=url_id, cx="50%", cy="50%", r="0.9", fx="50%", fy="50%")
                colors = self.get_gradient_colors(lineColor, fillColor, 0)
                stopValues = [
                    (0, 1),
                    (1, 0)
                ]
                for (stopValue, idx) in stopValues:
                    gradient.add_stop_color(offset=stopValue, color='rgb({0}, {1}, {2})'.format(colors[idx][0], colors[idx][1], colors[idx][2]), opacity=1)
                definitions.add(gradient)
        else:
            # unknown fill pattern: draw the outline only
            shape.fill('none', opacity=0)
    return shape, definitions
# generate svgs from graphics objects
def generate_svg(self, filename, icon_graphics):
    """Write an SVG file combining the given list of icon graphics.

    :param filename: path of the SVG file to create
    :param icon_graphics: list of graphics dicts (base classes first, the
        class itself last) as produced by get_graphics_with_ports_for_class()
    :return: the saved svgwrite.Drawing
    """
    width = 100
    height = 100
    # start from the default 100x100 viewport and grow it until every
    # graphics item (extent or points, shifted by origin) and port fits
    minX = 0
    minY = 0
    maxX = 100
    maxY = 100
    for iconGraphic in icon_graphics:
        for graphics in iconGraphic['graphics']:
            if not 'origin' in graphics:
                graphics['origin'] = (0, 0)
            if not 'extent' in graphics:
                graphics['extent'] = [[-100, -100], [100, 100]]
            if 'extent' in graphics:
                if minX > graphics['extent'][0][0] + graphics['origin'][0]:
                    minX = graphics['extent'][0][0] + graphics['origin'][0]
                if minX > graphics['extent'][1][0] + graphics['origin'][0]:
                    minX = graphics['extent'][1][0] + graphics['origin'][0]
                if minY > graphics['extent'][0][1] + graphics['origin'][1]:
                    minY = graphics['extent'][0][1] + graphics['origin'][1]
                if minY > graphics['extent'][1][1] + graphics['origin'][1]:
                    minY = graphics['extent'][1][1] + graphics['origin'][1]
                if maxX < graphics['extent'][1][0] + graphics['origin'][0]:
                    maxX = graphics['extent'][1][0] + graphics['origin'][0]
                if maxX < graphics['extent'][0][0] + graphics['origin'][0]:
                    maxX = graphics['extent'][0][0] + graphics['origin'][0]
                if maxY < graphics['extent'][1][1] + graphics['origin'][1]:
                    maxY = graphics['extent'][1][1] + graphics['origin'][1]
                if maxY < graphics['extent'][0][1] + graphics['origin'][1]:
                    maxY = graphics['extent'][0][1] + graphics['origin'][1]
            if 'points' in graphics:
                for point in graphics['points']:
                    if minX > point[0] + graphics['origin'][0]:
                        minX = point[0] + graphics['origin'][0]
                    if minY > point[1] + graphics['origin'][1]:
                        minY = point[1] + graphics['origin'][1]
                    if maxX < point[0] + graphics['origin'][0]:
                        maxX = point[0] + graphics['origin'][0]
                    if maxY < point[1] + graphics['origin'][1]:
                        maxY = point[1] + graphics['origin'][1]
        for port in iconGraphic['ports']:
            if minX > port['transformation']['extent'][0][0] + port['transformation']['origin'][0]:
                minX = port['transformation']['extent'][0][0] + port['transformation']['origin'][0]
            if minX > port['transformation']['extent'][1][0] + port['transformation']['origin'][0]:
                minX = port['transformation']['extent'][1][0] + port['transformation']['origin'][0]
            if minY > port['transformation']['extent'][0][1] + port['transformation']['origin'][1]:
                minY = port['transformation']['extent'][0][1] + port['transformation']['origin'][1]
            if minY > port['transformation']['extent'][1][1] + port['transformation']['origin'][1]:
                minY = port['transformation']['extent'][1][1] + port['transformation']['origin'][1]
            if maxX < port['transformation']['extent'][1][0] + port['transformation']['origin'][0]:
                maxX = port['transformation']['extent'][1][0] + port['transformation']['origin'][0]
            if maxX < port['transformation']['extent'][0][0] + port['transformation']['origin'][0]:
                maxX = port['transformation']['extent'][0][0] + port['transformation']['origin'][0]
            if maxY < port['transformation']['extent'][1][1] + port['transformation']['origin'][1]:
                maxY = port['transformation']['extent'][1][1] + port['transformation']['origin'][1]
            if maxY < port['transformation']['extent'][0][1] + port['transformation']['origin'][1]:
                maxY = port['transformation']['extent'][0][1] + port['transformation']['origin'][1]
    # ports can have borders
    minX -= 5
    maxX += 5
    minY -= 5
    maxY += 5
    width = maxX - minX
    height = maxY - minY
    dwg = svgwrite.Drawing(filename, size=(width, height), viewBox="0 0 " + str(width) + " " + str(height))
    # the last entry is the class itself; use its name as the drawing description
    dwg.add(svgwrite.base.Desc(icon_graphics[-1]['className']))
    for iconGraphic in icon_graphics:
        for graphics in iconGraphic['graphics']:
            shape_definitions = self.get_svg_from_graphics(dwg, graphics, minX, maxY)
            if shape_definitions:
                shape, definitions = shape_definitions
                # substitute the Modelica 'name' placeholder text (the '%'
                # was stripped in get_svg_from_graphics) by the model name
                # derived from the file name
                if isinstance(shape, svgwrite.text.Text) and shape.text == 'name':
                    shape.text = filename.split('.')[-2]
                dwg.add(shape)
                dwg.add(definitions)
    # draw connector ports as named groups with a hidden metadata subgroup
    for iconGraphic in icon_graphics:
        for port in iconGraphic['ports']:
            group = dwg.g(id=port['id'])
            for graphics in port['graphics']:
                svgShape = self.get_svg_from_graphics(dwg, graphics, minX, maxY, port['transformation'], port['coordinateSystem'])
                if svgShape:
                    group.add(svgShape[0])
                    group.add(svgShape[1])
            port_info = dwg.g(id='info', display='none')
            port_info.add(svgwrite.text.Text(port['id'], id='name'))
            port_info.add(svgwrite.text.Text(port['className'], id='type'))
            port_info.add(svgwrite.text.Text(port['classDesc'], id='classDesc'))
            port_info.add(svgwrite.text.Text(port['desc'], id='desc'))
            group.add(port_info)
            dwg.add(group)
    dwg.save()
    return dwg
def export_icon(self, modelica_class, dir_name=None):
    """Render *modelica_class* (icon layer plus connector ports) to an SVG file.

    :param modelica_class: fully qualified Modelica class name
    :param dir_name: target directory; defaults to self.icon_dir_name
    :return: path of the written SVG file, or None if the export failed
    """
    if dir_name is None:
        dir_name = self.icon_dir_name
    try:
        # collect the icons of all base classes first so the class's own
        # graphics end up drawn on top
        icon_graphics = []
        base_classes = []
        self.get_base_classes(modelica_class, base_classes)
        for base_class in base_classes:
            icon_graphics.append(self.get_graphics_with_ports_for_class(base_class))
        icon_graphics.append(self.get_graphics_with_ports_for_class(modelica_class))
        # export svg
        svg_file_path = os.path.join(dir_name, self.class_to_filename(modelica_class) + ".svg")
        self.generate_svg(svg_file_path, icon_graphics)
        return svg_file_path
    except Exception:
        # was a bare 'except:' returning None silently; keep the best-effort
        # contract (None on failure) but log the traceback and let
        # SystemExit/KeyboardInterrupt propagate
        self.logger.exception('Failed to export icon for %s', modelica_class)
        return None
def get_base_classes(self, modelica_class, base_classes):
    """Append every (transitive) base class of *modelica_class* to *base_classes*.

    The list is mutated in place and doubles as the visited set, so a shared
    ancestor is recorded (and traversed) only once.
    """
    count = self.omc.getInheritanceCount(modelica_class)
    # inheritance_cnt = ask_omc('getInheritanceCount', modelica_class)
    for index in range(1, count + 1):
        parent = self.omc.getNthInheritedClass(modelica_class, str(index))
        # base_class = ask_omc('getNthInheritedClass', modelica_class + ', ' + str(index))
        if parent in base_classes:
            continue
        base_classes.append(parent)
        self.get_base_classes(parent, base_classes)
def class_to_filename(self, cl):
"""
The file-system dislikes directory separators, and scripts dislike tokens that expand to other names.
This function uses the same replacement rules as the OpenModelica documentation-generating script.
"""
return cl.replace("/","Division").replace("*","Multiplication")
# def main():
# parser = OptionParser()
# parser.add_option("--with-html", help="Generate an HTML report with all SVG-files", action="store_true", dest="with_html", default=False)
# parser.add_option("--output-dir", help="Directory to generate SVG-files in", type="string", dest="output_dir", default=os.path.abspath('ModelicaIcons'))
# (options, args) = parser.parse_args()
# if len(args) == 0:
# parser.print_help()
# return
# global output_dir
# output_dir = options.output_dir
# with_html = options.with_html
#
# # Inputs
# PACKAGES_TO_LOAD = args
# PACKAGES_TO_LOAD_FROM_FILE = []
# PACKAGES_TO_GENERATE = PACKAGES_TO_LOAD
#
#
# logger.info('Application started')
# logger.info('Output directory: ' + output_dir)
#
# if not os.path.exists(output_dir):
# os.makedirs(output_dir)
#
# success = True
#
# for command in OMC_SETUP_COMMANDS:
# print command,":",OMPython.omc.sendExpression(command)
# for package in PACKAGES_TO_LOAD:
# logger.info('Loading package: {0}'.format(package))
# package_load = OMPython.execute('loadModel(' + package + ')')
# logger.info('Load success: {0}'.format(package_load))
# success = success and package_load
#
# for package in PACKAGES_TO_LOAD_FROM_FILE:
# logger.info('Loading package from file: {0}'.format(package))
# package_load = OMPython.execute('loadFile("' + package + '")')
# logger.info('Load success: {0}'.format(package_load))
# success = success and package_load
#
# if success:
# dwgs = []
#
# for package in PACKAGES_TO_GENERATE:
# modelica_classes = ask_omc('getClassNames', package + ', recursive=true, qualified=true, sort=true')['SET1']['Set1']
#
# for modelica_class in modelica_classes:
# logger.info('Exporting: ' + modelica_class)
#
# # try:
# base_classes = []
# getBaseClasses(modelica_class, base_classes)
# dwg = exportIcon(modelica_class, base_classes)
# dwgs.append(dwg)
#
# logger.info('Done: ' + modelica_class)
# # except:
# # print 'FAILED: ' + modelica_class
# if with_html:
# logger.info('Generating HTML file ...')
# with open(os.path.join(output_dir, 'index.html'), 'w') as f_p:
# f_p.write('<html>\n')
# f_p.write('<head>\n')
# f_p.write('</head>\n')
#
# f_p.write('<body>\n')
#
# for dwg in dwgs:
# dwg.write(f_p)
#
# f_p.write('</body>\n')
# f_p.write('</html>\n')
#
# logger.info('HTML file is ready.')
# print "Generated svg's for %d models" % len(dwgs)
#
# logger.info('quit OMC')
# logger.info('End of application')
#
# if __name__ == '__main__':
# main()
|
{
"content_hash": "f267909eabd6e5b1dd6e2f7aef4d8f18",
"timestamp": "",
"source": "github",
"line_count": 1263,
"max_line_length": 357,
"avg_line_length": 48.53206650831354,
"alnum_prop": 0.49946162881754114,
"repo_name": "pombredanne/metamorphosys-desktop",
"id": "0c79c0463384039ac56edbfe6289f6fa838247f9",
"size": "61347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metamorphosys/META/src/Python27Packages/py_modelica_exporter/py_modelica_exporter/generate_icons.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "10683"
},
{
"name": "Assembly",
"bytes": "117345"
},
{
"name": "Awk",
"bytes": "3591"
},
{
"name": "Batchfile",
"bytes": "228118"
},
{
"name": "BitBake",
"bytes": "4526"
},
{
"name": "C",
"bytes": "3613212"
},
{
"name": "C#",
"bytes": "11617773"
},
{
"name": "C++",
"bytes": "51448188"
},
{
"name": "CMake",
"bytes": "3055"
},
{
"name": "CSS",
"bytes": "109563"
},
{
"name": "Clojure",
"bytes": "37831"
},
{
"name": "Eagle",
"bytes": "3782687"
},
{
"name": "Emacs Lisp",
"bytes": "8514"
},
{
"name": "GAP",
"bytes": "49124"
},
{
"name": "Groff",
"bytes": "2178"
},
{
"name": "Groovy",
"bytes": "7686"
},
{
"name": "HTML",
"bytes": "4025250"
},
{
"name": "Inno Setup",
"bytes": "35715"
},
{
"name": "Java",
"bytes": "489537"
},
{
"name": "JavaScript",
"bytes": "167454"
},
{
"name": "Lua",
"bytes": "1660"
},
{
"name": "Makefile",
"bytes": "97209"
},
{
"name": "Mathematica",
"bytes": "26"
},
{
"name": "Matlab",
"bytes": "80874"
},
{
"name": "Max",
"bytes": "78198"
},
{
"name": "Modelica",
"bytes": "44541139"
},
{
"name": "Objective-C",
"bytes": "34004"
},
{
"name": "Perl",
"bytes": "19285"
},
{
"name": "PostScript",
"bytes": "400254"
},
{
"name": "PowerShell",
"bytes": "19749"
},
{
"name": "Processing",
"bytes": "1477"
},
{
"name": "Prolog",
"bytes": "3121"
},
{
"name": "Protocol Buffer",
"bytes": "58995"
},
{
"name": "Python",
"bytes": "5517835"
},
{
"name": "Ruby",
"bytes": "4483"
},
{
"name": "Shell",
"bytes": "956773"
},
{
"name": "Smarty",
"bytes": "37892"
},
{
"name": "TeX",
"bytes": "4183594"
},
{
"name": "Visual Basic",
"bytes": "22546"
},
{
"name": "XSLT",
"bytes": "332312"
}
],
"symlink_target": ""
}
|
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import math
import socket
import collections
import netius
# module wide constants, consumed by size_round_unit() and host() below

SIZE_UNITS_LIST = (
    "B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
)
""" The size units list that contains the complete set of
units indexed by the depth they represent """

SIZE_UNITS_LIST_S = (
    "B", "K", "M", "G", "T", "P", "E", "Z", "Y"
)
""" The simplified size units list that contains the complete set of
units indexed by the depth they represent """

SIZE_UNIT_COEFFICIENT = 1024
""" The size unit coefficient as an integer value, this is
going to be used in each of the size steps as divisor """

DEFAULT_MINIMUM = 1024
""" The default minimum value meaning that this is the
maximum value that one integer value may have for the
size rounding operation to be performed """

DEFAULT_PLACES = 3
""" The default number of places (digits) that are going
to be used for the string representation in the round
based conversion of size units to be performed """

_HOST = None
""" The globally cached value for the current hostname,
this value is used to avoid an excessive blocking in the
get host by name call, as it is a blocking call """
def cstring(value):
    """
    Returns the prefix of the provided string up to (and excluding)
    the first NUL ("\\0") character, mimicking a C string; when no
    NUL character is present the full string is returned unchanged.
    """
    # str.index() raises ValueError when the terminator is absent,
    # which made the "-1" fallback branch unreachable dead code and
    # turned unterminated input into a crash; str.find() returns -1
    # instead, restoring the intended fallback behavior
    index = value.find("\0")
    if index == -1: return value
    return value[:index]
def chunks(sequence, count):
    """
    Yields successive slices of the provided sequence, each
    containing at most ``count`` items (the final chunk may
    be shorter).
    """
    for offset in range(0, len(sequence), count):
        chunk = sequence[offset:offset + count]
        yield chunk
def header_down(name):
    """
    Normalizes an HTTP style header name by lower-casing every
    dash separated token (eg: "Content-Type" -> "content-type").
    """
    return "-".join(token.lower() for token in name.split("-"))
def header_up(name):
    """
    Normalizes an HTTP style header name by title-casing every
    dash separated token (eg: "content-type" -> "Content-Type").
    """
    return "-".join(token.title() for token in name.split("-"))
def is_ip4(address):
    """
    Verifies that the provided string is a dotted-quad IPv4
    address: exactly four decimal octets in the 0-255 range.

    :type address: String
    :param address: The candidate address to be validated.
    :rtype: bool
    :return: If the provided value is a valid IPv4 address.
    """
    parts = address.split(".")
    if not len(parts) == 4: return False
    for part in parts:
        # require pure digit octets so that values tolerated by int()
        # but invalid in an address (eg: "+1", " 1") are rejected;
        # this also makes the explicit negative check unnecessary
        if not part.isdigit(): return False
        if int(part) > 255: return False
    return True
def is_ip6(address):
    """
    Naive complement check: any address that does not validate
    as IPv4 is assumed to be IPv6 (no structural IPv6 parsing
    is performed).
    """
    return not is_ip4(address)
def assert_ip4(address, allowed, default = True):
    """
    Verifies the given IPv4 address against a sequence of allowed
    entries, where each entry is either a plain address (exact
    match) or a CIDR subnet (membership match). An empty/None
    allowed sequence yields the provided default.
    """
    if not allowed: return default
    for entry in allowed:
        if "/" in entry:
            matched = in_subnet_ip4(address, entry)
        else:
            matched = address == entry
        if matched: return True
    return False
def in_subnet_ip4(address, subnet):
    """
    Checks if the provided IPv4 address belongs to the provided
    subnet, given in CIDR "prefix/length" notation.

    :type address: String
    :param address: The dotted-quad IPv4 address to be tested.
    :type subnet: String
    :param subnet: The subnet in "a.b.c.d/len" notation.
    :rtype: bool
    :return: If the address falls inside the subnet.
    """
    prefix, length = subnet.split("/", 1)
    host_bits = 32 - int(length)
    # builds the network mask for the prefix length and compares the
    # masked forms of both addresses; this is correct even when the
    # prefix is not aligned to the mask, unlike the previous
    # "(address & prefix) == prefix" bit-subset test
    mask = (0xffffffff >> host_bits) << host_bits
    address_a = ip4_to_addr(address)
    prefix_a = ip4_to_addr(prefix)
    return (address_a & mask) == (prefix_a & mask)
def addr_to_ip4(number):
    """
    Converts a 32 bit integer value into its dotted-quad
    IPv4 string representation.
    """
    octets = [str((int(number) >> shift) & 0xff) for shift in (24, 16, 8, 0)]
    return ".".join(octets)
def addr_to_ip6(number):
    """
    Converts a 128 bit integer value into the eight colon
    separated four-hex-digit groups of the canonical (non
    compressed) IPv6 string representation.
    """
    groups = []
    # walk the sixteen bytes from the most significant group to the
    # least significant one, rendering two bytes per group
    for group in range(7, -1, -1):
        offset = group * 2
        low = (number >> (8 * offset)) & 0xff
        high = (number >> (8 * (offset + 1))) & 0xff
        groups.append("%02x%02x" % (high, low))
    return ":".join(groups)
def ip4_to_addr(value):
    """
    Converts a dotted-quad IPv4 string into the equivalent
    32 bit integer value (big endian octet weighting).
    """
    first, second, third, fourth = value.split(".", 3)
    return (int(first) << 24) + (int(second) << 16) +\
        (int(third) << 8) + int(fourth)
def string_to_bits(value):
    # converts the string into its bit level representation by folding
    # every byte into a single arbitrary precision integer and rendering
    # it as a binary string; the "1" used as the reduce initializer acts
    # as a sentinel that preserves leading zero bits, and is stripped
    # together with the "0b" prefix by the final [3:] slice
    return bin(netius.legacy.reduce(lambda x, y : (x << 8) + y, (netius.legacy.ord(c) for c in value), 1))[3:]
def integer_to_bytes(number, length = 0):
    """
    Converts an integer value into its big endian byte string
    representation, left padded with null bytes so that at
    least ``length`` bytes are produced; negative values are
    treated as their absolute value.

    Raises a data error when the provided value is not of an
    integer type.
    """
    if not isinstance(number, netius.legacy.INTEGERS):
        raise netius.DataError("Invalid data type")

    # collects the bytes of the number, least significant first
    # (little endian order), consuming eight bits at a time
    bytes = []
    number = abs(number)

    while number > 0:
        bytes.append(chr(number & 0xff))
        number >>= 8

    # pads the collected sequence with null bytes until at least
    # the requested length is reached (no truncation is performed)
    remaining = length - len(bytes)
    remaining = 0 if remaining < 0 else remaining

    for _index in range(remaining): bytes.append("\x00")

    # reverses the sequence into big endian order and joins it
    # into the final (legacy compatible) byte string
    bytes = reversed(bytes)
    bytes_s = "".join(bytes)
    bytes_s = netius.legacy.bytes(bytes_s)
    return bytes_s
def bytes_to_integer(bytes):
    """
    Converts a big endian byte string into the equivalent
    (arbitrary precision) integer value, raising a data
    error when the provided value is not a byte string.
    """
    if not type(bytes) == netius.legacy.BYTES:
        raise netius.DataError("Invalid data type")
    number = 0
    # folds each byte into the accumulator, most significant first
    for byte in bytes: number = (number << 8) | netius.legacy.ord(byte)
    return number
def random_integer(number_bits):
    """
    Generates a random integer of approximately the size of
    the provided number of bits, rounded up to whole bytes,
    using the operating system entropy source.

    :type number_bits: int
    :param number_bits: The number of bits of the generated
    random integer, this value will be used as the basis
    for the calculus of the required bytes.
    :rtype: int
    :return: The generated random integer, should be provided
    with the requested size.
    """

    # rounds the requested bit count up to a whole number of
    # bytes and gathers that much entropy from the system
    byte_count = int(math.ceil(number_bits / 8.0))
    entropy = os.urandom(byte_count)

    # converts the random bytes into an integer and forces the
    # most significant requested bit, guaranteeing the result
    # has exactly the requested magnitude
    value = bytes_to_integer(entropy)
    value |= 1 << (number_bits - 1)
    return value
def host(default = "127.0.0.1"):
    """
    Retrieves the host for the current machine,
    typically this would be the ipv4 address of
    the main network interface.

    No result type are guaranteed and a local address
    (eg: 127.0.0.1) may be returned instead.

    The returned value is cached to avoid multiple
    blocking calls from blocking the processor.

    :type default: String
    :param default: The default value that is going to
    be returned in case no resolution is possible, take
    into account that this result is going to be cached.
    :rtype: String
    :return: The string that contains the host address
    as defined by specification for the current machine.
    """

    global _HOST

    # returns the cached value immediately when available,
    # avoiding the blocking name resolution below
    if _HOST: return _HOST

    # resolves the machine's name into an address, falling back
    # to the provided default on resolution failure (the failure
    # value is cached as well)
    hostname = socket.gethostname()
    try: _HOST = socket.gethostbyname(hostname)
    except socket.gaierror: _HOST = default

    # normalizes legacy unicode results into utf-8 encoded byte
    # strings before caching/returning the value
    is_unicode = type(_HOST) == netius.legacy.OLD_UNICODE
    if is_unicode: _HOST = _HOST.encode("utf-8")

    return _HOST
def hostname():
    """
    Returns the (possibly non fully qualified) name of the
    current local machine as reported by the underlying
    system call.

    The result is unpredictable across environments and must
    not be trusted for critical operations.

    :rtype: String
    :return: The name of the current local machine.
    """
    machine_name = socket.gethostname()
    return machine_name
def size_round_unit(
    size_value,
    minimum = DEFAULT_MINIMUM,
    places = DEFAULT_PLACES,
    reduce = True,
    space = False,
    justify = False,
    simplified = False,
    depth = 0
):
    """
    Rounds the size unit, returning a string representation
    of the value with a good rounding precision.
    This method should be used to round data sizing units.

    Note that using the places parameter it's possible to control
    the number of digits (including decimal places) of the
    number that is going to be "generated".

    :type size_value: int/float
    :param size_value: The current size value (in bytes).
    :type minimum: int
    :param minimum: The minimum value to be used.
    :type places: int
    :param places: The target number of digits to be used for
    describing the value to be used for output, this is going
    to be used to calculate the proper number of decimal places.
    :type reduce: bool
    :param reduce: If the final string value should be reduced
    meaning that right decimal zeros should be removed as they
    represent an extra unused value.
    :type space: bool
    :param space: If a space character must be used dividing
    the value from the unit symbol.
    :type justify: bool
    :param justify: If the size string value should be (right)
    justified important for properly aligned values in a table.
    :type simplified: bool
    :param simplified: If the simplified version of the units
    should be used instead of the longer one.
    :type depth: int
    :param depth: The current iteration depth value.
    :rtype: String
    :return: The string representation of the data size
    value in a simplified manner (unit).
    """

    # NOTE(review): the "reduce" and "format" names below shadow the
    # builtins of the same name; kept as-is to preserve the public
    # keyword argument name and the exact original behavior

    # in case the current size value is acceptable (less than
    # the minimum) this is the final iteration and the final
    # string representation is going to be created
    if size_value < minimum:
        # calculates the maximum size of the string that is going
        # to represent the base size value as the number of places
        # plus one (representing the decimal separator character)
        size_s = places + 1

        # calculates the target number of decimal places taking
        # into account the size (in digits) of the current size
        # value, this may never be a negative number; note that
        # for a zero size value the logarithm is short-circuited
        # to zero to avoid a math domain error
        log_value = size_value and math.log10(size_value)
        digits = int(log_value) + 1
        places = places - digits
        places = places if places > 0 else 0

        # creates the proper format string that is going to
        # be used in the creation of the proper float value
        # according to the calculated number of places
        format = "%%.%df" % places

        # rounds the size value, then converts the rounded
        # size value into a string based representation
        size_value = round(size_value, places)
        size_value_s = format % size_value

        # forces the reduce flag when the depth is zero, meaning
        # that an integer value will never be decimal, this is
        # required to avoid strange results for depth zero
        reduce = reduce or depth == 0

        # in case the dot value is not present in the size value
        # string adds it to the end otherwise an issue may occur
        # while removing extra padding characters for reduce
        if reduce and not "." in size_value_s: size_value_s += "."

        # strips the value from zero appended to the right and
        # then strips the value also from a possible decimal
        # point value that may be included in it, this is only
        # performed in case the reduce flag is enabled
        if reduce: size_value_s = size_value_s.rstrip("0")
        if reduce: size_value_s = size_value_s.rstrip(".")

        # in case the justify flag is set runs the justification
        # process on the size value taking into account the maximum
        # size of the associated size string
        if justify: size_value_s = size_value_s.rjust(size_s)

        # retrieves the size unit (string mode) for the current
        # depth according to the provided map
        if simplified: size_unit = SIZE_UNITS_LIST_S[depth]
        else: size_unit = SIZE_UNITS_LIST[depth]

        # retrieves the appropriate separator based
        # on the value of the space flag
        separator = space and " " or ""

        # creates the size value string appending the rounded
        # size value string and the size unit and returns it
        # to the caller method as the size value string
        size_value_string = size_value_s + separator + size_unit
        return size_value_string

    # otherwise the value is not acceptable and a new iteration
    # must be ran with one less depth of size value
    else:
        # re-calculates the new size value, increments the depth
        # and runs the size round unit again with the new values
        new_size_value = float(size_value) / SIZE_UNIT_COEFFICIENT
        new_depth = depth + 1
        return size_round_unit(
            new_size_value,
            minimum = minimum,
            places = places,
            reduce = reduce,
            space = space,
            justify = justify,
            simplified = simplified,
            depth = new_depth
        )
def verify(condition, message = None, exception = None):
    """
    Ensures that the requested condition returns a valid value
    and if that's not the case an exception is raised, breaking
    the current execution logic.

    :type condition: bool
    :param condition: The condition to be evaluated and that may
    trigger an exception raising.
    :type message: String
    :param message: The message to be used in the building of the
    exception that is going to be raised in case of condition failure.
    :type exception: Class
    :param exception: The exception class that is going to be used
    to build the exception to be raised in case the condition
    verification operation fails.
    """
    if condition: return
    error_class = exception or netius.AssertionError
    error_message = message or "Assertion Error"
    raise error_class(error_message)
def verify_equal(first, second, message = None, exception = None):
    """
    Verifies that the first value equals the second one, raising
    the provided (or default) exception otherwise; the default
    message is only built when no explicit message is given.
    """
    if not message:
        message = "Expected %s got %s" % (repr(second), repr(first))
    return verify(first == second, message = message, exception = exception)
def verify_not_equal(first, second, message = None, exception = None):
    """
    Verifies that the first value is different from the second one,
    raising the provided (or default) exception otherwise.
    """
    if not message:
        message = "Expected %s not equal to %s" % (repr(first), repr(second))
    return verify(not first == second, message = message, exception = exception)
def verify_type(value, types, null = True, message = None, exception = None, **kwargs):
    """
    Verifies that the provided value is an instance of the given
    type (or tuple of types), optionally accepting a None value
    when the null flag is set; extra keyword arguments are
    forwarded to the underlying verify call.
    """
    message = message or "Expected %s to have type %s" % (repr(value), repr(types))
    # uses an identity test against None (per PEP 8) instead of the
    # previous equality test, avoiding surprises with objects that
    # define a custom __eq__ implementation
    return verify(
        (null and value is None) or isinstance(value, types),
        message = message,
        exception = exception,
        **kwargs
    )
def verify_many(sequence, message = None, exception = None):
    """
    Runs the verify operation over every condition in the provided
    sequence, stopping (by raising) at the first failing one.
    """
    for condition in sequence:
        verify(condition, message = message, exception = exception)
|
{
"content_hash": "3cf9e5a232cf366f29a9975f21dfd8f6",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 110,
"avg_line_length": 35.39325842696629,
"alnum_prop": 0.6421587301587302,
"repo_name": "hivesolutions/netius",
"id": "887c58341bf97017b891e1e97852c15cc833d41e",
"size": "15796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/netius/common/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1400497"
}
],
"symlink_target": ""
}
|
"""Unit tests for modules/questionnaire."""
__author__ = [
'johncox@google.com (John Cox)',
]
from tests.unit import javascript_tests
class JavaScriptTests(javascript_tests.TestBase):
    # Runs the questionnaire module's JavaScript test suite under the
    # shared Karma test harness provided by javascript_tests.TestBase.

    def test_scripts(self):
        # path is relative to the repository root, as expected by karma_test
        self.karma_test('modules/questionnaire/javascript_tests')
|
{
"content_hash": "52cd9e747fe909bf12b5dc0f0a01b056",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 65,
"avg_line_length": 20.571428571428573,
"alnum_prop": 0.7083333333333334,
"repo_name": "ram8647/gcb-mobilecsp",
"id": "a1d396f579ecf9c9e20ba70ece18428e1a7f138c",
"size": "886",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "modules/questionnaire/questionnaire_unit_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "122290"
},
{
"name": "HTML",
"bytes": "486625"
},
{
"name": "JavaScript",
"bytes": "620039"
},
{
"name": "Python",
"bytes": "5013996"
},
{
"name": "Shell",
"bytes": "36511"
}
],
"symlink_target": ""
}
|
import tinctest
from mpp.gpdb.tests.storage.lib.sql_isolation_testcase import SQLIsolationTestCase
from gppylib.commands.base import Command
from resource_management.runaway_query.runaway_udf import *
from mpp.lib.PSQL import PSQL
def _set_VLIM_SLIM_REDZONEPERCENT(vlimMB, slimMB, activationPercent):
    """Set the three runaway-query GUCs via gpconfig and restart the
    cluster so the new values take effect.

    vlimMB: gp_vmem_protect_limit, in MB.
    slimMB: gp_vmem_limit_per_query, in MB (converted to kB below).
    activationPercent: runaway_detector_activation_percent.
    """
    # Set up GUCs for VLIM (gp_vmem_protect_limit), SLIM (gp_vmem_limit_per_query) and RQT activation percent (runaway_detector_activation_percent)
    tinctest.logger.info('Setting GUCs for VLIM gp_vmem_protect_limit=%dMB, SLIM gp_vmem_limit_per_query=%dMB and RQT activation percent runaway_detector_activation_percent=%s'%(vlimMB, slimMB, activationPercent))
    Command('Run gpconfig to set GUC gp_vmem_protect_limit',
            'source $GPHOME/greenplum_path.sh;gpconfig -c gp_vmem_protect_limit -v %d' % vlimMB).run(validateAfter=True)
    # gp_vmem_limit_per_query is configured in kB, hence the MB -> kB scaling
    Command('Run gpconfig to set GUC gp_vmem_limit_per_query',
            'source $GPHOME/greenplum_path.sh;gpconfig -c gp_vmem_limit_per_query -v %d --skipvalidation' % (slimMB * 1024)).run(validateAfter=True)
    Command('Run gpconfig to set GUC runaway_detector_activation_percent',
            'source $GPHOME/greenplum_path.sh;gpconfig -c runaway_detector_activation_percent -v %d --skipvalidation' % activationPercent).run(validateAfter=True)

    # Restart DB
    Command('Restart database for GUCs to take effect',
            'source $GPHOME/greenplum_path.sh && gpstop -ar').run(validateAfter=True)
def _reset_VLIM_SLIM_REDZONEPERCENT():
    """Restore the runaway-query GUCs (gp_vmem_protect_limit back to
    8192, the other two removed) and restart the cluster."""
    # Reset GUCs for VLIM (gp_vmem_protect_limit), SLIM (gp_vmem_limit_per_query) and RQT activation percent (runaway_detector_activation_percent)
    tinctest.logger.info('Resetting GUCs for VLIM gp_vmem_protect_limit, SLIM gp_vmem_limit_per_query, and RQT activation percent runaway_detector_activation_percent')
    Command('Run gpconfig to reset GUC gp_vmem_protect_limit',
            'source $GPHOME/greenplum_path.sh;gpconfig -c gp_vmem_protect_limit -v 8192').run(validateAfter=True)
    Command('Run gpconfig to reset GUC gp_vmem_limit_per_query',
            'source $GPHOME/greenplum_path.sh;gpconfig -r gp_vmem_limit_per_query --skipvalidation').run(validateAfter=True)
    Command('Run gpconfig to reset GUC runaway_detector_activation_percent',
            'source $GPHOME/greenplum_path.sh;gpconfig -r runaway_detector_activation_percent --skipvalidation').run(validateAfter=True)

    # Restart DB
    Command('Restart database for GUCs to take effect',
            'source $GPHOME/greenplum_path.sh && gpstop -ar').run(validateAfter=True)
class RunawayDetectorTestCase(SQLIsolationTestCase):
    """
    @tags runaway_query_termination
    """
    '''
    Test for Runaway Query Termination that require concurrent sessions
    '''

    def _infer_metadata(self):
        # pull the per-test GUC values out of the testcase metadata,
        # falling back to the documented defaults
        super(RunawayDetectorTestCase, self)._infer_metadata()
        try:
            self.vlimMB = int(self._metadata.get('vlimMB', '8192')) # Default is 8192
            self.slimMB = int(self._metadata.get('slimMB', '0')) # Default is 0
            self.activationPercent = int(self._metadata.get('redzone', '80')) # Default is 80
        except Exception:
            tinctest.logger.info("Error getting the testcase related metadata")
            raise

    def faultInjector(self, faultIdentifier, faultType, segId, sleepTime=10, numOccurences=1):
        # builds and runs a gpfaultinjector command against the given
        # segment dbid; note the return code is not validated here
        tinctest.logger.info('Injecting fault: id=%s, fault=%s, segId=%d' %
                             (faultIdentifier, faultType, segId))
        finjectCmd = 'source $GPHOME/greenplum_path.sh; '\
                     'gpfaultinjector -f %s '\
                     '-y %s --seg_dbid %d ' \
                     '--sleep_time_s=%d '\
                     '-o %d ' % (faultIdentifier, faultType, segId, sleepTime, numOccurences)
        tinctest.logger.info('Fault injector command: ' + finjectCmd)
        gpfaultinjector = Command('fault injector', finjectCmd)
        gpfaultinjector.run()

    def setUp(self):
        # apply the GUCs for this test (restarts the cluster), then slow
        # down runaway cleanup on one segment so the test can observe it
        _set_VLIM_SLIM_REDZONEPERCENT(self.vlimMB, self.slimMB, self.activationPercent)
        # segid = 2, sleepTime = 20, numOccurences = 0
        # numOccurences = 0 means we'll keep triggering the fault until we reset it
        self.faultInjector('runaway_cleanup', 'sleep', 2, 20, 0)
        return super(RunawayDetectorTestCase, self).setUp()

    def tearDown(self):
        # clear the sleep fault injected in setUp
        self.faultInjector('runaway_cleanup', 'reset', 2)
        return super(RunawayDetectorTestCase, self).tearDown()

    @classmethod
    def setUpClass(cls):
        # install the helper UDFs/views used by the SQL test files
        super(RunawayDetectorTestCase, cls).setUpClass()
        create_runaway_udf()
        create_session_state_view()

    @classmethod
    def tearDownClass(cls):
        # drop the helpers and restore the cluster-wide GUC defaults
        drop_session_state_view()
        drop_runaway_udf()
        _reset_VLIM_SLIM_REDZONEPERCENT()

    # locations of the SQL test files and their expected/actual outputs,
    # interpreted by the SQLIsolationTestCase machinery
    sql_dir = 'sql/'
    ans_dir = 'expected'
    out_dir = 'output/'
|
{
"content_hash": "14def84501a07fb6149814a57fd3cd4f",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 213,
"avg_line_length": 51.642105263157895,
"alnum_prop": 0.6718304117407257,
"repo_name": "kaknikhil/gpdb",
"id": "36961f81f271b4e974c08736f4deebc7d465eb27",
"size": "4906",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/test/tinc/tincrepo/resource_management/runaway_query/runaway_detector/test_runaway_detector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11492"
},
{
"name": "C",
"bytes": "35311132"
},
{
"name": "C++",
"bytes": "3781313"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "734463"
},
{
"name": "HTML",
"bytes": "191406"
},
{
"name": "Java",
"bytes": "268244"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "196275"
},
{
"name": "M4",
"bytes": "104559"
},
{
"name": "Makefile",
"bytes": "434729"
},
{
"name": "PLSQL",
"bytes": "261269"
},
{
"name": "PLpgSQL",
"bytes": "5487022"
},
{
"name": "Perl",
"bytes": "3893346"
},
{
"name": "Perl 6",
"bytes": "14377"
},
{
"name": "Python",
"bytes": "8690818"
},
{
"name": "Roff",
"bytes": "51338"
},
{
"name": "Ruby",
"bytes": "26724"
},
{
"name": "SQLPL",
"bytes": "3824391"
},
{
"name": "Shell",
"bytes": "544188"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "488932"
}
],
"symlink_target": ""
}
|
from play import Play
from behavior import Behavior
import plays.stopped
import plays.testing.test_coach
import logging
from PyQt5 import QtCore
import main
import evaluation.double_touch
import tactics.positions.goalie
import role_assignment
import traceback
## The RootPlay is basically the python-side of the c++ GameplayModule
# it coordinates the selection of the 'actual' play and handles the goalie behavior
class RootPlay(Play, QtCore.QObject):
def __init__(self):
QtCore.QObject.__init__(self)
Play.__init__(self, continuous=True)
self._play = None
self._goalie_id = None
self.add_transition(Behavior.State.start, Behavior.State.running,
lambda: True, 'immediately')
# if a play fails for some reason, we can temporarily blacklist it, which removes it from play
# selection for the next iteration, then enables it again
self.temporarily_blacklisted_play_class = None
self._currently_restarting = False
play_changed = QtCore.pyqtSignal("QString")
def execute_running(self):
# update double touch tracker
evaluation.double_touch.tracker().spin()
# cache and calculate the score() function for each play class
main.play_registry().recalculate_scores()
# Play Selection
################################################################################
if main.game_state().is_stopped():
evaluation.double_touch.tracker().restart()
if main.game_state().is_placement():
if not isinstance(self.play,
plays.restarts.placement.Placement):
logging.info("Placing Ball")
self.play = plays.restarts.placement.Placement()
self._currently_restarting = True
else:
if self.play is None or not self.play.run_during_stopped():
logging.info(
"Running 'Stopped' play due to game state change")
self.play = plays.stopped.Stopped()
self._currently_restarting = True
elif main.game_state().is_halted():
evaluation.double_touch.tracker().restart()
self.play = None
else:
# (play_class, score value) tuples
enabled_plays_and_scores = [
p for p in main.play_registry().get_enabled_plays_and_scores()
]
# only let restart play run once
enabled_plays_and_scores = [
p
for p in enabled_plays_and_scores
if not p[0].is_restart() or (p[0].is_restart() and
self._currently_restarting)
]
# handle temporary blacklisting
# we remove the blacklisted play class from selection for this iteration, then unblacklist it
enabled_plays_and_scores = [
p
for p in enabled_plays_and_scores
if p[0] != self.temporarily_blacklisted_play_class
]
self.temporarily_blacklisted_play_class = None
# see if we need to kill current play or if it's done running
if self.play is not None:
if self.play.__class__ not in map(lambda tup: tup[0],
enabled_plays_and_scores):
logging.info("Current play '" +
self.play.__class__.__name__ +
"' no longer enabled, aborting")
self.play.terminate()
self.play = None
elif self.play.is_done_running():
logging.info("Current play '" +
self.play.__class__.__name__ +
"' finished running")
if self.play.is_restart:
self._currently_restarting = False
self.play = None
elif self.play.__class__.score() == float("inf"):
logging.info("Current play '" +
self.play.__class__.__name__ +
"' no longer applicable, ending")
self.play.terminate()
self.play = None
if self.play is None:
try:
if len(enabled_plays_and_scores) > 0:
# select the play with the smallest value for score()
play_class_and_score = min(enabled_plays_and_scores,
key=lambda tup: tup[1])
# run the play with the lowest score, as long as it isn't inf
if play_class_and_score[1] != float("inf"):
play_class = play_class_and_score[0]
self.play = play_class() # instantiate it
else:
# there's no available plays to run
pass
except Exception as e:
logging.error("Exception occurred during play selection: "
+ str(e))
traceback.print_exc()
if self.play is not None:
logging.info("Chose new play: '" +
self.play.__class__.__name__ + "'")
# Role Assignment
################################################################################
try:
assignments = role_assignment.assign_roles(
self.robots, self.role_requirements())
except role_assignment.ImpossibleAssignmentError as e:
logging.error(
"Unable to satisfy role assignment constraints. Dropping and temp. blacklisting current play...")
self.drop_current_play(temporarily_blacklist=True)
else:
self.assign_roles(assignments)
def handle_subbehavior_exception(self, name, exception):
if name == 'goalie':
logging.error("Goalie encountered an exception: " + str(exception)
+ ". Reloading goalie behavior")
traceback.print_exc()
self.drop_goalie_behavior()
else:
logging.error("Play '" + self.play.__class__.__name__ +
"' encountered an exception: " + str(exception) +
". Dropping and temp. blacklisting current play...")
traceback.print_exc()
self.drop_current_play(temporarily_blacklist=True)
# this is used to force a reselection of a play
def drop_current_play(self, temporarily_blacklist=False):
self.temporarily_blacklisted_play_class = self.play.__class__
self.play = None
# this is called when the goalie behavior must be reloaded (for example when the goalie.py file is modified)
def drop_goalie_behavior(self):
if self.has_subbehavior_with_name('goalie'):
self.remove_subbehavior('goalie')
self.setup_goalie_if_needed()
@property
def play(self):
return self._play
@play.setter
def play(self, value):
# trash old play
if self.play is not None:
self.remove_subbehavior('play')
self._play = None
if value is not None:
self._play = value
# see if this play handles the goalie by itself
if value.__class__.handles_goalie():
self.drop_goalie_behavior()
self.add_subbehavior(value, name='play', required=True)
# make sure somebody handles the goalie
self.setup_goalie_if_needed()
# change notification so ui can update if necessary
self.play_changed.emit(self.play.__class__.__name__ if self._play is
not None else "(No Play)")
## the c++ GameplayModule reaches through the language portal and sets this
# note that in c++, a value of -1 indicates no assigned goalie, in python we represent the same thing with None
@property
def goalie_id(self):
    """Shell id of the robot assigned as goalie, or None when no goalie is assigned."""
    return self._goalie_id
@goalie_id.setter
def goalie_id(self, value):
    """Assign the goalie robot; the C++ side passes -1 to mean "no goalie"."""
    if value == -1:
        self._goalie_id = None
    else:
        self._goalie_id = value
    self.setup_goalie_if_needed()
    logging.info("goalie_id set to: " + str(self._goalie_id))
def setup_goalie_if_needed(self):
    """Reconcile the goalie subbehavior with the current goalie assignment.

    With no goalie id, any existing goalie subbehavior is removed.
    Otherwise an existing goalie subbehavior is reused, or a new one is
    created unless the current play handles the goalie itself; the chosen
    goalie then gets the assigned shell id.
    """
    if self.goalie_id is None:
        # nobody assigned -- tear down any leftover goalie behavior
        if self.has_subbehavior_with_name('goalie'):
            self.remove_subbehavior('goalie')
        return
    if self.has_subbehavior_with_name('goalie'):
        goalie = self.subbehavior_with_name('goalie')
    elif self.play is None or not self.play.__class__.handles_goalie():
        goalie = tactics.positions.goalie.Goalie()
        self.add_subbehavior(goalie, 'goalie', required=True)
    else:
        # the play takes care of the goalie by itself
        goalie = None
    if goalie is not None:
        goalie.shell_id = self.goalie_id
@property
def robots(self):
    """The robots available to gameplay; always a list (the setter maps None to [])."""
    return self._robots
@robots.setter
def robots(self, robots):
    """Store the robot list; assigning None yields an empty list."""
    if robots is None:
        robots = []
    self._robots = robots
def __str__(self):
    """Multi-line summary: one line per subbehavior."""
    return '\n'.join(map(str, self.all_subbehaviors()))
|
{
"content_hash": "ccf6db3378db6d44c3afd764da807224",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 115,
"avg_line_length": 41.66079295154185,
"alnum_prop": 0.5308237284551126,
"repo_name": "JNeiger/robocup-software",
"id": "5886ab43dd5976d8bb073ef0b3fd859df55af229",
"size": "9457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soccer/gameplay/root_play.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2990"
},
{
"name": "C++",
"bytes": "1083792"
},
{
"name": "CMake",
"bytes": "112437"
},
{
"name": "Dockerfile",
"bytes": "2872"
},
{
"name": "MATLAB",
"bytes": "31229"
},
{
"name": "Makefile",
"bytes": "5816"
},
{
"name": "Python",
"bytes": "735005"
},
{
"name": "Shell",
"bytes": "21468"
}
],
"symlink_target": ""
}
|
'''
Created on 15 Aug 2011
@author: kfuchsbe
'''
class JPyMadGlobals():
    """Module-wide shared state; all fields start as None and are set elsewhere."""
    # presumably the gateway into the Java VM -- confirm against the setup code
    java_gateway = None
    # handle to the JMad service, obtained once the gateway is up -- confirm
    jmad_service = None
    # enum definitions (likely mirrored from the Java side) -- confirm
    enums = None
|
{
"content_hash": "68fc76b17e382b4d06efe0277783ec34",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 23,
"avg_line_length": 15.333333333333334,
"alnum_prop": 0.644927536231884,
"repo_name": "pymad/jpymad",
"id": "38c33def25f377b72067cab7438b65b376ac6d66",
"size": "924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cern/jpymad/globals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8170"
},
{
"name": "Perl",
"bytes": "22155"
},
{
"name": "Python",
"bytes": "215528"
},
{
"name": "Shell",
"bytes": "4509"
}
],
"symlink_target": ""
}
|
"""OS X Launchd process listing test data.
These dicts are python representations of the pyobjc NSCFDictionarys returned by
the ServiceManagement framework. It's close enough to the pyobjc object that we
can use it to test the parsing code without needing to run on OS X.
"""
# Disable some lint warnings to avoid tedious fixing of test data
# pylint: disable=g-line-too-long
# Number of entries in this test data that we expect the launchd listing
# parser under test to drop due to filtering.
FILTERED_COUNT = 84
class FakeCFDict(object):
  """Fake out the CFDictionary python wrapper."""

  def __init__(self, value):
    # Underlying plain dict holding the job attributes.
    self.value = value

  def __contains__(self, key):
    return key in self.value

  def __getitem__(self, key):
    return self.value[key]

  # pylint: disable=g-bad-name
  def get(self, key, default='', stringify=False):
    """Dict-style get; optionally coerces a present value to str."""
    if key not in self.value:
      return default
    obj = self.value[key]
    return str(obj) if stringify else obj
  # pylint: enable=g-bad-name
class FakeCFObject(object):
  """Fake CFString and other wrapped objects."""
  def __init__(self, value):
    # Keep the raw python value so tests can inspect it directly.
    self.value = value
  def __int__(self):
    # Support int() coercion on the wrapped value.
    return int(self.value)
# A single fake launchd job entry (com.apple.FileSyncAgent.PHD), for tests
# that only need one record rather than the full JOBS listing.
JOB = [
    FakeCFDict({
        'EnableTransactions':
            1,
        'Label':
            'com.apple.FileSyncAgent.PHD',
        'LastExitStatus':
            FakeCFObject(0),
        'LimitLoadToSessionType':
            'Background',
        'MachServices': {
            'com.apple.FileSyncAgent.PHD': 0,
            'com.apple.FileSyncAgent.PHD.isRunning': 0,
        },
        'OnDemand':
            FakeCFObject(1),
        'ProgramArguments': [
            FakeCFObject(
                '/System/Library/CoreServices/FileSyncAgent.app/Contents/MacOS/FileSyncAgent'
            ),
            FakeCFObject('-launchedByLaunchd'),
            FakeCFObject('-PHDPlist')
        ],
        'TimeOut':
            FakeCFObject(30),
        'TransactionCount':
            '-1',
    }),
]
JOBS = [
FakeCFDict({
'Label':
'0x7f8759d20ab0.mach_init.Inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'[0x0-0x4d44d4].com.google.GoogleTalkPluginD[32298].subset.257',
'MachServices': {
'com.Google.BreakpadInspector32298': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Library/Application '
'Support/Google/GoogleTalkPlugin.app/Contents/Frameworks/GoogleBreakpad.framework/Versions/A/Resources/Inspector'
),
FakeCFObject('com.Google.BreakpadInspector32298')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'0x7f8759c23570.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Google Chrome H[32284].subset.584',
'MachServices': {
'com.Breakpad.Inspector32284': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google '
'Chrome.app/Contents/Versions/21.0.1180.79/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector32284')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': 'com.apple.coreservices.appleid.authentication',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {
'com.apple.coreservices.appleid.authentication': 0,
},
'OnDemand': FakeCFObject(1),
'Program': '/System/Library/CoreServices/AppleIDAuthAgent',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d30310.anonymous.launchd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[35271].subset.440',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(499),
'Program': 'launchd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c23ae0.anonymous.launchd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32282].subset.281',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(499),
'Program': 'launchd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'0x7f8759d30610.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Google Chrome H[35271].subset.440',
'MachServices': {
'com.Breakpad.Inspector35271': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google '
'Chrome.app/Contents/Versions/21.0.1180.79/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector35271')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': 'com.apple.systemprofiler',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {
'com.apple.systemprofiler': 0,
},
'OnDemand': FakeCFObject(1),
'Program': '/Applications/Utilities/System '
'Information.app/Contents/MacOS/System Information',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d2b140.anonymous.bash',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(69813),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d318d0.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(60522),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d1fb70.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32285),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c22f60.anonymous.Google Chrome',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32284].subset.584',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32275),
'Program': 'Google Chrome',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.FontWorker',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.FontWorker': 0,
'com.apple.FontWorker.ATS': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/ATS.framework/Versions/A/Support/fontworker',
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'0x7f8759d1d200.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'[0x0-0x4c54c5].com.google.Chrome[32275].subset.632',
'MachServices': {
'com.Breakpad.Inspector32275': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google '
'Chrome.app/Contents/Versions/21.0.1180.79/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector32275')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.UserNotificationCenterAgent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.UNCUserNotificationAgent': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/CoreServices/UserNotificationCenter.app/Contents/MacOS/UserNotificationCenter'
)
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d30f40.anonymous.Google Chrome C',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[60520].subset.399',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(60513),
'Program': 'Google Chrome C',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.bluetoothUIServer',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.bluetoothUIServer': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/CoreServices/BluetoothUIServer.app/Contents/MacOS/BluetoothUIServer',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.SubmitDiagInfo',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject('/System/Library/CoreServices/SubmitDiagInfo')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'EnableTransactions': 1,
'Label': 'com.apple.gssd-agent',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'MachServices': {
'com.apple.gssd-agent': 0,
},
'OnDemand': FakeCFObject(1),
'Program': '/usr/sbin/gssd',
'ProgramArguments': [FakeCFObject('gssd-agent')],
'TimeOut': FakeCFObject(30),
'TransactionCount': '-1',
}),
FakeCFDict({
'Label':
'[0x0-0x4d44d4].com.google.GoogleTalkPluginD',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {},
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(32298),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.CFPasteboardClient': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'ProgramArguments': [
FakeCFObject(
'/Library/Application '
'Support/Google/GoogleTalkPlugin.app/Contents/MacOS/GoogleTalkPlugin'
),
FakeCFObject('-psn_0_5063892')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.quicklook.config',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.quicklook.config': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/QuickLook.framework/Resources/quicklookconfig'
)
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759c2fda0.anonymous.login',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(83461),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c12410.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32297),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.CFPasteboardClient': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c2cec0.anonymous.bash',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(73991),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c24ca0.anonymous.login',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(24592),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'0x7f8759c17720.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Google Chrome H[35104].subset.553',
'MachServices': {
'com.Breakpad.Inspector35104': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google '
'Chrome.app/Contents/Versions/21.0.1180.79/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector35104')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d1cf00.anonymous.login',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(38234),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c2e870.anonymous.configd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(17),
'Program': 'configd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'0x7f8759c23de0.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Google Chrome H[32282].subset.281',
'MachServices': {
'com.Breakpad.Inspector32282': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google '
'Chrome.app/Contents/Versions/21.0.1180.79/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector32282')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions': 1,
'Label': 'com.apple.spindump_agent',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {
'com.apple.spinreporteragent': 0,
},
'OnDemand': FakeCFObject(1),
'ProgramArguments': [FakeCFObject('/usr/libexec/spindump_agent')],
'TimeOut': FakeCFObject(30),
'TransactionCount': '-1',
}),
FakeCFDict({
'Label': '0x7f8759c16550.anonymous.login',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(73954),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c2f1a0.anonymous.configd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(17),
'Program': 'configd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.ZoomWindow',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.ZoomWindow.running': 0,
'com.apple.ZoomWindow.startup': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/CoreServices/ZoomWindow.app/Contents/MacOS/ZoomWindowStarter'
),
FakeCFObject('launchd'),
FakeCFObject('-s')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759c17a30.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(35104),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.syncservices.uihandler',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.syncservices.uihandler': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/PrivateFrameworks/SyncServicesUI.framework/Versions/Current/Resources/syncuid.app/Contents/MacOS/syncuid',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c17110.anonymous.Google Chrome',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[35104].subset.553',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32275),
'Program': 'Google Chrome',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.DictionaryPanelHelper',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.DictionaryPanelHelper': 0,
'com.apple.DictionaryPanelHelper.reply': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/Applications/Dictionary.app/Contents/SharedSupport/DictionaryPanelHelper.app/Contents/MacOS/DictionaryPanelHelper',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c1d630.anonymous.Python',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(69592),
'Program': 'python',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions': 1,
'Label': 'com.apple.talagent',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {
'com.apple.window_proxies': 0,
'com.apple.window_proxies.startup': 0,
},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(639),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.CFPasteboardClient': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'Program': '/System/Library/CoreServices/talagent',
'TimeOut': FakeCFObject(30),
'TransactionCount': 0,
}),
FakeCFDict({
'Label': '0x7f8759c1f7f0.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[60522].subset.309',
'MachServices': {
'com.Breakpad.BootstrapParent': 0,
},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(60522),
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.speech.recognitionserver',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.speech.recognitionserver': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/Frameworks/Carbon.framework/Frameworks/SpeechRecognition.framework/Versions/A/SpeechRecognitionServer.app/Contents/MacOS/SpeechRecognitionServer',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c2faa0.anonymous.Python',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(82320),
'Program': 'python',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.cvmsCompAgent_x86_64',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.cvmsCompAgent_x86_64': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/CVMCompiler'
),
FakeCFObject('1')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759c23270.anonymous.launchd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32284].subset.584',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(499),
'Program': 'launchd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d30c30.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[60520].subset.399',
'MachServices': {
'com.Breakpad.BootstrapParent': 0,
},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(60520),
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.printuitool.agent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.printuitool.agent': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/PrivateFrameworks/PrintingPrivate.framework/Versions/A/PrintUITool'
)
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759d29b20.anonymous.bash',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(46172),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.coreservices.uiagent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.coreservices.launcherror-handler': 0,
'com.apple.coreservices.quarantine-resolver': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/CoreServices/CoreServicesUIAgent.app/Contents/MacOS/CoreServicesUIAgent',
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.pool.1',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.pool.1': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker'
),
FakeCFObject('-s'),
FakeCFObject('mdworker'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.pool.1')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'[0x0-0x21021].com.google.GoogleDrive',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {},
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(763),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.CFPasteboardClient': 0,
'com.apple.axserver': 0,
'com.apple.coredrag': 0,
'com.apple.tsm.portname': 0,
},
'ProgramArguments': [
FakeCFObject(
'/Applications/Google Drive.app/Contents/MacOS/Google Drive'),
FakeCFObject('-psn_0_135201')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.cvmsCompAgent_i386',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.cvmsCompAgent_i386': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/CVMCompiler'
),
FakeCFObject('1')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759c2b8b0.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32284].subset.584',
'MachServices': {
'com.Breakpad.BootstrapParent': 0,
},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32284),
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d1f860.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32283),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.VoiceOver',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.VoiceOver.running': 0,
'com.apple.VoiceOver.startup': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/CoreServices/VoiceOver.app/Contents/MacOS/VoiceOver'
),
FakeCFObject('launchd'),
FakeCFObject('-s')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759d2e7b0.anonymous.tail',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(74455),
'Program': 'tail',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.PreferenceSyncAgent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/CoreServices/PreferenceSyncClient.app/Contents/MacOS/PreferenceSyncClient'
),
FakeCFObject('--sync'),
FakeCFObject('--periodic')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c15a50.anonymous.login',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(38234),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.i386.framework.0',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.i386.framework.0': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker32'
),
FakeCFObject('-s'),
FakeCFObject('mdworker-lsb'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.i386.framework.0')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'com.apple.launchctl.Background',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject('/bin/launchctl'),
FakeCFObject('bootstrap'),
FakeCFObject('-S'),
FakeCFObject('Background')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.speech.synthesisserver',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.speech.synthesis.ScreenReaderPort': 0,
'com.apple.speech.synthesis.SpeakingHotKeyPort': 0,
'com.apple.speech.synthesis.TimeAnnouncementsPort': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/SpeechSynthesis.framework/Versions/A/SpeechSynthesisServer.app/Contents/MacOS/SpeechSynthesisServer',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'0x7f8759d207b0.anonymous.launchd',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'[0x0-0x4d44d4].com.google.GoogleTalkPluginD[32298].subset.257',
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(499),
'Program':
'launchd',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.ATS.FontValidatorConduit',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.ATS.FontValidatorConduit': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/ATS.framework/Versions/A/Support/FontValidatorConduit',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.fontd',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.FontObjectsServer': 0,
'com.apple.FontServer': 0,
},
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(640),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/ApplicationServices.framework/Frameworks/ATS.framework/Support/fontd'
)
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
0,
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.quicklook',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.quicklook': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/QuickLook.framework/Resources/quicklookd.app/Contents/MacOS/quicklookd'
)
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759d29e20.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(35271),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d20db0.anonymous.sshd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(68600),
'Program': 'sshd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.unmountassistant.useragent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.unmountassistant.useragent': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/CoreServices/UnmountAssistantAgent.app/Contents/MacOS/UnmountAssistantAgent'
)
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759d1ebf0.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32282),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.installd.user',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.installd.user': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/PrivateFrameworks/PackageKit.framework/Resources/installd'
)
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d33ce0.anonymous.login',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(46170),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c240f0.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32284),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.syncdefaultsd',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.syncdefaultsd': 0,
'com.apple.syncdefaultsd.push': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/PrivateFrameworks/SyncedDefaults.framework/Support/syncdefaultsd'
)
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.marcoagent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.marco': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/PrivateFrameworks/Marco.framework/marcoagent')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.distnoted.xpc.agent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.distributed_notifications@Uv3': 0,
},
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(625),
'ProgramArguments': [
FakeCFObject('/usr/sbin/distnoted'),
FakeCFObject('agent')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
42,
}),
FakeCFDict({
'Label': '0x7f8759c2eb70.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32282].subset.281',
'MachServices': {
'com.Breakpad.BootstrapParent': 0,
},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32282),
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d28f10.anonymous.login',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(83461),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c1fb00.anonymous.Google Chrome C',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[60522].subset.309',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(60513),
'Program': 'Google Chrome C',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.bluetoothAudioAgent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.bluetoothAudioAgent': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/CoreServices/BluetoothAudioAgent.app/Contents/MacOS/BluetoothAudioAgent'
)
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.pool.framework.0',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.pool.framework.0': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker'
),
FakeCFObject('-s'),
FakeCFObject('mdworker'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.pool.framework.0')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'0x7f8759d20190.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Google Chrome H[32297].subset.637',
'MachServices': {
'com.Breakpad.Inspector32297': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google '
'Chrome.app/Contents/Versions/21.0.1180.79/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector32297')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'[0x0-0x19019].com.apple.AppleSpell',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'Multilingual (Apple)_OpenStep': 0,
'da (Apple)_OpenStep': 0,
'de (Apple)_OpenStep': 0,
'en (Apple)_OpenStep': 0,
'en_AU (Apple)_OpenStep': 0,
'en_CA (Apple)_OpenStep': 0,
'en_GB (Apple)_OpenStep': 0,
'en_JP (Apple)_OpenStep': 0,
'en_US (Apple)_OpenStep': 0,
'es (Apple)_OpenStep': 0,
'fr (Apple)_OpenStep': 0,
'it (Apple)_OpenStep': 0,
'nl (Apple)_OpenStep': 0,
'pt (Apple)_OpenStep': 0,
'pt_BR (Apple)_OpenStep': 0,
'pt_PT (Apple)_OpenStep': 0,
'ru (Apple)_OpenStep': 0,
'sv (Apple)_OpenStep': 0,
},
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(727),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Services/AppleSpell.service/Contents/MacOS/AppleSpell'
),
FakeCFObject('-psn_0_102425')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
0,
}),
FakeCFDict({
'Label': '0x7f8759d22370.anonymous.Google Chrome',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32656].subset.619',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32275),
'Program': 'Google Chrome',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'0x7f8759d2f3c0.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'[0x0-0x34c34c].com.google.Chrome.canary[60513].subset.374',
'MachServices': {
'com.Breakpad.Inspector60513': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google Chrome '
'Canary.app/Contents/Versions/180.1.1025.40/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector60513')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.pool.framework.1',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.pool.framework.1': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker'
),
FakeCFObject('-s'),
FakeCFObject('mdworker'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.pool.framework.1')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.lsb.framework.0',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.lsb.framework.0': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker'
),
FakeCFObject('-s'),
FakeCFObject('mdworker-lsb'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.lsb.framework.0')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759c16060.anonymous.bash',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(68666),
'Program': 'sshd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.store_helper',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.store_helper': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/PrivateFrameworks/CommerceKit.framework/Resources/store_helper.app/Contents/MacOS/store_helper',
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.pool.framework.2',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.pool.framework.2': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker'
),
FakeCFObject('-s'),
FakeCFObject('mdworker'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.pool.framework.2')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'com.apple.FontRegistryUIAgent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.FontRegistry.FontRegistryUIAgent': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/Frameworks/ApplicationServices.framework/Frameworks/ATS.framework/Support/FontRegistryUIAgent.app/Contents/MacOS/FontRegistryUIAgent',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.softwareupdateagent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject('/System/Library/CoreServices/Software '
'Update.app/Contents/Resources/SoftwareUpdateCheck'),
FakeCFObject('-LaunchApp'),
FakeCFObject('YES')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.ubd',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/PrivateFrameworks/Ubiquity.framework/Versions/A/Support/ubd'
)
],
'Sockets': {
'Apple_Ubiquity_Message': ('-1'),
},
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d07d60.anonymous.applepushservic',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(85),
'Program': 'applepushservic',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c1c700.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32291].subset.223',
'MachServices': {
'com.Breakpad.BootstrapParent': 0,
},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32291),
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d2c7e0.anonymous.Google Chrome',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32438].subset.554',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32275),
'Program': 'Google Chrome',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c103c0.anonymous.bash',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(24593),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.pool.framework.3',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.pool.framework.3': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker'
),
FakeCFObject('-s'),
FakeCFObject('mdworker'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.pool.framework.3')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.ScreenReaderUIServer',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.ScreenReaderUIServer': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/PrivateFrameworks/ScreenReader.framework/Resources/ScreenReaderUIServer.app/Contents/MacOS/ScreenReaderUIServer',
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759c1ab70.anonymous.Google Chrome',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32283].subset.231',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32275),
'Program': 'Google Chrome',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'[0x0-0x34c34c].com.google.Chrome.canary',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.google.Chrome.canary.rohitfork.60513': 0,
},
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(60513),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.CFPasteboardClient': 0,
'com.apple.axserver': 0,
'com.apple.coredrag': 0,
'com.apple.tsm.portname': 0,
},
'ProgramArguments': [
FakeCFObject(
'/Applications/Google Chrome Canary.app/Contents/MacOS/Google '
'Chrome Canary'),
FakeCFObject('-psn_0_3457868')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c1bde0.anonymous.launchd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32285].subset.229',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(499),
'Program': 'launchd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions': 1,
'Label': 'com.apple.warmd_agent',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(737),
'ProgramArguments': [FakeCFObject('/usr/libexec/warmd_agent')],
'TimeOut': FakeCFObject(30),
'TransactionCount': 0,
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.ATS.FontValidator',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.ATS.FontValidator': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/ATS.framework/Versions/A/Support/FontValidator',
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759c115d0.anonymous.Google Chrome C',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[60518].subset.363',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(60513),
'Program': 'Google Chrome C',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.pool.3',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.pool.3': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker'
),
FakeCFObject('-s'),
FakeCFObject('mdworker'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.pool.3')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'0x7f8759c1e5a0.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Google Chrome H[60518].subset.363',
'MachServices': {
'com.Breakpad.Inspector60518': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google Chrome '
'Canary.app/Contents/Versions/180.1.1025.40/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector60518')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.RemoteDesktop.agent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.RemoteDesktop.agent': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/CoreServices/RemoteManagement/ARDAgent.app/Contents/MacOS/ARDAgent',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c24490.anonymous.launchd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[60518].subset.363',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(499),
'Program': 'launchd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c18730.anonymous.sh',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(68799),
'Program': 'sh',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d2fcf0.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[35271].subset.440',
'MachServices': {
'com.Breakpad.BootstrapParent': 0,
},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(35271),
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.FTCleanup',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject('/bin/sh'),
FakeCFObject('-c'),
FakeCFObject(
"if [ \"$HOME\" == \"/System\" ], then exit 0, fi, if [ -f "
"\"$HOME/Library/LaunchAgents/com.apple.imagent.plist\" ] , "
'then launchctl unload -wF '
'~/Library/LaunchAgents/com.apple.imagent.plist , launchctl '
'load -wF /System/Library/LaunchAgents/com.apple.imagent.plist'
' , fi , if [ -f '
"\"$HOME/Library/LaunchAgents/com.apple.apsd-ft.plist\" ] , "
"then launchctl unload -wF -S 'Aqua' "
'~/Library/LaunchAgents/com.apple.apsd-ft.plist, fi , if [ -f '
"\"$HOME/Library/LaunchAgents/com.apple.marcoagent.plist\" ] ,"
' then launchctl unload -wF '
'~/Library/LaunchAgents/com.apple.marcoagent.plist , launchctl'
' load -wF '
'/System/Library/LaunchAgents/com.apple.marcoagent.plist , fi '
', if [ -f '
"\"$HOME/Library/LaunchAgents/com.apple.FTMonitor.plist\" ] , "
'then launchctl unload -wF '
'~/Library/LaunchAgents/com.apple.FTMonitor.plist , fi ,')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.isolation.0',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.isolation.0': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker'
),
FakeCFObject('-s'),
FakeCFObject('mdworker'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.isolation.0')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'com.apple.netauth.user.gui',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.netauth.user.gui': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/CoreServices/NetAuthAgent.app/Contents/MacOS/NetAuthAgent'
)
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d28310.anonymous.bash',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(83462),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d31250.anonymous.launchd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[60520].subset.399',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(499),
'Program': 'launchd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'[0x0-0x9009].com.apple.Terminal',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.Terminal.ServiceProvider': 0,
},
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(634),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.CFPasteboardClient': 0,
'com.apple.axserver': 0,
'com.apple.coredrag': 0,
'com.apple.tsm.portname': 0,
},
'ProgramArguments': [
FakeCFObject(
'/Applications/Utilities/Terminal.app/Contents/MacOS/Terminal'),
FakeCFObject('-psn_0_36873')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
1,
}),
FakeCFDict({
'Label': '0x7f8759c2d1c0.anonymous.su',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(74539),
'Program': 'su',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c1d940.anonymous.sshd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(68714),
'Program': 'sshd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'org.openbsd.ssh-agent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(46009),
'ProgramArguments': [
FakeCFObject('/usr/bin/ssh-agent'),
FakeCFObject('-l')
],
'Sockets': {
'Listeners': ('-1'),
},
'TimeOut':
FakeCFObject(30),
'TransactionCount':
0,
}),
FakeCFDict({
'Label':
'com.apple.familycontrols.useragent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.familycontrols.useragent': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/PrivateFrameworks/FamilyControls.framework/Resources/ParentalControls.app/Contents/MacOS/ParentalControls'
)
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c1b7c0.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32285].subset.229',
'MachServices': {
'com.Breakpad.BootstrapParent': 0,
},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32285),
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.AppStoreUpdateAgent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.AppStoreUpdateAgent': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/Applications/App Store.app/Contents/Resources/appstoreupdateagent',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.csuseragent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.csuseragent': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject('/System/Library/CoreServices/CSUserAgent')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.PubSub.Agent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.pubsub.ipc': 0,
'com.apple.pubsub.notification': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/PubSub.framework/Versions/A/Resources/PubSubAgent.app/Contents/MacOS/PubSubAgent'
)
],
'Sockets': {
'Render': ('-1'),
},
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.rcd',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.rcd': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/CoreServices/rcd.app/Contents/MacOS/rcd')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'com.apple.netauth.user.auth',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.netauth.user.auth': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/CoreServices/NetAuthAgent.app/Contents/MacOS/NetAuthSysAgent'
)
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c1dc40.anonymous.bash',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(68720),
'Program': 'sshd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c2f7a0.anonymous.bash',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(75030),
'Program': 'login',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.BezelUIServer',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.BezelUI': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/LoginPlugins/BezelServices.loginPlugin/Contents/Resources/BezelUI/BezelUIServer'
)
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c0cf00.anonymous.com.apple.dock.',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(652),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.CFPasteboardClient': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'Program': 'com.apple.dock.',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d28c10.anonymous.bash',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(83462),
'Program': 'bash',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions': 1,
'Label': 'com.apple.xgridd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {
'com.apple.xgridd': 0,
},
'OnDemand': FakeCFObject(1),
'ProgramArguments': [FakeCFObject('/usr/libexec/xgrid/xgridd')],
'TimeOut': FakeCFObject(30),
'TransactionCount': '-1',
}),
FakeCFDict({
'Label':
'com.apple.reclaimspace',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.ReclaimSpace': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/CoreServices/backupd.bundle/Contents/Resources/ReclaimSpaceAgent.app/Contents/MacOS/ReclaimSpaceAgent',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'0x7f8759d31550.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Google Chrome H[60520].subset.399',
'MachServices': {
'com.Breakpad.Inspector60520': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google Chrome '
'Canary.app/Contents/Versions/180.1.1025.40/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector60520')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'[0x0-0x4c54c5].com.google.Chrome',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.google.Chrome.rohitfork.32275': 0,
},
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(32275),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.CFPasteboardClient': 0,
'com.apple.axserver': 0,
'com.apple.coredrag': 0,
'com.apple.tsm.portname': 0,
},
'ProgramArguments': [
FakeCFObject(
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'),
FakeCFObject('-psn_0_5002437')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c0c320.anonymous.loginwindow',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(71),
'Program': 'loginwindow',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.lsb.0',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.lsb.0': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker'
),
FakeCFObject('-s'),
FakeCFObject('mdworker-lsb'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.lsb.0')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'com.apple.midiserver',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.midiserver': 0,
'com.apple.midiserver.io': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreMIDI.framework/MIDIServer')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c15d50.anonymous.eapolclient',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(68168),
'Program': 'eapolclient',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.AddressBook.SourceSync',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.AddressBook.PushNotification': 0,
'com.apple.AddressBook.SourceSync': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/AddressBook.framework/Versions/A/Resources/AddressBookSourceSync.app/Contents/MacOS/AddressBookSourceSync'
)
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.i386.0',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.i386.0': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker32'
),
FakeCFObject('-s'),
FakeCFObject('mdworker-lsb'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.i386.0')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'0x7f8759d2a8d0.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Google Chrome H[32438].subset.554',
'MachServices': {
'com.Breakpad.Inspector32438': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google '
'Chrome.app/Contents/Versions/21.0.1180.79/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector32438')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d2a130.anonymous.launchd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32656].subset.619',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(499),
'Program': 'launchd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'0x7f8759c1c0e0.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Google Chrome H[32285].subset.229',
'MachServices': {
'com.Breakpad.Inspector32285': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google '
'Chrome.app/Contents/Versions/21.0.1180.79/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector32285')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c15450.anonymous.sshd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(68665),
'Program': 'sshd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': 'com.apple.tiswitcher',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {
'com.apple.inputswitcher.running': 0,
'com.apple.inputswitcher.startup': 0,
'com.apple.inputswitcher.stop': 0,
},
'OnDemand': FakeCFObject(1),
'Program':
'/System/Library/CoreServices/Menu '
'Extras/TextInput.menu/Contents/SharedSupport/TISwitcher.app/Contents/MacOS/TISwitcher',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': 'com.apple.java.InstallOnDemandAgent',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {
'com.apple.java.installondemand': 0,
},
'OnDemand': FakeCFObject(1),
'Program':
'/System/Library/Java/Support/CoreDeploy.bundle/Contents/Download '
'Java Components.app/Contents/MacOS/Download Java Components',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c1a860.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32283].subset.231',
'MachServices': {
'com.Breakpad.BootstrapParent': 0,
},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32283),
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.cookied',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.cookied': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Versions/A/Frameworks/CFNetwork.framework/Versions/A/Support/cookied'
)
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'com.apple.speech.feedbackservicesserver',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.speech.feedbackservicesserver': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/Frameworks/Carbon.framework/Frameworks/SpeechRecognition.framework/Versions/A/SpeechFeedbackWindow.app/Contents/MacOS/SpeechFeedbackWindow',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label':
'0x7f8759c1d020.mach_init.crash_inspector',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Google Chrome H[32291].subset.223',
'MachServices': {
'com.Breakpad.Inspector32291': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/Applications/Google '
'Chrome.app/Contents/Versions/21.0.1180.79/Google Chrome '
'Framework.framework/Resources/crash_inspector'),
FakeCFObject('com.Breakpad.Inspector32291')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.AddressBook.abd',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.AddressBook.abd': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/AddressBook.framework/Versions/A/Resources/AddressBookManager.app/Contents/MacOS/AddressBookManager'
)
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label':
'com.apple.cfnetwork.AuthBrokerAgent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.cfnetwork.AuthBrokerAgent': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject('/System/Library/CoreServices/AuthBrokerAgent')
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.SystemUIServer.agent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.BluetoothMEDOServer': 0,
'com.apple.SUISMessaging': 0,
'com.apple.dockextra.server': 0,
'com.apple.dockling.server': 0,
'com.apple.ipodserver': 0,
'com.apple.systemuiserver.ServiceProvider': 0,
'com.apple.systemuiserver.screencapture': 0,
'com.apple.tsm.uiserver': 0,
},
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(641),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.CFPasteboardClient': 0,
'com.apple.axserver': 0,
'com.apple.coredrag': 0,
'com.apple.tsm.portname': 0,
},
'Program':
'/System/Library/CoreServices/SystemUIServer.app/Contents/MacOS/SystemUIServer',
'TimeOut':
FakeCFObject(30),
'TransactionCount':
0,
}),
FakeCFDict({
'Label':
'com.apple.safaridavclient',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.safaridavclient': 0,
'com.apple.safaridavclient.push': 0,
},
'OnDemand':
FakeCFObject(1),
'ProgramArguments': [
FakeCFObject(
'/System/Library/PrivateFrameworks/BookmarkDAV.framework/Helpers/SafariDAVClient'
)
],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': 'com.apple.Dock.agent',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {
'com.apple.dock.appstore': 0,
'com.apple.dock.downloads': 0,
'com.apple.dock.fullscreen': 0,
'com.apple.dock.server': 0,
},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(638),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.axserver': 0,
'com.apple.coredrag': 0,
},
'Program': '/System/Library/CoreServices/Dock.app/Contents/MacOS/Dock',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label':
'com.apple.TrustEvaluationAgent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.TrustEvaluationAgent': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/PrivateFrameworks/TrustEvaluationAgent.framework/Resources/trustevaluationagent',
'ProgramArguments': [FakeCFObject('trustevaluationagent')],
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.storeagent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.storeagent': 0,
'com.apple.storeagent-xpc': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/PrivateFrameworks/CommerceKit.framework/Versions/A/Resources/storeagent',
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.imklaunchagent',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Aqua',
'MachServices': {
'com.apple.inputmethodkit.launchagent': 0,
'com.apple.inputmethodkit.launcher': 0,
},
'OnDemand':
FakeCFObject(1),
'Program':
'/System/Library/Frameworks/InputMethodKit.framework/Resources/imklaunchagent',
'TimeOut':
FakeCFObject(30),
'TransactionCount':
'-1',
}),
FakeCFDict({
'Label': '0x7f8759c1cd20.anonymous.launchd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32291].subset.223',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(499),
'Program': 'launchd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d2eab0.anonymous.su',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(74539),
'Program': 'su',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c2ee80.anonymous.Google Chrome',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32282].subset.281',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(32275),
'Program': 'Google Chrome',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d2f9e0.anonymous.Google Chrome H',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'MachServices': {},
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(60518),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
'com.apple.axserver': 0,
'com.apple.tsm.portname': 0,
},
'Program': 'Google Chrome H',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c16e10.anonymous.launchd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(499),
'Program': 'launchd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'EnableTransactions':
1,
'Label':
'com.apple.mdworker.pool.0',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'Background',
'MachServices': {
'com.apple.mdworker.pool.0': 0,
},
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(68737),
'PerJobMachServices': {
'WakeUpProcessPort': 0,
},
'ProgramArguments': [
FakeCFObject(
'/System/Library/Frameworks/CoreServices.framework/Frameworks/Metadata.framework/Versions/A/Support/mdworker'
),
FakeCFObject('-s'),
FakeCFObject('mdworker'),
FakeCFObject('-c'),
FakeCFObject('MDSImporterWorker'),
FakeCFObject('-m'),
FakeCFObject('com.apple.mdworker.pool.0')
],
'TimeOut':
FakeCFObject(30),
'TransactionCount':
0,
}),
FakeCFDict({
'Label':
'0x7f8759d1f460.anonymous.launchd',
'LastExitStatus':
FakeCFObject(0),
'LimitLoadToSessionType':
'[0x0-0x4c54c5].com.google.Chrome[32275].subset.632',
'OnDemand':
FakeCFObject(1),
'PID':
FakeCFObject(499),
'Program':
'launchd',
'TimeOut':
FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d1e8f0.anonymous.launchd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Google Chrome H[32297].subset.637',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(499),
'Program': 'launchd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759c1d330.anonymous.sshd',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Background',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(68710),
'Program': 'sshd',
'TimeOut': FakeCFObject(30),
}),
FakeCFDict({
'Label': '0x7f8759d2ba80.anonymous.sudo',
'LastExitStatus': FakeCFObject(0),
'LimitLoadToSessionType': 'Aqua',
'OnDemand': FakeCFObject(1),
'PID': FakeCFObject(68719),
'Program': 'sudo',
'TimeOut': FakeCFObject(30),
})
]
# pylint: enable=g-line-too-long
|
{
"content_hash": "b616f0f588805bbcba3bae39b2cebf44",
"timestamp": "",
"source": "github",
"line_count": 2984,
"max_line_length": 193,
"avg_line_length": 31.373994638069703,
"alnum_prop": 0.5263939329203162,
"repo_name": "google/grr",
"id": "56dceb7f91efbbcaf8a331dd32e8cb0d59fa97de",
"size": "93642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/test_lib/osx_launchd_testdata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12697"
},
{
"name": "C++",
"bytes": "54814"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "366783"
},
{
"name": "JavaScript",
"bytes": "13088"
},
{
"name": "Jupyter Notebook",
"bytes": "199216"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "PowerShell",
"bytes": "531"
},
{
"name": "Python",
"bytes": "8844725"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "SCSS",
"bytes": "105120"
},
{
"name": "Shell",
"bytes": "48663"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TypeScript",
"bytes": "2139377"
}
],
"symlink_target": ""
}
|
"""Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from joblib import Parallel
from collections import defaultdict
from ..utils.validation import check_is_fitted
from ..utils.fixes import delayed
from ..utils import check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from .._config import config_context
def estimate_bandwidth(X, *, quantile=0.3, n_samples=None, random_state=0, n_jobs=None):
    """Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it's wise to set the n_samples parameter to a small value.
    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input points.
    quantile : float, default=0.3
        should be between [0, 1]
        0.5 means that the median of all pairwise distances is used.
    n_samples : int, default=None
        The number of samples to use. If not given, all samples are used.
    random_state : int, RandomState instance, default=0
        The generator used to randomly select the samples from input points
        for bandwidth estimation. Use an int to make the randomness
        deterministic.
        See :term:`Glossary <random_state>`.
    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    Returns
    -------
    bandwidth : float
        The bandwidth parameter.
    """
    X = check_array(X)
    random_state = check_random_state(random_state)
    if n_samples is not None:
        # Subsample without replacement to bound the quadratic neighbor work.
        idx = random_state.permutation(X.shape[0])[:n_samples]
        X = X[idx]
    # The k-th neighbor queried corresponds to the requested quantile of
    # the per-point neighborhood.
    n_neighbors = int(X.shape[0] * quantile)
    if n_neighbors < 1:  # cannot fit NearestNeighbors with n_neighbors = 0
        n_neighbors = 1
    nbrs = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs)
    nbrs.fit(X)
    bandwidth = 0.0
    # Sum, per sample, the distance to its farthest (n_neighbors-th) queried
    # neighbor; batching by 500 keeps the kneighbors memory footprint bounded.
    for batch in gen_batches(len(X), 500):
        d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
        bandwidth += np.max(d, axis=1).sum()
    # The estimate is the mean of those per-sample quantile distances.
    return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()["radius"]
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth, return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (
np.linalg.norm(my_mean - my_old_mean) < stop_thresh
or completed_iterations == max_iter
):
break
completed_iterations += 1
return tuple(my_mean), len(points_within), completed_iterations
def mean_shift(
    X,
    *,
    bandwidth=None,
    seeds=None,
    bin_seeding=False,
    min_bin_freq=1,
    cluster_all=True,
    max_iter=300,
    n_jobs=None,
):
    """Perform mean shift clustering of data using a flat kernel.

    Functional interface around the :class:`MeanShift` estimator: an
    estimator is constructed with the given parameters, fitted on ``X``, and
    its fitted centers and labels are returned.

    Read more in the :ref:`User Guide <mean_shift>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input data.
    bandwidth : float, default=None
        Kernel bandwidth. When None, it is estimated with
        sklearn.cluster.estimate_bandwidth, which takes time quadratic in
        the number of samples.
    seeds : array-like of shape (n_seeds, n_features) or None
        Initial kernel locations. If None and bin_seeding=False, every data
        point is used as a seed; if None and bin_seeding=True, seeds are
        derived from binning (see bin_seeding).
    bin_seeding : bool, default=False
        If True, seed from a discretized (binned) version of the points
        whose grid coarseness matches the bandwidth, which speeds things up
        by initializing fewer seeds. Ignored when seeds is not None.
    min_bin_freq : int, default=1
        Only bins containing at least this many points are accepted as
        seeds.
    cluster_all : bool, default=True
        If True, orphan points not within any kernel are assigned to the
        nearest kernel; if False, they get cluster label -1.
    max_iter : int, default=300
        Per-seed cap on iterations before that seed's climb terminates even
        without convergence.
    n_jobs : int, default=None
        Number of jobs for the parallel per-seed computation. ``None`` means
        1 unless in a :obj:`joblib.parallel_backend` context; ``-1`` uses
        all processors. See :term:`Glossary <n_jobs>`.
        .. versionadded:: 0.17
           Parallel Execution using *n_jobs*.

    Returns
    -------
    cluster_centers : ndarray of shape (n_clusters, n_features)
        Coordinates of cluster centers.
    labels : ndarray of shape (n_samples,)
        Cluster labels for each point.

    Notes
    -----
    For an example, see :ref:`examples/cluster/plot_mean_shift.py
    <sphx_glr_auto_examples_cluster_plot_mean_shift.py>`.
    """
    estimator = MeanShift(
        bandwidth=bandwidth,
        seeds=seeds,
        bin_seeding=bin_seeding,
        min_bin_freq=min_bin_freq,
        cluster_all=cluster_all,
        max_iter=max_iter,
        n_jobs=n_jobs,
    )
    estimator.fit(X)
    return estimator.cluster_centers_, estimator.labels_
def get_bin_seeds(X, bin_size, min_bin_freq=1):
    """Find seeds for mean_shift by binning the data onto a grid.

    Points are snapped to a grid whose lines are spaced ``bin_size`` apart;
    each grid cell occupied by at least ``min_bin_freq`` points contributes
    one seed (the cell center, scaled back to data coordinates).

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input points, the same points that will be used in mean_shift.
    bin_size : float
        Grid coarseness; smaller values produce more seeds (and more work).
        A sensible default is the clustering bandwidth.
    min_bin_freq : int, default=1
        Minimum number of points a bin needs to become a seed. Raising it
        yields fewer seeds and a cheaper mean_shift run.

    Returns
    -------
    bin_seeds : array-like of shape (n_samples, n_features)
        Points used as initial kernel positions in clustering.mean_shift.
    """
    if bin_size == 0:
        # A zero-sized grid degenerates to "every point is its own seed".
        return X
    # Count how many samples fall into each grid cell.
    cell_counts = {}
    for sample in X:
        cell = tuple(np.round(sample / bin_size))
        cell_counts[cell] = cell_counts.get(cell, 0) + 1
    # Keep only sufficiently populated cells as seed candidates.
    bin_seeds = np.array(
        [cell for cell, freq in cell_counts.items() if freq >= min_bin_freq],
        dtype=np.float32,
    )
    if len(bin_seeds) == len(X):
        # Binning did not reduce the seed count at all, so it buys nothing;
        # fall back to the raw data points.
        warnings.warn(
            "Binning data failed with provided bin_size=%f, using data points as seeds."
            % bin_size
        )
        return X
    # Scale cell indices back into data coordinates.
    return bin_seeds * bin_size
class MeanShift(ClusterMixin, BaseEstimator):
    """Mean shift clustering using a flat kernel.
    Mean shift clustering aims to discover "blobs" in a smooth density of
    samples. It is a centroid-based algorithm, which works by updating
    candidates for centroids to be the mean of the points within a given
    region. These candidates are then filtered in a post-processing stage to
    eliminate near-duplicates to form the final set of centroids.
    Seeding is performed using a binning technique for scalability.
    Read more in the :ref:`User Guide <mean_shift>`.
    Parameters
    ----------
    bandwidth : float, default=None
        Bandwidth used in the RBF kernel.
        If not given, the bandwidth is estimated using
        sklearn.cluster.estimate_bandwidth; see the documentation for that
        function for hints on scalability (see also the Notes, below).
    seeds : array-like of shape (n_samples, n_features), default=None
        Seeds used to initialize kernels. If not set,
        the seeds are calculated by clustering.get_bin_seeds
        with bandwidth as the grid size and default values for
        other parameters.
    bin_seeding : bool, default=False
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        The default value is False.
        Ignored if seeds argument is not None.
    min_bin_freq : int, default=1
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds.
    cluster_all : bool, default=True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.
    n_jobs : int, default=None
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    max_iter : int, default=300
        Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if has not converged yet.
        .. versionadded:: 0.22
    Attributes
    ----------
    cluster_centers_ : ndarray of shape (n_clusters, n_features)
        Coordinates of cluster centers.
    labels_ : ndarray of shape (n_samples,)
        Labels of each point.
    n_iter_ : int
        Maximum number of iterations performed on each seed.
        .. versionadded:: 0.22
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    Examples
    --------
    >>> from sklearn.cluster import MeanShift
    >>> import numpy as np
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> clustering = MeanShift(bandwidth=2).fit(X)
    >>> clustering.labels_
    array([1, 1, 1, 0, 0, 0])
    >>> clustering.predict([[0, 0], [5, 5]])
    array([1, 0])
    >>> clustering
    MeanShift(bandwidth=2)
    Notes
    -----
    Scalability:
    Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
    and T the number of points. In higher dimensions the complexity will
    tend towards O(T*n^2).
    Scalability can be boosted by using fewer seeds, for example by using
    a higher value of min_bin_freq in the get_bin_seeds function.
    Note that the estimate_bandwidth function is much less scalable than the
    mean shift algorithm and will be the bottleneck if it is used.
    References
    ----------
    Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
    feature space analysis". IEEE Transactions on Pattern Analysis and
    Machine Intelligence. 2002. pp. 603-619.
    """
    def __init__(
        self,
        *,
        bandwidth=None,
        seeds=None,
        bin_seeding=False,
        min_bin_freq=1,
        cluster_all=True,
        n_jobs=None,
        max_iter=300,
    ):
        # Parameters are stored verbatim; all validation happens in fit(),
        # per the scikit-learn estimator convention.
        self.bandwidth = bandwidth
        self.seeds = seeds
        self.bin_seeding = bin_seeding
        self.cluster_all = cluster_all
        self.min_bin_freq = min_bin_freq
        self.n_jobs = n_jobs
        self.max_iter = max_iter
    def fit(self, X, y=None):
        """Perform clustering.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to cluster.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        X = self._validate_data(X)
        bandwidth = self.bandwidth
        if bandwidth is None:
            # Quadratic-time heuristic; see estimate_bandwidth for details.
            bandwidth = estimate_bandwidth(X, n_jobs=self.n_jobs)
        elif bandwidth <= 0:
            raise ValueError(
                "bandwidth needs to be greater than zero or None, got %f" % bandwidth
            )
        seeds = self.seeds
        if seeds is None:
            if self.bin_seeding:
                seeds = get_bin_seeds(X, bandwidth, self.min_bin_freq)
            else:
                # Every sample serves as its own seed.
                seeds = X
        n_samples, n_features = X.shape
        center_intensity_dict = {}
        # We use n_jobs=1 because this will be used in nested calls under
        # parallel calls to _mean_shift_single_seed so there is no need for
        # for further parallelism.
        nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)
        # execute iterations on all seeds in parallel
        all_res = Parallel(n_jobs=self.n_jobs)(
            delayed(_mean_shift_single_seed)(seed, X, nbrs, self.max_iter)
            for seed in seeds
        )
        # copy results in a dictionary, mapping each converged center
        # (as a tuple, so it is hashable) to its kernel population
        for i in range(len(seeds)):
            if all_res[i][1]:  # i.e. len(points_within) > 0
                center_intensity_dict[all_res[i][0]] = all_res[i][1]
        self.n_iter_ = max([x[2] for x in all_res])
        if not center_intensity_dict:
            # nothing near seeds
            raise ValueError(
                "No point was within bandwidth=%f of any seed. Try a different seeding"
                " strategy or increase the bandwidth."
                % bandwidth
            )
        # POST PROCESSING: remove near duplicate points
        # If the distance between two kernels is less than the bandwidth,
        # then we have to remove one because it is a duplicate. Remove the
        # one with fewer points. (Ties are broken by center coordinates so
        # the result is deterministic.)
        sorted_by_intensity = sorted(
            center_intensity_dict.items(),
            key=lambda tup: (tup[1], tup[0]),
            reverse=True,
        )
        sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
        unique = np.ones(len(sorted_centers), dtype=bool)
        nbrs = NearestNeighbors(radius=bandwidth, n_jobs=self.n_jobs).fit(
            sorted_centers
        )
        # Walk centers from most to least populated; each surviving center
        # suppresses every other center within one bandwidth of it.
        for i, center in enumerate(sorted_centers):
            if unique[i]:
                neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[
                    0
                ]
                unique[neighbor_idxs] = 0
                unique[i] = 1  # leave the current point as unique
        cluster_centers = sorted_centers[unique]
        # ASSIGN LABELS: a point belongs to the cluster that it is closest to
        nbrs = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs).fit(cluster_centers)
        labels = np.zeros(n_samples, dtype=int)
        distances, idxs = nbrs.kneighbors(X)
        if self.cluster_all:
            labels = idxs.flatten()
        else:
            # Orphans (farther than one bandwidth from every center) keep -1.
            labels.fill(-1)
            bool_selector = distances.flatten() <= bandwidth
            labels[bool_selector] = idxs.flatten()[bool_selector]
        self.cluster_centers_, self.labels_ = cluster_centers, labels
        return self
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to predict.
        Returns
        -------
        labels : ndarray of shape (n_samples,)
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)
        # Finiteness was already checked by _validate_data, so skip the
        # redundant check inside pairwise_distances_argmin.
        with config_context(assume_finite=True):
            return pairwise_distances_argmin(X, self.cluster_centers_)
|
{
"content_hash": "76baa0c20114779ee35da93df5e57db7",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 88,
"avg_line_length": 35.16367265469062,
"alnum_prop": 0.6329681557586422,
"repo_name": "huzq/scikit-learn",
"id": "e8ece2034d0f0ca5b3c4432aa3c984f7fb038b59",
"size": "17617",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/cluster/_mean_shift.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6394128"
},
{
"name": "Shell",
"bytes": "9250"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.images.images import views
VIEWS_MOD = 'openstack_dashboard.dashboards.admin.images.images.views'
# NOTE(review): django.conf.urls.patterns() with a module-prefix string, and
# the string view reference 'download_image' below, are the legacy (pre-1.8)
# Django URLconf style; patterns() was deprecated in Django 1.8 and removed
# in 1.10 -- confirm the Django version pinned by this project before
# modernizing.
urlpatterns = patterns(VIEWS_MOD,
    # Image creation and upload workflows.
    url(r'^create/$', views.CreateView.as_view(), name='create'),
    url(r'^upload/$', views.UploadView.as_view(), name='upload'),
    # String view name: resolved against VIEWS_MOD by patterns().
    url(r'^downloadimage/$', 'download_image', name='downloadimage'),
    # Per-image routes keyed by image_id (any non-slash characters).
    url(r'^(?P<image_id>[^/]+)/update/$',
        views.UpdateView.as_view(), name='update'),
    url(r'^(?P<image_id>[^/]+)/download/$', views.DownloadImageView.as_view(), name='download'),
    # Catch-all detail view must come last so it doesn't shadow the above.
    url(r'^(?P<image_id>[^/]+)/$', views.DetailView.as_view(), name='detail'),
)
|
{
"content_hash": "49b3e0b99b893e253d677bb71b348c4b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 96,
"avg_line_length": 40.22222222222222,
"alnum_prop": 0.669889502762431,
"repo_name": "xuweiliang/Codelibrary",
"id": "9832a87df0fd879e2c7c03cc6089ba6378fd2ac9",
"size": "1488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/images/images/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "134284"
},
{
"name": "HTML",
"bytes": "830844"
},
{
"name": "JavaScript",
"bytes": "2421484"
},
{
"name": "Makefile",
"bytes": "4934"
},
{
"name": "Python",
"bytes": "17185807"
},
{
"name": "Shell",
"bytes": "9144"
}
],
"symlink_target": ""
}
|
from operator import itemgetter
from typing import Optional, cast
from libsyntyche.widgets import HBoxLayout, Label, Stretch, mk_signal1
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import Qt
from ..common import Settings
class TagInfoList(QtWidgets.QScrollArea):
    """Scrollable panel listing tags (with usage counts) or tag macros.

    The panel is hidden by default and shown by view_tags()/view_macros().
    """
    # Signals for reporting to the surrounding UI; emitted with a message
    # string. NOTE(review): neither signal is emitted in this class --
    # presumably callers or subclasses emit them; confirm.
    error = mk_signal1(str)
    print_ = mk_signal1(str)
    class TagCountBar(QtWidgets.QWidget):
        """Horizontal bar whose filled fraction equals ``percentage``."""
        def __init__(self, parent: QtWidgets.QWidget,
                     percentage: float) -> None:
            super().__init__(parent)
            # Fraction in [0, 1] of the widget width to fill when painting.
            self.percentage = percentage
        def paintEvent(self, ev: QtGui.QPaintEvent) -> None:
            # Fill from the left edge, leaving (1 - percentage) of the
            # width empty on the right; uses the palette background brush.
            right_offset = (1 - self.percentage) * ev.rect().width()
            painter = QtGui.QPainter(self)
            painter.fillRect(ev.rect().adjusted(0, 0, -int(right_offset), 0),
                             painter.background())
            painter.end()
    def __init__(self, parent: QtWidgets.QWidget, settings: Settings) -> None:
        super().__init__(parent)
        self.setSizeAdjustPolicy(self.AdjustToContents)
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        # Keep a local copy of the macro mapping and track later changes.
        self.tag_macros: dict[str, str] = settings.tag_macros.value
        settings.tag_macros.changed.connect(self.set_tag_macros)
        # Inner panel holds a 3-column grid: tag name / count / count bar.
        self.panel = QtWidgets.QWidget(self)
        self.panel.setObjectName('tag_info_list_panel')
        self.panel.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                 QtWidgets.QSizePolicy.Maximum)
        layout = QtWidgets.QGridLayout(self.panel)
        # Let the bar column absorb the horizontal stretch.
        layout.setColumnStretch(2, 1)
        layout.setHorizontalSpacing(10)
        # layout.setSizeConstraint(layout.SetMinAndMaxSize)
        # TODO: something less ugly than this
        self.setFixedHeight(200)
        self.panel.setLayout(layout)
        self.setWidget(self.panel)
        self.setWidgetResizable(True)
        # Hidden until one of the view_* methods populates it.
        self.hide()
    def clear(self) -> None:
        """Remove and schedule deletion of every widget in the grid."""
        layout = self.panel.layout()
        while not layout.isEmpty():
            item = layout.takeAt(0)
            if item and item.widget() is not None:
                item.widget().deleteLater()
    def set_tag_macros(self, tag_macros: dict[str, str]) -> None:
        """Slot: refresh the cached macro mapping when settings change."""
        self.tag_macros = tag_macros
    def _make_tag(self, tag: str) -> QtWidgets.QWidget:
        """Build a left-aligned label widget for a tag name."""
        tag_label_wrapper = QtWidgets.QWidget(self)
        tag_label = Label(tag, name='tag', parent=tag_label_wrapper)
        tag_label.setStyleSheet('background: #667;')
        # Stretch after the label keeps it hugging the left edge.
        HBoxLayout(tag_label, Stretch(), parent=tag_label_wrapper)
        return tag_label_wrapper
    def view_tags(self, tags: list[tuple[str, int]], sort_alphabetically: bool,
                  reverse: bool, name_filter: Optional[str]) -> None:
        """Populate and show the panel with (tag, count) rows.

        Note: sorts ``tags`` in place, mutating the caller's list.
        NOTE(review): raises ValueError on an empty ``tags`` list (max() of
        an empty sequence) -- presumably callers guarantee non-empty; confirm.
        """
        self.clear()
        # Largest count normalizes the bar widths.
        max_count = max(t[1] for t in tags)
        if sort_alphabetically:
            tags.sort(key=itemgetter(0))
        else:
            # Sort by name descending first so that, after the stable sort
            # by count and the reverse below, equal counts come out in
            # ascending name order.
            tags.sort(key=itemgetter(0), reverse=True)
            tags.sort(key=itemgetter(1))
        # If alphabetically, we want to default to ascending,
        # but if we're sorting by usage count, we want it descending.
        if reverse or (not sort_alphabetically and not reverse):
            tags.reverse()
        if name_filter:
            # Simple substring filter on the tag name.
            tags = [t for t in tags if name_filter in t[0]]
        layout = cast(QtWidgets.QGridLayout, self.panel.layout())
        for n, (tag, count) in enumerate(tags):
            # Tag name
            layout.addWidget(self._make_tag(tag), n, 0)
            # Tag count. NOTE(review): count is an int -- presumably Label
            # stringifies non-str values; confirm in libsyntyche.
            layout.addWidget(Label(count, name='tag_info_count', parent=self),
                             n, 1, alignment=Qt.AlignBottom)
            # Tag bar
            count_bar = self.TagCountBar(self, count / max_count)
            layout.addWidget(count_bar, n, 2)
        self.show()
    def view_macros(self) -> None:
        """Populate and show the panel with macro-name / expansion rows."""
        # TODO: better view of this
        self.clear()
        layout = cast(QtWidgets.QGridLayout, self.panel.layout())
        for n, (tag, macro) in enumerate(sorted(self.tag_macros.items())):
            # Tag macro name (displayed with its '@' invocation prefix).
            layout.addWidget(self._make_tag('@' + tag), n, 0)
            # Tag macro expression
            layout.addWidget(Label(macro, name='tag_info_macro_expression',
                                   word_wrap=True, parent=self), n, 1)
        self.show()
|
{
"content_hash": "6d7d534ac8cdd92c74cb724a732dc562",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 41.83653846153846,
"alnum_prop": 0.601241094001379,
"repo_name": "nycz/sapfo",
"id": "c52f6513fdb914611be851b1bfe07db2c811d99e",
"size": "4351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sapfo/index/taginfolist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6186"
},
{
"name": "Python",
"bytes": "157249"
}
],
"symlink_target": ""
}
|
from mail_utils.messages import (TemplateMixin, EnvelopeMixin, ImagesMixin,
TemplateMessageMixin, EnvelopedMessageMixin)
|
{
"content_hash": "4490e161b2c0d61ba426ce5e09696d56",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 75,
"avg_line_length": 62.5,
"alnum_prop": 0.84,
"repo_name": "koorgoo/django-mail-utils",
"id": "0217dc2e3bc75679521df24d8f6a73d824627d94",
"size": "125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mail_utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "145"
},
{
"name": "Python",
"bytes": "11337"
}
],
"symlink_target": ""
}
|
# Settings overlay that layers the Django Debug Toolbar on top of the base
# settings module; use this file as DJANGO_SETTINGS_MODULE for local debugging.
from settings import *  # noqa
INSTALLED_APPS += (
    'debug_toolbar',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False  # Prevent DDT from patching the settings.
# The middleware must be added explicitly because automatic patching is
# disabled above.
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
def debug_toolbar_enabled(request):
    """Callback used by the Django Debug Toolbar to decide when to display."""
    # Look up DEBUG lazily, at request time, through django.conf so we see
    # the effective runtime value rather than the one captured when this
    # settings overlay was imported.
    from django.conf import settings as live_settings
    return live_settings.DEBUG
DEBUG_TOOLBAR_CONFIG = {
    # Dotted path resolved by the toolbar at runtime. NOTE(review): the path
    # starts with 'settings.' -- presumably this module is importable under
    # that name when used as DJANGO_SETTINGS_MODULE; confirm.
    'SHOW_TOOLBAR_CALLBACK': 'settings.debug_toolbar_enabled',
    'JQUERY_URL': '',  # Use the jquery that's already on the page.
}
# Disable CSP by setting it as report only. We can't enable it because it uses
# "data:" for its logo, and it uses "unsafe eval" for some panels like the
# templates or SQL ones.
CSP_REPORT_ONLY = True
|
{
"content_hash": "c6eff8e9d730737c7b7e92e0c16d6ec7",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 32,
"alnum_prop": 0.7165178571428571,
"repo_name": "andymckay/addons-server",
"id": "bab9dac761495f9bc3c91c60f2676683f933673d",
"size": "1035",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "djdt_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "846032"
},
{
"name": "HTML",
"bytes": "1589366"
},
{
"name": "JavaScript",
"bytes": "1316196"
},
{
"name": "Makefile",
"bytes": "4442"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "4128481"
},
{
"name": "Shell",
"bytes": "9112"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
}
|
# setup.py template for the MonoSAT Python bindings. The '${...}' strings
# below are cmake configure-time placeholders; when the file is used without
# cmake they still start with '$' and the fallback defaults are applied.
from __future__ import print_function
import os.path  # importing os.path also binds 'os', used for os.environ below
import platform
import shutil
import sys
try:
    from setuptools import setup
except ImportError as e:
    # Fall back to distutils on very old environments without setuptools.
    from distutils.core import setup
if sys.version_info[0] < 3:
    sys.exit('Sorry, Python < 3 is not supported')
# Set to False to disable compiling cython modules, set to True to enable cython
use_cython = False
# let cmake configure whether we use cython or not
# this string will be replaced by cmake with a string literal in that case
cmake_use_cython = '${USE_CYTHON}'
if cmake_use_cython.startswith("$"):
    pass  # cmake did not configure this file
else:
    use_cython = (cmake_use_cython == "True")
# allow the user to set whether cython is used using an environment variable
# (overrides the cmake-configured value)
if "MONOSAT_CYTHON" in os.environ:
    use_cython = str(os.environ["MONOSAT_CYTHON"]) == "1"
# allow cmake to configure the package directory
package_dir = '${PACKAGE_DIR}'
if package_dir.startswith("$"):
    package_dir = '.'
# Build tree root; default assumes this file sits four levels below it.
library_dir = "${CMAKE_BINARY_DIR}"
if library_dir.startswith("$"):
    library_dir = "../../../../"
# Location of the MonoSAT C++ sources (needed as a Cython include dir).
monosat_path = "${CMAKE_SOURCE_DIR}/src"
if monosat_path.startswith("$"):
    monosat_path = "../../../../src/"
if use_cython:
    print("Attempting Cython installation")
    # attempt to load the cython modules
    try:
        from distutils.extension import Extension
        from Cython.Build import cythonize
        from Cython.Distutils import build_ext
        from distutils.command.sdist import sdist as _sdist
    except:
        # NOTE(review): bare except silently downgrades on ANY error (even
        # KeyboardInterrupt) -- presumably intentional best-effort fallback.
        print("Could not load cython modules, falling back on ctypes")
        use_cython = False
# Pick the platform-specific shared-library filename.
if platform.system() == "Darwin":
    sharedlib = 'libmonosat.dylib'
elif platform.system() != "Windows":
    sharedlib = 'libmonosat.so'
else:
    sharedlib = 'libmonosat.dll'
orig_lib = library_dir + "/" + sharedlib
copy_lib = package_dir + "/monosat/" + sharedlib
if os.path.exists(orig_lib):
    # only copy the library if it hasn't already been copied (this facilitates separate build/install steps)
    if not os.path.exists(copy_lib) or os.path.getmtime(orig_lib) > os.path.getmtime(copy_lib):
        shutil.copy2(orig_lib, package_dir + "/monosat/")
if not os.path.exists(package_dir + "/monosat/" + sharedlib):
    # Non-fatal: the install proceeds, but the package won't work until the
    # native library is built and the install is re-run.
    print("Warning: could not find %s. See README for instructions on compiling the library, the re-install" % (
        sharedlib), file=sys.stderr)
if use_cython:
# build the cython interface to monosat
cmdclass = {}
cmdclass.update({'build_ext': build_ext})
setup(
version='1.6',
python_requires='>3.0.0',
description='MonoSAT Cython Interface',
author='Sam Bayless',
author_email='sbayless@cs.ubc.ca',
url='http://www.cs.ubc.ca/labs/isd/projects/monosat/',
cmdclass=cmdclass,
runtime_library_dirs=['./', package_dir + "/"],
ext_modules=cythonize([Extension("monosat.monosat_p", [package_dir + "/monosat/monosat_p.pyx"],
include_dirs=[".", package_dir, package_dir + "/monosat", monosat_path],
libraries=["monosat"],
language="c", extra_compile_args=["-DNDEBUG", "-O3"]
)], include_path=[package_dir, package_dir + "/monosat"], gdb_debug=True),
install_requires=['cython'],
packages=['monosat'],
package_data={'monosat': [sharedlib]},
package_dir={'': package_dir},
)
else:
setup(name='monosat',
version='1.6',
python_requires='>3.0.0',
description='MonoSAT Python Interface',
author='Sam Bayless',
author_email='sbayless@cs.ubc.ca',
url='http://www.cs.ubc.ca/labs/isd/projects/monosat/',
packages=['monosat'],
package_data={'monosat': [sharedlib]},
package_dir={'': package_dir},
)
|
{
"content_hash": "3c8d44d8c01dd18f65e79740f492edb9",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 115,
"avg_line_length": 35.017857142857146,
"alnum_prop": 0.6228964813870475,
"repo_name": "sambayless/monosat",
"id": "acadea8799dd008018e51023401adafa9d775f87",
"size": "3945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/monosat/api/python/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33006"
},
{
"name": "C++",
"bytes": "3493499"
},
{
"name": "CMake",
"bytes": "25650"
},
{
"name": "Cython",
"bytes": "75137"
},
{
"name": "Java",
"bytes": "423109"
},
{
"name": "Python",
"bytes": "269148"
},
{
"name": "Scala",
"bytes": "3262"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import encrypt, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr
# Largest UDP datagram accepted in a single recvfrom() call.
BUF_SIZE = 65536


def client_key(source_addr, server_af):
    """Return the cache key "<host>:<port>:<af>" identifying a client flow.

    Note: the address family is that of the *server-side* socket, not the
    destination, so one client gets a distinct upstream socket per family.
    """
    host, port = source_addr[0], source_addr[1]
    return '%s:%s:%d' % (host, port, server_af)
class UDPRelay(object):
    """UDP relay for shadowsocks.

    In local mode it accepts SOCKS5 UDP datagrams from applications and
    forwards them, encrypted, to the configured server; in server mode it
    listens on the server port, decrypts, and forwards to the destination
    parsed from each packet's header.
    """
    def __init__(self, config, dns_resolver, is_local):
        self._config = config
        if is_local:
            # Local (client) side: listen locally, relay to the remote server.
            self._listen_addr = config['local_address']
            self._listen_port = config['local_port']
            self._remote_addr = config['server']
            self._remote_port = config['server_port']
        else:
            # Server side: destination comes from each packet's header,
            # so there is no fixed remote.
            self._listen_addr = config['server']
            self._listen_port = config['server_port']
            self._remote_addr = None
            self._remote_port = None
        self._dns_resolver = dns_resolver
        self._password = config['password']
        self._method = config['method']
        self._timeout = config['timeout']
        self._is_local = is_local
        # client key -> upstream socket; evicting an idle entry closes it
        # via _close_client.
        self._cache = lru_cache.LRUCache(timeout=config['timeout'],
                                         close_callback=self._close_client)
        # upstream socket fd -> downstream (client) address, used for replies.
        self._client_fd_to_server_addr = \
            lru_cache.LRUCache(timeout=config['timeout'])
        # getaddrinfo() results cached for 5 minutes.
        self._dns_cache = lru_cache.LRUCache(timeout=300)
        self._eventloop = None
        self._closed = False
        self._sockets = set()
        if 'forbidden_ip' in config:
            self._forbidden_iplist = config['forbidden_ip']
        else:
            self._forbidden_iplist = None
        addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
                                   socket.SOCK_DGRAM, socket.SOL_UDP)
        if len(addrs) == 0:
            raise Exception("can't get addrinfo for %s:%d" %
                            (self._listen_addr, self._listen_port))
        af, socktype, proto, canonname, sa = addrs[0]
        server_socket = socket.socket(af, socktype, proto)
        server_socket.bind((self._listen_addr, self._listen_port))
        # Non-blocking: the socket is driven entirely by the event loop.
        server_socket.setblocking(False)
        self._server_socket = server_socket
    def _get_a_server(self):
        """Pick a (server, port) pair, choosing randomly when lists are configured."""
        server = self._config['server']
        server_port = self._config['server_port']
        if type(server_port) == list:
            server_port = random.choice(server_port)
        if type(server) == list:
            server = random.choice(server)
        logging.debug('chosen server: %s:%d', server, server_port)
        return server, server_port
    def _close_client(self, client):
        """LRU eviction callback: tear down an idle upstream socket."""
        if hasattr(client, 'close'):
            self._sockets.remove(client.fileno())
            self._eventloop.remove(client, self)
            client.close()
        else:
            # just an address
            pass
    def _handle_server(self):
        """Handle one datagram arriving on the listening socket."""
        server = self._server_socket
        data, r_addr = server.recvfrom(BUF_SIZE)
        if not data:
            logging.debug('UDP handle_server: data is empty')
        if self._is_local:
            # SOCKS5 UDP request: bytes 0-1 reserved, byte 2 is FRAG;
            # fragmentation is not supported, so non-zero frags are dropped.
            frag = common.ord(data[2])
            if frag != 0:
                logging.warn('drop a message since frag is not 0')
                return
            else:
                data = data[3:]
        else:
            data = encrypt.encrypt_all(self._password, self._method, 0, data)
        # decryption (op=0 above) may yield nothing for bad input; bail out
        if not data:
            logging.debug('UDP handle_server: data is empty after decrypt')
            return
        header_result = parse_header(data)
        if header_result is None:
            return
        addrtype, dest_addr, dest_port, header_length = header_result
        if self._is_local:
            # Local side always relays to a shadowsocks server.
            server_addr, server_port = self._get_a_server()
        else:
            # Server side relays to the destination from the packet header.
            server_addr, server_port = dest_addr, dest_port
        addrs = self._dns_cache.get(server_addr, None)
        if addrs is None:
            # NOTE: synchronous resolution on the event-loop thread.
            addrs = socket.getaddrinfo(server_addr, server_port, 0,
                                       socket.SOCK_DGRAM, socket.SOL_UDP)
            if not addrs:
                # drop
                return
            else:
                self._dns_cache[server_addr] = addrs
        af, socktype, proto, canonname, sa = addrs[0]
        key = client_key(r_addr, af)
        logging.debug(key)
        client = self._cache.get(key, None)
        if not client:
            # First packet from this client: create a dedicated upstream
            # socket and register it with the event loop.
            # TODO async getaddrinfo
            if self._forbidden_iplist:
                if common.to_str(sa[0]) in self._forbidden_iplist:
                    logging.debug('IP %s is in forbidden list, drop' %
                                  common.to_str(sa[0]))
                    # drop
                    return
            client = socket.socket(af, socktype, proto)
            client.setblocking(False)
            self._cache[key] = client
            self._client_fd_to_server_addr[client.fileno()] = r_addr
            self._sockets.add(client.fileno())
            self._eventloop.add(client, eventloop.POLL_IN, self)
        if self._is_local:
            # Encrypt (op=1) before forwarding to the remote server.
            data = encrypt.encrypt_all(self._password, self._method, 1, data)
            if not data:
                return
        else:
            # Strip the address header; forward the raw payload upstream.
            data = data[header_length:]
        if not data:
            return
        try:
            client.sendto(data, (server_addr, server_port))
        except IOError as e:
            err = eventloop.errno_from_exception(e)
            if err in (errno.EINPROGRESS, errno.EAGAIN):
                # Transient; the datagram is simply dropped (UDP semantics).
                pass
            else:
                shell.print_exception(e)
    def _handle_client(self, sock):
        """Handle a reply datagram arriving on an upstream socket."""
        data, r_addr = sock.recvfrom(BUF_SIZE)
        if not data:
            logging.debug('UDP handle_client: data is empty')
            return
        if not self._is_local:
            # Server side: prepend the origin address header, then encrypt
            # (op=1) before sending back to the shadowsocks client.
            addrlen = len(r_addr[0])
            if addrlen > 255:
                # drop
                return
            data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
            response = encrypt.encrypt_all(self._password, self._method, 1,
                                           data)
            if not response:
                return
        else:
            # Local side: decrypt (op=0) the server's reply and rebuild the
            # SOCKS5 UDP response (RSV+FRAG prefix b'\x00\x00\x00').
            data = encrypt.encrypt_all(self._password, self._method, 0,
                                       data)
            if not data:
                return
            header_result = parse_header(data)
            if header_result is None:
                return
            # addrtype, dest_addr, dest_port, header_length = header_result
            response = b'\x00\x00\x00' + data
        client_addr = self._client_fd_to_server_addr.get(sock.fileno())
        if client_addr:
            self._server_socket.sendto(response, client_addr)
        else:
            # this packet is from somewhere else we know
            # simply drop that packet
            pass
    def add_to_loop(self, loop):
        """Register the listening socket and the periodic sweeper with *loop*."""
        if self._eventloop:
            raise Exception('already add to loop')
        if self._closed:
            raise Exception('already closed')
        self._eventloop = loop
        server_socket = self._server_socket
        self._eventloop.add(server_socket,
                            eventloop.POLL_IN | eventloop.POLL_ERR, self)
        loop.add_periodic(self.handle_periodic)
    def handle_event(self, sock, fd, event):
        """Event-loop callback: dispatch to the server- or client-side handler."""
        if sock == self._server_socket:
            if event & eventloop.POLL_ERR:
                logging.error('UDP server_socket err')
            self._handle_server()
        elif sock and (fd in self._sockets):
            if event & eventloop.POLL_ERR:
                logging.error('UDP client_socket err')
            self._handle_client(sock)
    def handle_periodic(self):
        """Sweep idle cache entries; finish teardown after close(next_tick=True)."""
        self._cache.sweep()
        self._client_fd_to_server_addr.sweep()
        if self._closed:
            self._server_socket.close()
            for sock in self._sockets:
                sock.close()
            self._eventloop.remove_periodic(self.handle_periodic)
    def close(self, next_tick=False):
        """Close the relay; with next_tick=True, defer cleanup to handle_periodic."""
        self._closed = True
        if not next_tick:
            self._eventloop.remove(self._server_socket, self)
            self._server_socket.close()
|
{
"content_hash": "13c34df7717204025571c53a4237885f",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 79,
"avg_line_length": 36.72566371681416,
"alnum_prop": 0.5390361445783133,
"repo_name": "lucienevans/shadowsocks",
"id": "b67770acca1ffe805f6f4ea84f36b45dbbc43a4f",
"size": "10603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shadowsocks/udprelay.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "145172"
},
{
"name": "Shell",
"bytes": "14934"
}
],
"symlink_target": ""
}
|
from globus_sdk.tokenstorage.base import FileAdapter, StorageAdapter
from globus_sdk.tokenstorage.file_adapters import SimpleJSONFileAdapter
from globus_sdk.tokenstorage.sqlite_adapter import SQLiteAdapter
# Public API: the two concrete adapters plus the abstract bases they extend.
__all__ = ("SimpleJSONFileAdapter", "SQLiteAdapter", "StorageAdapter", "FileAdapter")
|
{
"content_hash": "96595381d544604b5de2f2176914a4d0",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 85,
"avg_line_length": 58.6,
"alnum_prop": 0.8293515358361775,
"repo_name": "globus/globus-sdk-python",
"id": "9ed982da9b3f09e263dbfadf3c62c39081aec146",
"size": "293",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/globus_sdk/tokenstorage/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "303"
},
{
"name": "Makefile",
"bytes": "810"
},
{
"name": "Python",
"bytes": "896256"
},
{
"name": "Shell",
"bytes": "125"
}
],
"symlink_target": ""
}
|
from multiprocessing import Process
import unittest
import numpy as np
import os
from bigdl.ppml.fl import *
from bigdl.ppml.fl.estimator import Estimator
from bigdl.ppml.fl.nn.fl_server import FLServer
from bigdl.ppml.fl.nn.tensorflow.utils import set_one_like_parameter
from bigdl.ppml.fl.nn.fl_context import init_fl_context
from bigdl.ppml.fl.nn.tensorflow.estimator import TensorflowEstimator
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
from tensorflow.keras.layers import Dense, Flatten, Conv2D, InputLayer
from tensorflow.keras import Model, Input
resource_path = os.path.join(os.path.dirname(__file__), "../../resources")
class TestCorrectness(FLTest):
    """Check that VFL (split) training reproduces native TensorFlow training.

    Trains the same MNIST model twice — once end-to-end in plain TF, once
    split into client/server halves through the FL estimator — and asserts
    the per-batch loss histories match.
    """
    fmt = '%(asctime)s %(levelname)s {%(module)s:%(lineno)d} - %(message)s'
    logging.basicConfig(format=fmt, level=logging.INFO)
    tf.config.run_functions_eagerly(True) # enable step-by-step debug
    def setUp(self) -> None:
        # Start a fresh FL server on this test's port before each test.
        self.fl_server = FLServer()
        self.fl_server.set_port(self.port)
        self.fl_server.build()
        self.fl_server.start()
    def tearDown(self) -> None:
        self.fl_server.stop()
    def test_mnist(self) -> None:
        """
        The baseline training loop below is adapted from the TensorFlow
        advanced quickstart:
        https://www.tensorflow.org/tutorials/quickstart/advanced
        """
        mnist = tf.keras.datasets.mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        # Add a channels dimension
        x_train = x_train[..., tf.newaxis].astype("float32")
        x_test = x_test[..., tf.newaxis].astype("float32")
        # Use only the first 5000 samples to keep the test fast.
        train_ds = tf.data.Dataset.from_tensor_slices(
            (x_train[:5000], y_train[:5000])).batch(32)
        test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
        model = build_whole_model()
        # Fix all weights to ones so both runs start from identical parameters.
        set_one_like_parameter(model)
        loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        optimizer = tf.keras.optimizers.Adam()
        train_loss = tf.keras.metrics.Mean(name='train_loss')
        train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
        test_loss = tf.keras.metrics.Mean(name='test_loss')
        test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
        @tf.function
        def train_step(images, labels):
            with tf.GradientTape() as tape:
                # training=True is only needed if there are layers with different
                # behavior during training versus inference (e.g. Dropout).
                predictions = model(images, training=True)
                loss = loss_object(labels, predictions)
            gradients = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
            train_loss(loss)
            train_accuracy(labels, predictions)
            return loss
        @tf.function
        def test_step(images, labels):
            # training=False is only needed if there are layers with different
            # behavior during training versus inference (e.g. Dropout).
            predictions = model(images, training=False)
            t_loss = loss_object(labels, predictions)
            test_loss(t_loss)
            test_accuracy(labels, predictions)
        tensorflow_loss_history = []
        EPOCHS = 1
        for epoch in range(EPOCHS):
            # Reset the metrics at the start of the next epoch
            train_loss.reset_states()
            train_accuracy.reset_states()
            test_loss.reset_states()
            test_accuracy.reset_states()
            size = len(train_ds)
            for batch, (images, labels) in enumerate(train_ds):
                loss = train_step(images, labels)
                # Record every 10th batch loss for the later comparison.
                if batch % 10 == 0:
                    tensorflow_loss_history.append(np.array(loss))
                    logging.info(f"loss: {loss:>7f}  [{batch:>5d}/{size:>5d}] \
                        epoch {epoch}/{EPOCHS}")
            for test_images, test_labels in test_ds:
                test_step(test_images, test_labels)
            print(
                f'Epoch {epoch + 1}, '
                f'Loss: {train_loss.result()}, '
                f'Accuracy: {train_accuracy.result() * 100}, '
                f'Test Loss: {test_loss.result()}, '
                f'Test Accuracy: {test_accuracy.result() * 100}'
            )
        # Second run: the same model split into client/server halves,
        # trained through the federated-learning estimator.
        # TODO: set fixed parameters
        init_fl_context(1, self.target)
        vfl_model_1 = build_client_model()
        set_one_like_parameter(vfl_model_1)
        vfl_model_2 = build_server_model()
        set_one_like_parameter(vfl_model_2)
        vfl_client_ppl = Estimator.from_keras(client_model=vfl_model_1,
                                              loss_fn=loss_object,
                                              optimizer_cls=tf.keras.optimizers.Adam,
                                              optimizer_args={},
                                              server_model=vfl_model_2)
        vfl_client_ppl.fit(train_ds)
        assert np.allclose(tensorflow_loss_history, vfl_client_ppl.loss_history), \
            "Validation failed, correctness of PPML and native Pytorch not the same"
def build_client_model():
    """Client half of the VFL split: conv + flatten feature extractor."""
    image_in = Input(shape=(28, 28, 1))
    features = Conv2D(32, 3, activation='relu')(image_in)
    flat = Flatten()(features)
    return Model(inputs=image_in, outputs=flat, name="vfl_client_model")
def build_server_model():
    """Server half of the VFL split: dense head over the client's 21632 features."""
    feature_in = Input(shape=(21632))
    hidden = Dense(128, activation='relu')(feature_in)
    logits = Dense(10)(hidden)
    return Model(inputs=feature_in, outputs=logits, name="vfl_server_model")
def build_whole_model():
    """Reference (non-split) model: client and server halves composed end to end."""
    image_in = Input(shape=(28, 28, 1))
    hidden = Conv2D(32, 3, activation='relu')(image_in)
    hidden = Flatten()(hidden)
    hidden = Dense(128, activation='relu')(hidden)
    logits = Dense(10)(hidden)
    return Model(inputs=image_in, outputs=logits, name="vfl_whole_model")
if __name__ == '__main__':
    # Allow running this correctness test directly as a script.
    unittest.main()
|
{
"content_hash": "d0f2cbc3f3722e94dde8523f8ff24853",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 90,
"avg_line_length": 39.1474358974359,
"alnum_prop": 0.5950548550843294,
"repo_name": "yangw1234/BigDL",
"id": "df9f531e19f593f44085f42441756450f1b1b260",
"size": "6694",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/ppml/test/bigdl/ppml/fl/nn/tensorflow/test_mnist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5342"
},
{
"name": "Dockerfile",
"bytes": "138760"
},
{
"name": "Java",
"bytes": "1321348"
},
{
"name": "Jupyter Notebook",
"bytes": "54063856"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Makefile",
"bytes": "19253"
},
{
"name": "PowerShell",
"bytes": "1137"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "8762180"
},
{
"name": "RobotFramework",
"bytes": "16117"
},
{
"name": "Scala",
"bytes": "13216038"
},
{
"name": "Shell",
"bytes": "844916"
}
],
"symlink_target": ""
}
|
import asyncio
import os
import sys

# BUG FIX: comparing the sys.version *string* ('3.10.x ...' < '3.6'
# lexicographically) wrongly rejected Python 3.10+; compare version_info.
if sys.version_info < (3, 6):
    print('This script requires Python 3.6+')
    sys.exit()

# Make the repository's python/ package importable when the example is run
# straight from a source checkout (examples/py -> repo root -> python/).
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')

import ccxt.async_support as ccxt  # noqa: E402
async def poll():
    """Yield the Bittrex BTC/USDT order book forever, sleeping to respect the rate limit."""
    exchange = ccxt.bittrex()
    while True:
        orderbook = await exchange.fetch_order_book('BTC/USDT')
        yield orderbook
        await asyncio.sleep(exchange.rateLimit / 1000)
async def main():
    """Consume the order-book stream, printing the best bid and best ask."""
    async for orderbook in poll():
        best_bid = orderbook['bids'][0]
        best_ask = orderbook['asks'][0]
        print(best_bid, best_ask)


asyncio.run(main())
|
{
"content_hash": "40ceeb2ce067212c2c9d5c6a6ee45b9c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 83,
"avg_line_length": 22.555555555555557,
"alnum_prop": 0.6617405582922824,
"repo_name": "ccxt/ccxt",
"id": "efce406fd5829fd2e0d5971f3bee15d78cceabc3",
"size": "634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/py/async-bittrex-orderbook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1724"
},
{
"name": "HTML",
"bytes": "246"
},
{
"name": "JavaScript",
"bytes": "11619228"
},
{
"name": "PHP",
"bytes": "10272973"
},
{
"name": "Python",
"bytes": "9037496"
},
{
"name": "Shell",
"bytes": "6887"
}
],
"symlink_target": ""
}
|
"""Progress bars module"""
import time
import datetime
import sys
from qiskit.tools.events.pubsub import Subscriber
class BaseProgressBar(Subscriber):
    """An abstract progress bar with some shared functionality."""

    def __init__(self):
        super().__init__()
        self.type = "progressbar"   # identifies this subscriber kind
        self.touched = False        # True once start() has been called
        self.iter = None            # total number of iterations
        self.t_start = None         # wall-clock start time
        self.t_done = None          # wall-clock finish time

    def start(self, iterations):
        """Start the progress bar.

        Parameters:
            iterations (int): Number of iterations.
        """
        self.touched = True
        self.iter = int(iterations)
        self.t_start = time.time()

    def update(self, n):
        """Update status of progress bar (no-op in the base class)."""

    def time_elapsed(self):
        """Return the time elapsed since start.

        Returns:
            elapsed_time: Time since progress bar started.
        """
        return "%6.2fs" % (time.time() - self.t_start)

    def time_remaining_est(self, completed_iter):
        """Estimate the remaining time left.

        Parameters:
            completed_iter (int): Number of iterations completed.

        Returns:
            est_time: Estimated time remaining, formatted DD:HH:MM:SS.
        """
        if completed_iter:
            # Average time per finished iteration, scaled by what remains.
            per_step = (time.time() - self.t_start) / completed_iter
            t_r_est = per_step * (self.iter - completed_iter)
        else:
            t_r_est = 0
        # Render the seconds as DD:HH:MM:SS via a datetime offset from day 1.
        stamp = datetime.datetime(1, 1, 1) + datetime.timedelta(seconds=t_r_est)
        return "%02d:%02d:%02d:%02d" % (
            stamp.day - 1,
            stamp.hour,
            stamp.minute,
            stamp.second,
        )

    def finished(self):
        """Run when progress bar has completed (no-op in the base class)."""
class TextProgressBar(BaseProgressBar):
    """
    A simple text-based progress bar.
    output_handler : the handler the progress bar should be written to, default
    is sys.stdout, another option is sys.stderr
    """
    def __init__(self, output_handler=None):
        super().__init__()
        self._init_subscriber()
        self.output_handler = output_handler if output_handler else sys.stdout
    def _init_subscriber(self):
        # Wire this bar to the terra.parallel pub/sub events. The closures
        # capture each other so the finish handler can unsubscribe all three.
        def _initialize_progress_bar(num_tasks):
            """Start the bar when a parallel run begins."""
            self.start(num_tasks)
        self.subscribe("terra.parallel.start", _initialize_progress_bar)
        def _update_progress_bar(progress):
            """Advance the bar as parallel tasks complete."""
            self.update(progress)
        self.subscribe("terra.parallel.done", _update_progress_bar)
        def _finish_progress_bar():
            """Detach all handlers and finalize when the run ends."""
            self.unsubscribe("terra.parallel.start", _initialize_progress_bar)
            self.unsubscribe("terra.parallel.done", _update_progress_bar)
            self.unsubscribe("terra.parallel.finish", _finish_progress_bar)
            self.finished()
        self.subscribe("terra.parallel.finish", _finish_progress_bar)
    def start(self, iterations):
        # Initialize timing state, then draw an empty 50-character bar.
        self.touched = True
        self.iter = int(iterations)
        self.t_start = time.time()
        pbar = "-" * 50
        self.output_handler.write("\r|{}| {}{}{} [{}]".format(pbar, 0, "/", self.iter, ""))
    def update(self, n):
        # Don't update if we are not initialized or
        # the update iteration number is greater than the total iterations set on start.
        if not self.touched or n > self.iter:
            return
        filled_length = int(round(50 * n / self.iter))
        pbar = "█" * filled_length + "-" * (50 - filled_length)
        time_left = self.time_remaining_est(n)
        self.output_handler.write("\r|{}| {}{}{} [{}]".format(pbar, n, "/", self.iter, time_left))
        # Terminate the line once the final iteration is reached.
        if n == self.iter:
            self.output_handler.write("\n")
        self.output_handler.flush()
|
{
"content_hash": "12a04a24326e3c0125fa7d0d031475d6",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 98,
"avg_line_length": 30.81451612903226,
"alnum_prop": 0.5710546977231091,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "dba2ffdd84cf33ff451da162ad15e37345648795",
"size": "6123",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "qiskit/tools/events/progressbar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
}
|
from datetime import date
from dateutil.rrule import MONTHLY, rrule
from juriscraper.AbstractSite import logger
from juriscraper.lib.html_utils import get_html5_parsed_text
from juriscraper.lib.string_utils import convert_date_string
from juriscraper.OpinionSite import OpinionSite
class Site(OpinionSite):
    """Scraper for the NY Appellate Division slip-opinion index (division 1)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.court_id = self.__module__
        # One back-scrape target per month across the archive's range.
        date_keys = rrule(
            MONTHLY, dtstart=date(2003, 11, 1), until=date(2015, 8, 30)
        )
        self.back_scrape_iterable = [i.date() for i in date_keys]
        # Opinion rows are identified by a first-cell link containing "3d".
        self.row_base_path = '//tr[contains(./td[1]/a/@href, "3d")]'
        self.division = 1
        self.url = self.build_url()
    def _get_case_names(self):
        """Return the case name from column 1 of each opinion row."""
        path = f"{self.row_base_path}/td[1]"
        return [cell.text_content() for cell in self.html.xpath(path)]
    def build_url(self, target_date=False):
        """Build the index URL, optionally for a specific back-scrape month."""
        base = (
            "http://www.courts.state.ny.us/reporter/slipidx/aidxtable_%s"
            % self.division
        )
        if target_date:
            # Archive pages are keyed by year and full month name.
            return "{}_{}_{}.shtml".format(
                base,
                target_date.year,
                target_date.strftime("%B"),
            )
        else:
            return f"{base}.shtml"
    def _get_download_urls(self):
        """Return the opinion link from column 1 of each row."""
        path = f"{self.row_base_path}/td[1]//a/@href"
        return self.html.xpath(path)
    def _get_case_dates(self):
        """Return one decision date per row, taken from each table's heading."""
        case_dates = []
        for element in self.html.xpath("//caption | //center"):
            date_string = (
                element.text_content().strip().replace("Cases Decided ", "")
            )
            # <caption> lives inside its table; <center> precedes its table.
            path_prefix = (
                "./parent::"
                if element.tag == "caption"
                else "./following-sibling::"
            )
            path = f"{path_prefix}table[1]{self.row_base_path}"
            cases = element.xpath(path)
            # Every row under this heading shares the heading's date.
            case_dates.extend([convert_date_string(date_string)] * len(cases))
        return case_dates
    def _get_precedential_statuses(self):
        """All opinions on this index are treated as published."""
        return ["Published"] * len(self.case_names)
    def _get_docket_numbers(self):
        """Return column 3 text per row; empty string for blank cells."""
        path = f"{self.row_base_path}/td[3]"
        return list(
            map(
                self._add_str_to_list_where_empty_element,
                self.html.xpath(path),
            )
        )
    def _get_judges(self):
        """Return column 2 text per row; empty string for blank cells."""
        path = f"{self.row_base_path}/td[2]"
        return list(
            map(
                self._add_str_to_list_where_empty_element,
                self.html.xpath(path),
            )
        )
    def _get_citations(self):
        """Return the citation from column 4 of each row."""
        path = f"{self.row_base_path}/td[4]"
        return [cell.text_content().strip() for cell in self.html.xpath(path)]
    @staticmethod
    def _add_str_to_list_where_empty_element(element):
        """Return the element's first text node, or "" when the cell is empty."""
        string_list = element.xpath("./text()")
        return string_list[0] if string_list else ""
    def _download_backwards(self, target_date):
        """Point the scraper at the archive page for *target_date*'s month and fetch it."""
        self.crawl_date = target_date
        logger.info(f"Running backscraper with date: {target_date}")
        self.url = self.build_url(target_date=target_date)
        self.html = self._download()
    def _make_html_tree(self, text):
        """Parse the page with the html5 parser instead of the default."""
        return get_html5_parsed_text(text)
|
{
"content_hash": "5e12c55a970f27ce9c10ba58fdbbe424",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 78,
"avg_line_length": 33.255102040816325,
"alnum_prop": 0.5556919300398895,
"repo_name": "freelawproject/juriscraper",
"id": "7fd38c8a784bdf2db403365384d154011841911d",
"size": "3420",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "juriscraper/opinions/united_states/state/nyappdiv_1st.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "63242956"
},
{
"name": "Jinja",
"bytes": "2201"
},
{
"name": "Makefile",
"bytes": "75"
},
{
"name": "Python",
"bytes": "1059228"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
from hanlp.components.tokenizers.tok import NgramConvTokenizer
from hanlp.datasets.tokenization.sighan2005.msr import SIGHAN2005_MSR_TRAIN, SIGHAN2005_MSR_VALID, SIGHAN2005_MSR_TEST
from hanlp.pretrained.word2vec import CONVSEG_W2V_NEWS_TENSITE_CHAR, CONVSEG_W2V_NEWS_TENSITE_WORD_MSR
from tests import cdroot
# Train an ngram-conv Chinese word segmenter on the SIGHAN 2005 MSR corpus,
# then run a quick prediction sanity check and evaluate on the test split.
cdroot()
tokenizer = NgramConvTokenizer()
save_dir = 'data/model/cws/convseg-msr-nocrf-noembed'
tokenizer.fit(SIGHAN2005_MSR_TRAIN,
              SIGHAN2005_MSR_VALID,
              save_dir,
              # Character embeddings: pretrained vectors, fine-tuned (trainable).
              word_embed={'class_name': 'HanLP>Word2VecEmbedding',
                          'config': {
                              'trainable': True,
                              'filepath': CONVSEG_W2V_NEWS_TENSITE_CHAR,
                              'expand_vocab': False,
                              'lowercase': False,
                          }},
              # Ngram embeddings: vocabulary may be expanded to cover the corpus.
              ngram_embed={'class_name': 'HanLP>Word2VecEmbedding',
                           'config': {
                               'trainable': True,
                               'filepath': CONVSEG_W2V_NEWS_TENSITE_WORD_MSR,
                               'expand_vocab': True,
                               'lowercase': False,
                           }},
              optimizer=tf.keras.optimizers.Adam(learning_rate=0.001,
                                                 epsilon=1e-8, clipnorm=5),
              epochs=3,
              window_size=4,
              metrics='f1',
              weight_norm=True)
# Sanity-check segmentation on two sample sentences, then reload and evaluate.
print(tokenizer.predict(['中央民族乐团离开北京前往维也纳', '商品和服务']))
tokenizer.load(save_dir, metrics='f1')
tokenizer.evaluate(SIGHAN2005_MSR_TEST, save_dir=save_dir)
|
{
"content_hash": "092a3788a17d72660ffaa0f115828ab9",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 118,
"avg_line_length": 46.388888888888886,
"alnum_prop": 0.5293413173652695,
"repo_name": "hankcs/HanLP",
"id": "ed319995a6491de435d1058c902dd3b15633a541",
"size": "1775",
"binary": false,
"copies": "1",
"ref": "refs/heads/doc-zh",
"path": "plugins/hanlp_demo/hanlp_demo/zh/tf/train/cws/train_msr_cws_ngram_conv_embed.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "40933"
},
{
"name": "Jupyter Notebook",
"bytes": "566269"
},
{
"name": "Python",
"bytes": "2196905"
}
],
"symlink_target": ""
}
|
from frasco import Feature, action, flash, url_for, hook, lazy_translate
from frasco_users import current_user
from .blueprint import create_blueprint
class TwitterFeature(Feature):
    """Frasco feature that wires Twitter OAuth login and API access into the app."""
    name = "twitter"
    requires = ["users"]
    blueprints = [create_blueprint]
    defaults = {"use_screenname_as_username": False,
                "user_denied_login_message": lazy_translate("Login via Twitter was denied")}
    def init_app(self, app):
        """Register the Twitter OAuth client and extend the user model."""
        self.app = app
        # OAuth 1.0a endpoints; consumer credentials come from feature options.
        self.api = app.features.users.create_oauth_app("twitter",
            base_url='https://api.twitter.com/1.1/',
            request_token_url='https://api.twitter.com/oauth/request_token',
            access_token_url='https://api.twitter.com/oauth/access_token',
            authorize_url='https://api.twitter.com/oauth/authenticate',
            consumer_key=self.options["consumer_key"],
            consumer_secret=self.options["consumer_secret"],
            login_view="twitter_login.login")
        @self.api.tokengetter
        def token_getter(token=None):
            # Supply the current user's stored token pair, or None when
            # not logged in / not linked to Twitter.
            if not current_user.is_authenticated() or not current_user.twitter_oauth_token:
                return
            return (current_user.twitter_oauth_token, current_user.twitter_oauth_token_secret)
        # Add the Twitter columns to the users model if they are missing.
        self.model = app.features.models.ensure_model(app.features.users.model,
            twitter_oauth_token=str,
            twitter_oauth_token_secret=str,
            twitter_screenname=dict(type=str, index=True))
    @action("post_twitter_update", default_option="status")
    def post_update(self, status):
        """Post *status* as a tweet on the current user's behalf."""
        self.api.post("statuses/update.json", data={"status": status})
|
{
"content_hash": "9e387fb74f9130c187466c14a0653e90",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 94,
"avg_line_length": 44.24324324324324,
"alnum_prop": 0.6499694563225412,
"repo_name": "frascoweb/frasco-twitter",
"id": "547b2d7a5d6b108d7289d6fd6e7494e4d29df855",
"size": "1637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frasco_twitter/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3211"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax four statistics counters to accept NULL (default 0)."""
    dependencies = [
        ('plea', '0039_auto_20180522_1341'),
    ]
    operations = [
        migrations.AlterField(
            model_name='courtemailcount',
            name='total_guilty_court',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='courtemailcount',
            name='total_guilty_no_court',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='usagestats',
            name='online_guilty_attend_court_pleas',
            field=models.PositiveIntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='usagestats',
            name='online_guilty_no_court_pleas',
            field=models.PositiveIntegerField(blank=True, default=0, null=True),
        ),
    ]
|
{
"content_hash": "f9ee3e3d1d39975572361e7fd228c7ad",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 80,
"avg_line_length": 31.484848484848484,
"alnum_prop": 0.5967276227141483,
"repo_name": "ministryofjustice/manchester_traffic_offences_pleas",
"id": "4a3da0f40f55744fb131eae62b28c9ad5cb5f965",
"size": "1112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/plea/migrations/0040_auto_20180523_1512.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "867"
},
{
"name": "Gherkin",
"bytes": "10122"
},
{
"name": "HTML",
"bytes": "184454"
},
{
"name": "JavaScript",
"bytes": "52955"
},
{
"name": "Python",
"bytes": "792658"
},
{
"name": "SCSS",
"bytes": "43568"
},
{
"name": "Shell",
"bytes": "1766"
}
],
"symlink_target": ""
}
|
from django.conf.urls import *
# Versioned API routing: all /v1/ traffic goes to the v1 URL module. The
# commented line is a placeholder for promoting a future v2 to the default.
urlpatterns = [
    #url(r'', include('apps.api.v2.urls', namespace='default')),
    url(r'^v1/', include('apps.api.v1.urls', namespace='v1')),
]
|
{
"content_hash": "5849a2aa71302b66b432fd6259043c13",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 64,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.6292134831460674,
"repo_name": "MySmile/sfchat",
"id": "6aadf5e5cb51c3aea807781d73145b6b2f70adf2",
"size": "178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/api/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5563"
},
{
"name": "HTML",
"bytes": "40427"
},
{
"name": "JavaScript",
"bytes": "117323"
},
{
"name": "Makefile",
"bytes": "2488"
},
{
"name": "Python",
"bytes": "79515"
},
{
"name": "Shell",
"bytes": "1902"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
"""
A TestRunner for use with the Python unit testing framework. It
generates a HTML report to show the result at a glance.
The simplest way to use this is to invoke its main method. E.g.
import unittest
import HTMLTestRunner
... define your tests ...
if __name__ == '__main__':
HTMLTestRunner.main()
For more customization options, instantiate an HTMLTestRunner object.
HTMLTestRunner is a counterpart to unittest's TextTestRunner. E.g.
# output to a file
fp = file('my_report.html', 'wb')
runner = HTMLTestRunner.HTMLTestRunner(
stream=fp,
title='My unit test',
description='This demonstrates the report output by HTMLTestRunner.'
)
# Use an external stylesheet.
# See the Template_mixin class for more customizable options
runner.STYLESHEET_TMPL = '<link rel="stylesheet" href="my_stylesheet.css" type="text/css">'
# run the test
runner.run(my_test_suite)
------------------------------------------------------------------------
Copyright (c) 2004-2007, Wai Yip Tung
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name Wai Yip Tung nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# URL: http://tungwaiyip.info/software/HTMLTestRunner.html
__author__ = "Wai Yip Tung"
__version__ = "0.8.3"
"""
Change History
Version 0.8.3
* Prevent crash on class or module-level exceptions (Darren Wurf).
Version 0.8.2
* Show output inline instead of popup window (Viorel Lupu).
Version in 0.8.1
* Validated XHTML (Wolfgang Borgert).
* Added description of test classes and test cases.
Version in 0.8.0
* Define Template_mixin class for customization.
* Workaround a IE 6 bug that it does not treat <script> block as CDATA.
Version in 0.7.1
* Back port to Python 2.3 (Frank Horowitz).
* Fix missing scroll bars in detail log (Podi).
"""
# TODO: color stderr
# TODO: simplify javascript using more than 1 class in the class attribute?
import datetime
import StringIO
import sys
import time
import unittest
from xml.sax import saxutils
# ------------------------------------------------------------------------
# The redirectors below are used to capture output during testing. Output
# sent to sys.stdout and sys.stderr are automatically captured. However
# in some cases sys.stdout is already cached before HTMLTestRunner is
# invoked (e.g. calling logging.basicConfig). In order to capture those
# output, use the redirectors for the cached stream.
#
# e.g.
# >>> logging.basicConfig(stream=HTMLTestRunner.stdout_redirector)
# >>>
def to_unicode(s):
    """Coerce *s* to a unicode object (Python 2).

    Falls back to 'unicode_escape' decoding when *s* is a non-ASCII
    byte string that unicode() cannot decode with the default codec.
    """
    try:
        return unicode(s)
    except UnicodeDecodeError:
        # s is non ascii byte string
        return s.decode('unicode_escape')
class OutputRedirector(object):
    """File-like proxy that forwards writes to another stream.

    Everything written is first coerced to unicode via to_unicode(),
    then handed to the wrapped file object ``fp``.
    """

    def __init__(self, fp):
        self.fp = fp

    def write(self, s):
        """Write a single (possibly byte) string to the target stream."""
        self.fp.write(to_unicode(s))

    def writelines(self, lines):
        """Write a sequence of strings, coercing each to unicode first."""
        self.fp.writelines([to_unicode(line) for line in lines])

    def flush(self):
        """Flush the underlying stream."""
        self.fp.flush()
# Module-level redirector instances. _TestResult repoints their .fp at a
# per-run buffer so output written during tests can be captured; callers may
# also pass them to logging.basicConfig(stream=...) as described above.
stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)
# ----------------------------------------------------------------------
# Template
class Template_mixin(object):
    """
    Define a HTML template for report customization and generation.

    Overall structure of an HTML report

    HTML
    +------------------------+
    |<html>                  |
    |  <head>                |
    |                        |
    |   STYLESHEET           |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |  </head>               |
    |                        |
    |  <body>                |
    |                        |
    |   HEADING              |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |   REPORT               |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |   ENDING               |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |  </body>               |
    |</html>                 |
    +------------------------+
    """

    # Maps _TestResult result codes to display labels.
    STATUS = {
        0: 'pass',
        1: 'fail',
        2: 'error',
    }

    DEFAULT_TITLE = 'Unit Test Report'
    DEFAULT_DESCRIPTION = ''

    # ------------------------------------------------------------------------
    # HTML Template

    HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
    <title>%(title)s</title>
    <meta name="generator" content="%(generator)s"/>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
    %(stylesheet)s
</head>
<body>
<script language="javascript" type="text/javascript"><!--
output_list = Array();

/* level - 0:Summary; 1:Failed; 2:All */
function showCase(level) {
    trs = document.getElementsByTagName("tr");
    for (var i = 0; i < trs.length; i++) {
        tr = trs[i];
        id = tr.id;
        if (id.substr(0,2) == 'ft') {
            if (level < 1) {
                tr.className = 'hiddenRow';
            }
            else {
                tr.className = '';
            }
        }
        if (id.substr(0,2) == 'pt') {
            if (level > 1) {
                tr.className = '';
            }
            else {
                tr.className = 'hiddenRow';
            }
        }
    }
}

function showClassDetail(cid, count) {
    var id_list = Array(count);
    var toHide = 1;
    for (var i = 0; i < count; i++) {
        tid0 = 't' + cid.substr(1) + '.' + (i+1);
        tid = 'f' + tid0;
        tr = document.getElementById(tid);
        if (!tr) {
            tid = 'p' + tid0;
            tr = document.getElementById(tid);
        }
        id_list[i] = tid;
        if (tr.className) {
            toHide = 0;
        }
    }
    for (var i = 0; i < count; i++) {
        tid = id_list[i];
        if (toHide) {
            if(document.getElementById('div_'+tid)){
                document.getElementById('div_'+tid).style.display = 'none';
            }
            document.getElementById(tid).className = 'hiddenRow';
        }
        else {
            document.getElementById(tid).className = '';
        }
    }
}

function showTestDetail(div_id){
    var details_div = document.getElementById(div_id)
    var displayState = details_div.style.display
    // alert(displayState)
    if (displayState != 'block' ) {
        displayState = 'block'
        details_div.style.display = 'block'
    }
    else {
        details_div.style.display = 'none'
    }
}

function html_escape(s) {
    s = s.replace(/&/g,'&amp;');
    s = s.replace(/</g,'&lt;');
    s = s.replace(/>/g,'&gt;');
    return s;
}

/* obsoleted by detail in <div>
function showOutput(id, name) {
    var w = window.open("", //url
                    name,
                    "resizable,scrollbars,status,width=800,height=450");
    d = w.document;
    d.write("<pre>");
    d.write(html_escape(output_list[id]));
    d.write("\n");
    d.write("<a href='javascript:window.close()'>close</a>\n");
    d.write("</pre>\n");
    d.close();
}
*/
--></script>

%(heading)s
%(report)s
%(ending)s

</body>
</html>
"""
    # variables: (title, generator, stylesheet, heading, report, ending)

    # ------------------------------------------------------------------------
    # Stylesheet
    #
    # alternatively use a <link> for external style sheet, e.g.
    #   <link rel="stylesheet" href="$url" type="text/css">

    STYLESHEET_TMPL = """
<style type="text/css" media="screen">
body        { font-family: verdana, arial, helvetica, sans-serif; font-size: 80%; }
table       { font-size: 100%; }
pre         { }

/* -- heading ---------------------------------------------------------------------- */
h1 {
    font-size: 16pt;
    color: gray;
}
.heading {
    margin-top: 0ex;
    margin-bottom: 1ex;
}
.heading .attribute {
    margin-top: 1ex;
    margin-bottom: 0;
}
.heading .description {
    margin-top: 4ex;
    margin-bottom: 6ex;
}

/* -- css div popup ------------------------------------------------------------------------ */
a.popup_link {
}
a.popup_link:hover {
    color: red;
}
.popup_window {
    display: none;
    position: relative;
    left: 0px;
    top: 0px;
    padding: 10px;
    background-color: #EEE;
    font-family: "Lucida Console", "Courier New", Courier, monospace;
    text-align: left;
    font-size: 8pt;
}

/* -- report ------------------------------------------------------------------------ */
#show_detail_line {
    margin-top: 3ex;
    margin-bottom: 1ex;
}
#result_table {
    width: 80%;
    border-collapse: collapse;
    border: 1px solid #777;
}
#header_row {
    font-weight: bold;
    color: white;
    background-color: #777;
}
#result_table td {
    border: 1px solid rgba(119, 119, 119, 0.23);
    padding: 2px;
    vertical-align:top;
}
#total_row { font-weight: bold; }
.passClass  { background-color: #00c853; color: white;}
.failClass  { background-color: #fa842d; color: white;}
.errorClass { background-color: #fa2d2d; color: white;}
.passCase   { color: #00c853; }
.failCase   { color: #fa842d; background-color: #f9ede4; }
.errorCase  { color: #fa2d2d; background-color: #ffefef }
.hiddenRow  { display: none; }
.testcase   { margin-left: 2em; }

/* -- ending ---------------------------------------------------------------------- */
#ending {
}

</style>
"""

    # ------------------------------------------------------------------------
    # Heading
    #

    HEADING_TMPL = """<div class='heading'>
<h1>%(title)s</h1>
%(parameters)s
<p class='description'>%(description)s</p>
</div>

""" # variables: (title, parameters, description)

    HEADING_ATTRIBUTE_TMPL = """<p class='attribute'><strong>%(name)s:</strong> %(value)s</p>
""" # variables: (name, value)

    # ------------------------------------------------------------------------
    # Report
    #

    REPORT_TMPL = """
<p id='show_detail_line'>Show
<a href='javascript:showCase(0)'>Summary</a>
<a href='javascript:showCase(1)'>Failed</a>
<a href='javascript:showCase(2)'>All</a>
</p>
<table id='result_table'>
<colgroup>
<col align='left' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
</colgroup>
<tr id='header_row'>
    <td>Test Group/Test case</td>
    <td>Count</td>
    <td>Pass</td>
    <td>Fail</td>
    <td>Error</td>
    <td>View</td>
</tr>
%(test_list)s
<tr id='total_row'>
    <td>Total</td>
    <td>%(count)s</td>
    <td>%(Pass)s</td>
    <td>%(fail)s</td>
    <td>%(error)s</td>
    <td>&nbsp;</td>
</tr>
</table>
""" # variables: (test_list, count, Pass, fail, error)

    REPORT_CLASS_TMPL = r"""
<tr class='%(style)s'>
    <td>%(desc)s</td>
    <td>%(count)s</td>
    <td>%(Pass)s</td>
    <td>%(fail)s</td>
    <td>%(error)s</td>
    <td><a href="javascript:showClassDetail('%(cid)s',%(count)s)">Detail</a></td>
</tr>
""" # variables: (style, desc, count, Pass, fail, error, cid)

    # NOTE: u"" instead of the Python-2-only ur"" prefix; the template contains
    # no backslashes, so raw-ness is irrelevant, but the unicode prefix is
    # required for the non-ASCII close-button glyph below.
    REPORT_TEST_WITH_OUTPUT_TMPL = u"""
<tr id='%(tid)s' class='%(Class)s'>
    <td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
    <td colspan='5' align='center'>

    <!--css div popup start-->
    <a class="popup_link" onfocus='this.blur();' href="javascript:showTestDetail('div_%(tid)s')" >
        %(status)s</a>

    <div id='div_%(tid)s' class="popup_window">
        <div style='text-align: right;cursor:pointer;font-size: large;font-weight: bold;'>
        <a onfocus='this.blur();' onclick="document.getElementById('div_%(tid)s').style.display = 'none' " >
           ×</a>
        </div>
        <pre>
        %(script)s
        </pre>
    </div>
    <!--css div popup end-->

    </td>
</tr>
""" # variables: (tid, Class, style, desc, status)

    REPORT_TEST_NO_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
    <td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
    <td colspan='5' align='center'>%(status)s</td>
</tr>
""" # variables: (tid, Class, style, desc, status)

    REPORT_TEST_OUTPUT_TMPL = r"""
%(id)s: %(output)s
""" # variables: (id, output)

    # ------------------------------------------------------------------------
    # ENDING
    #

    ENDING_TMPL = """<div id='ending'>&nbsp;</div>"""

# -------------------- The end of the Template class -------------------
TestResult = unittest.TestResult

class _TestResult(TestResult):
    # note: _TestResult is a pure representation of results.
    # It lacks the output and reporting ability compared to
    # unittest._TextTestResult.

    def __init__(self, verbosity=1):
        """Create a collecting result.

        verbosity: >1 prints one line per test to stderr; otherwise a
        single progress character ('.', 'E' or 'F') per test.
        """
        TestResult.__init__(self)
        self.outputBuffer = StringIO.StringIO()  # shared stdout+stderr capture
        self.stdout0 = None  # original sys.stdout while redirection is active
        self.stderr0 = None  # original sys.stderr while redirection is active
        self.success_count = 0
        self.failure_count = 0
        self.error_count = 0
        self.verbosity = verbosity

        # result is a list of result in 4 tuple
        # (
        #   result code (0: success; 1: fail; 2: error),
        #   TestCase object,
        #   Test output (byte string),
        #   stack trace,
        # )
        self.result = []

    def startTest(self, test):
        """Begin capture: point both redirectors at this run's buffer."""
        TestResult.startTest(self, test)
        # just one buffer for both stdout and stderr
        stdout_redirector.fp = self.outputBuffer
        stderr_redirector.fp = self.outputBuffer
        self.stdout0 = sys.stdout
        self.stderr0 = sys.stderr
        sys.stdout = stdout_redirector
        sys.stderr = stderr_redirector

    def complete_output(self):
        """
        Disconnect output redirection and return buffer.
        Safe to call multiple times.
        """
        if self.stdout0:
            sys.stdout = self.stdout0
            sys.stderr = self.stderr0
            self.stdout0 = None
            self.stderr0 = None
        return self.outputBuffer.getvalue()

    def stopTest(self, test):
        # Usually one of addSuccess, addError or addFailure would have been
        # called. But there are some paths in unittest that bypass this.
        # We must disconnect stdout in stopTest(), which is guaranteed to be
        # called.
        self.complete_output()

    def addSuccess(self, test):
        """Record a passing test (code 0) and print progress to stderr."""
        self.success_count += 1
        TestResult.addSuccess(self, test)
        output = self.complete_output()
        self.result.append((0, test, output, ''))
        if self.verbosity > 1:
            sys.stderr.write('ok ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('.')

    def addError(self, test, err):
        """Record an errored test (code 2) with its formatted traceback."""
        self.error_count += 1
        TestResult.addError(self, test, err)
        # unittest stores (test, formatted traceback) pairs in self.errors
        _, _exc_str = self.errors[-1]
        output = self.complete_output()
        self.result.append((2, test, output, _exc_str))
        if self.verbosity > 1:
            sys.stderr.write('E  ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('E')

    def addFailure(self, test, err):
        """Record a failed test (code 1) with its formatted traceback."""
        self.failure_count += 1
        TestResult.addFailure(self, test, err)
        _, _exc_str = self.failures[-1]
        output = self.complete_output()
        self.result.append((1, test, output, _exc_str))
        if self.verbosity > 1:
            sys.stderr.write('F  ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('F')
class HTMLTestRunner(Template_mixin):
    """Test runner that renders results as an HTML page written to ``stream``.

    Counterpart to unittest's TextTestRunner; templates come from
    Template_mixin and may be overridden before run().
    """

    def __init__(self, stream=sys.stdout, verbosity=1, title=None, description=None):
        self.stream = stream
        self.verbosity = verbosity
        if title is None:
            self.title = self.DEFAULT_TITLE
        else:
            self.title = title
        if description is None:
            self.description = self.DEFAULT_DESCRIPTION
        else:
            self.description = description

        self.startTime = datetime.datetime.now()

    def run(self, test):
        "Run the given test case or test suite."
        result = _TestResult(self.verbosity)
        test(result)
        self.stopTime = datetime.datetime.now()
        self.generateReport(test, result)
        # sys.stderr.write instead of the Py2-only `print >>` statement;
        # behavior is identical (message plus trailing newline).
        sys.stderr.write('\nTime Elapsed: %s\n' % (self.stopTime - self.startTime))
        return result

    def sortResult(self, result_list):
        """Group (code, test, output, traceback) tuples by test class.

        unittest does not seem to run tests in any particular order; here
        at least we group them together by class, preserving first-seen
        class order.
        """
        rmap = {}
        classes = []
        for n, t, o, e in result_list:
            cls = t.__class__
            if cls not in rmap:  # `in` instead of the deprecated has_key()
                rmap[cls] = []
                classes.append(cls)
            rmap[cls].append((n, t, o, e))
        r = [(cls, rmap[cls]) for cls in classes]
        return r

    def getReportAttributes(self, result):
        """
        Return report attributes as a list of (name, value).
        Override this to add custom attributes.
        """
        startTime = str(self.startTime)[:19]
        duration = str(self.stopTime - self.startTime)
        status = []
        if result.success_count: status.append('Pass %s' % result.success_count)
        if result.failure_count: status.append('Failure %s' % result.failure_count)
        if result.error_count:   status.append('Error %s' % result.error_count)
        if status:
            status = ' '.join(status)
        else:
            status = 'none'
        return [
            ('Start Time', startTime),
            ('Duration', duration),
            ('Status', status),
        ]

    def generateReport(self, test, result):
        """Assemble the full HTML page and write it (utf-8) to self.stream."""
        report_attrs = self.getReportAttributes(result)
        generator = 'HTMLTestRunner %s' % __version__
        stylesheet = self._generate_stylesheet()
        heading = self._generate_heading(report_attrs)
        report = self._generate_report(result)
        ending = self._generate_ending()
        output = self.HTML_TMPL % dict(
            title = saxutils.escape(self.title),
            generator = generator,
            stylesheet = stylesheet,
            heading = heading,
            report = report,
            ending = ending,
        )
        self.stream.write(output.encode('utf8'))

    def _generate_stylesheet(self):
        return self.STYLESHEET_TMPL

    def _generate_heading(self, report_attrs):
        """Render the page heading from (name, value) attribute pairs."""
        a_lines = []
        for name, value in report_attrs:
            line = self.HEADING_ATTRIBUTE_TMPL % dict(
                name = saxutils.escape(name),
                value = saxutils.escape(value),
            )
            a_lines.append(line)
        heading = self.HEADING_TMPL % dict(
            title = saxutils.escape(self.title),
            parameters = ''.join(a_lines),
            description = saxutils.escape(self.description),
        )
        return heading

    def _generate_report(self, result):
        """Render the result table: one row per class plus one per test."""
        rows = []
        sortedResult = self.sortResult(result.result)
        for cid, (cls, cls_results) in enumerate(sortedResult):
            # subtotal for a class
            np = nf = ne = 0
            for n, t, o, e in cls_results:
                if n == 0: np += 1
                elif n == 1: nf += 1
                else: ne += 1

            # format class description
            if cls.__module__ == "__main__":
                name = cls.__name__
            else:
                name = "%s.%s" % (cls.__module__, cls.__name__)
            doc = cls.__doc__ and cls.__doc__.split("\n")[0] or ""
            desc = doc and '%s: %s' % (name, doc) or name

            row = self.REPORT_CLASS_TMPL % dict(
                style = ne > 0 and 'errorClass' or nf > 0 and 'failClass' or 'passClass',
                desc = desc,
                count = np+nf+ne,
                Pass = np,
                fail = nf,
                error = ne,
                cid = 'c%s' % (cid+1),
            )
            rows.append(row)

            for tid, (n, t, o, e) in enumerate(cls_results):
                self._generate_report_test(rows, cid, tid, n, t, o, e)

        report = self.REPORT_TMPL % dict(
            test_list = ''.join(rows),
            count = str(result.success_count+result.failure_count+result.error_count),
            Pass = str(result.success_count),
            fail = str(result.failure_count),
            error = str(result.error_count),
        )
        return report

    def _generate_report_test(self, rows, cid, tid, n, t, o, e):
        """Append one test row. n is the result code; o/e are output/traceback."""
        # e.g. 'pt1.1', 'ft1.1', etc
        has_output = bool(o or e)
        tid = (n == 0 and 'p' or 'f') + 't%s.%s' % (cid+1, tid+1)
        name = t.id().split('.')[-1]
        doc = t.shortDescription() or ""
        desc = doc and ('%s: %s' % (name, doc)) or name
        tmpl = has_output and self.REPORT_TEST_WITH_OUTPUT_TMPL or self.REPORT_TEST_NO_OUTPUT_TMPL

        # o and e should be byte strings because they are collected from
        # stdout and stderr.
        if isinstance(o, str):
            # TODO: some problem with 'string_escape': it escapes \n and
            # messes up formatting
            # uo = unicode(o.encode('string_escape'))
            uo = o.decode('latin-1')
        else:
            uo = o
        if isinstance(e, str):
            # TODO: some problem with 'string_escape': it escapes \n and
            # messes up formatting
            # ue = unicode(e.encode('string_escape'))
            ue = e.decode('latin-1')
        else:
            ue = e

        script = self.REPORT_TEST_OUTPUT_TMPL % dict(
            id = tid,
            output = saxutils.escape(uo+ue),
        )

        # Passed rows start hidden (summary view shows failures only).
        row = tmpl % dict(
            tid = tid,
            Class = (n == 0 and 'hiddenRow' or 'none'),
            style = n == 2 and 'errorCase' or (n == 1 and 'failCase' or 'none'),
            desc = desc,
            script = script,
            status = self.STATUS[n],
        )
        rows.append(row)
        # (dead trailing `if not has_output: return` removed — the function
        # ended immediately afterwards anyway.)

    def _generate_ending(self):
        return self.ENDING_TMPL
##############################################################################
# Facilities for running tests from the command line
##############################################################################
# Note: Reuse unittest.TestProgram to launch test. In the future we may
# build our own launcher to support more specific command line
# parameters like test title, CSS, etc.
class TestProgram(unittest.TestProgram):
    """
    A variation of the unittest.TestProgram. Please refer to the base
    class for command line parameters.
    """
    def runTests(self):
        """Run tests with HTMLTestRunner as the default runner."""
        # Pick HTMLTestRunner as the default test runner.
        # base class's testRunner parameter is not useful because it means
        # we have to instantiate HTMLTestRunner before we know self.verbosity.
        if self.testRunner is None:
            self.testRunner = HTMLTestRunner(verbosity=self.verbosity)
        unittest.TestProgram.runTests(self)
# Convenience alias so callers can write HTMLTestRunner.main(), mirroring
# unittest.main().
main = TestProgram

##############################################################################
# Executing this module from the command line
##############################################################################

if __name__ == "__main__":
    main(module=None)
|
{
"content_hash": "eb7f2f1ef0c052878ee47e559c9b0c20",
"timestamp": "",
"source": "github",
"line_count": 837,
"max_line_length": 109,
"avg_line_length": 29.675029868578257,
"alnum_prop": 0.5354295837023915,
"repo_name": "LukeMurphey/splunk-network-tools",
"id": "f747ebd8ee74f27d0ef19098424892a4ca849c37",
"size": "24839",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/HTMLTestRunner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "26768"
},
{
"name": "HTML",
"bytes": "25259"
},
{
"name": "JavaScript",
"bytes": "119795"
},
{
"name": "Python",
"bytes": "1621004"
}
],
"symlink_target": ""
}
|
"""Pens for creating UFO glyphs."""
from robofab.objects.objectsBase import MOVE, LINE, CORNER, CURVE, QCURVE, OFFCURVE
from robofab.objects.objectsRF import RContour, RSegment, RPoint
from robofab.pens.pointPen import BasePointToSegmentPen
from robofab.pens.adapterPens import SegmentToPointPen
class RFUFOPen(SegmentToPointPen):
    """Segment pen for building objectsRF glyphs.

    Thin adapter: converts segment-pen calls into point-pen calls on an
    RFUFOPointPen bound to *glyph*.
    """
    def __init__(self, glyph):
        SegmentToPointPen.__init__(self, RFUFOPointPen(glyph))
class RFUFOPointPen(BasePointToSegmentPen):
    """Point pen for building objectsRF glyphs."""

    def __init__(self, glyph):
        BasePointToSegmentPen.__init__(self)
        self.glyph = glyph

    def _flushContour(self, segments):
        """Convert buffered point-pen segments into an RContour on the glyph.

        A single named point is treated as an anchor rather than a contour.
        segments is a list of (segmentType, [(pt, smooth, name, kwargs), ...]).
        """
        #
        # adapted from robofab.pens.adapterPens.PointToSegmentPen
        #
        assert len(segments) >= 1
        # if we only have one point and it has a name, we must have an anchor
        first = segments[0]
        segmentType, points = first
        pt, smooth, name, kwargs = points[0]
        # `is not None` instead of `!= None` (identity test for None; PEP 8)
        if len(segments) == 1 and name is not None:
            self.glyph.appendAnchor(name, pt)
            return
        # we must have a contour
        contour = RContour()
        contour.setParent(self.glyph)
        if segments[0][0] == "move":
            # It's an open path.
            closed = False
            points = segments[0][1]
            assert len(points) == 1
            movePt, smooth, name, kwargs = points[0]
            del segments[0]
        else:
            # It's a closed path, do a moveTo to the last
            # point of the last segment. only if it isn't a qcurve
            closed = True
            segmentType, points = segments[-1]
            movePt, smooth, name, kwargs = points[-1]
            ## THIS IS STILL UNDECIDED!!!
            # since objectsRF currently follows the FL model of not
            # allowing open contours, remove the last segment
            # since it is being replaced by a move
            if segmentType == 'line':
                del segments[-1]
        # construct a move segment and apply it to the contour if we aren't
        # dealing with a qcurve
        segment = RSegment()
        segment.setParent(contour)
        segment.smooth = smooth
        rPoint = RPoint(x=movePt[0], y=movePt[1], pointType=MOVE, name=name)
        rPoint.setParent(segment)
        segment.points = [rPoint]
        contour.segments.append(segment)
        # do the rest of the segments
        for segmentType, points in segments:
            points = [(pt, name) for pt, smooth, name, kwargs in points]
            if segmentType == "line":
                assert len(points) == 1
                sType = LINE
            elif segmentType == "curve":
                sType = CURVE
            elif segmentType == "qcurve":
                sType = QCURVE
            else:
                assert 0, "illegal segmentType: %s" % segmentType
            segment = RSegment()
            segment.setParent(contour)
            segment.smooth = smooth
            rPoints = []
            # handle the offCurves
            for point in points[:-1]:
                point, name = point
                rPoint = RPoint(x=point[0], y=point[1], pointType=OFFCURVE, name=name)
                rPoint.setParent(segment)
                rPoints.append(rPoint)
            # now the onCurve
            point, name = points[-1]
            rPoint = RPoint(x=point[0], y=point[1], pointType=sType, name=name)
            rPoint.setParent(segment)
            rPoints.append(rPoint)
            # apply them to the segment
            segment.points = rPoints
            contour.segments.append(segment)
        # a trailing curve's on-curve point duplicates the move point's name;
        # drop it so the name is not emitted twice
        if contour.segments[-1].type == "curve":
            contour.segments[-1].points[-1].name = None
        self.glyph.contours.append(contour)

    def addComponent(self, glyphName, transform):
        """Append a component reference; only offset and scale are kept
        (objectsRF components do not store the full 2x2 transform)."""
        xx, xy, yx, yy, dx, dy = transform
        self.glyph.appendComponent(baseGlyph=glyphName, offset=(dx, dy), scale=(xx, yy))
|
{
"content_hash": "3c876329d60fbe021168becc22d12bf5",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 91,
"avg_line_length": 32.029126213592235,
"alnum_prop": 0.6938466201879357,
"repo_name": "jamesgk/robofab",
"id": "265d7aea027f0ec0bea6c4f638f1648d51a59620",
"size": "3299",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "Lib/robofab/pens/rfUFOPen.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8714"
},
{
"name": "HTML",
"bytes": "4597"
},
{
"name": "Makefile",
"bytes": "6776"
},
{
"name": "Python",
"bytes": "902044"
}
],
"symlink_target": ""
}
|
"""Init and utils."""
from zope.i18nmessageid import MessageFactory
# Message factory for i18n strings in the 'biobee.sitetheme' translation domain.
_ = MessageFactory('biobee.sitetheme')

def initialize(context):
    """Initializer called when used as a Zope 2 product.

    Intentionally empty: registration hook only; *context* is unused.
    """
|
{
"content_hash": "106db11e70ce28cf4853b7a37913d43c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 59,
"avg_line_length": 21.77777777777778,
"alnum_prop": 0.7244897959183674,
"repo_name": "a25kk/biobee",
"id": "399b48f69a957abb33fc77689839ede3ae733372",
"size": "220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/biobee.sitetheme/biobee/sitetheme/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "426222"
},
{
"name": "HTML",
"bytes": "29770"
},
{
"name": "JavaScript",
"bytes": "792827"
},
{
"name": "Makefile",
"bytes": "2579"
},
{
"name": "Python",
"bytes": "33747"
}
],
"symlink_target": ""
}
|
"""Config flow for konnected.io integration."""
import asyncio
import copy
import logging
import random
import string
from urllib.parse import urlparse
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_DOOR,
DEVICE_CLASSES_SCHEMA,
)
from homeassistant.components.ssdp import ATTR_UPNP_MANUFACTURER, ATTR_UPNP_MODEL_NAME
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_BINARY_SENSORS,
CONF_HOST,
CONF_ID,
CONF_NAME,
CONF_PORT,
CONF_SENSORS,
CONF_SWITCHES,
CONF_TYPE,
CONF_ZONE,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from .const import (
CONF_ACTIVATION,
CONF_API_HOST,
CONF_BLINK,
CONF_DEFAULT_OPTIONS,
CONF_DISCOVERY,
CONF_INVERSE,
CONF_MODEL,
CONF_MOMENTARY,
CONF_PAUSE,
CONF_POLL_INTERVAL,
CONF_REPEAT,
DOMAIN,
STATE_HIGH,
STATE_LOW,
ZONES,
)
from .errors import CannotConnect
from .panel import KONN_MODEL, KONN_MODEL_PRO, get_status
_LOGGER = logging.getLogger(__name__)

ATTR_KONN_UPNP_MODEL_NAME = "model_name"  # standard upnp is modelName

# Labels for the role a panel zone can play in the options flow.
CONF_IO = "io"
CONF_IO_DIS = "Disabled"
CONF_IO_BIN = "Binary Sensor"
CONF_IO_DIG = "Digital Sensor"
CONF_IO_SWI = "Switchable Output"

CONF_MORE_STATES = "more_states"
CONF_YES = "Yes"
CONF_NO = "No"

CONF_OVERRIDE_API_HOST = "override_api_host"

KONN_MANUFACTURER = "konnected.io"
KONN_PANEL_MODEL_NAMES = {
    KONN_MODEL: "Konnected Alarm Panel",
    KONN_MODEL_PRO: "Konnected Alarm Panel Pro",
}

# Validators restricting which roles a given zone may take.
OPTIONS_IO_ANY = vol.In([CONF_IO_DIS, CONF_IO_BIN, CONF_IO_DIG, CONF_IO_SWI])
OPTIONS_IO_INPUT_ONLY = vol.In([CONF_IO_DIS, CONF_IO_BIN, CONF_IO_DIG])
OPTIONS_IO_OUTPUT_ONLY = vol.In([CONF_IO_DIS, CONF_IO_SWI])


# Config entry schemas

# Zones 1-8 accept any role; 9-12 are input-only; the remaining named
# terminals are output-only.
IO_SCHEMA = vol.Schema(
    {
        vol.Optional("1", default=CONF_IO_DIS): OPTIONS_IO_ANY,
        vol.Optional("2", default=CONF_IO_DIS): OPTIONS_IO_ANY,
        vol.Optional("3", default=CONF_IO_DIS): OPTIONS_IO_ANY,
        vol.Optional("4", default=CONF_IO_DIS): OPTIONS_IO_ANY,
        vol.Optional("5", default=CONF_IO_DIS): OPTIONS_IO_ANY,
        vol.Optional("6", default=CONF_IO_DIS): OPTIONS_IO_ANY,
        vol.Optional("7", default=CONF_IO_DIS): OPTIONS_IO_ANY,
        vol.Optional("8", default=CONF_IO_DIS): OPTIONS_IO_ANY,
        vol.Optional("9", default=CONF_IO_DIS): OPTIONS_IO_INPUT_ONLY,
        vol.Optional("10", default=CONF_IO_DIS): OPTIONS_IO_INPUT_ONLY,
        vol.Optional("11", default=CONF_IO_DIS): OPTIONS_IO_INPUT_ONLY,
        vol.Optional("12", default=CONF_IO_DIS): OPTIONS_IO_INPUT_ONLY,
        vol.Optional("out", default=CONF_IO_DIS): OPTIONS_IO_OUTPUT_ONLY,
        vol.Optional("alarm1", default=CONF_IO_DIS): OPTIONS_IO_OUTPUT_ONLY,
        vol.Optional("out1", default=CONF_IO_DIS): OPTIONS_IO_OUTPUT_ONLY,
        vol.Optional("alarm2_out2", default=CONF_IO_DIS): OPTIONS_IO_OUTPUT_ONLY,
    }
)

BINARY_SENSOR_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_ZONE): vol.In(ZONES),
        vol.Required(CONF_TYPE, default=DEVICE_CLASS_DOOR): DEVICE_CLASSES_SCHEMA,
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_INVERSE, default=False): cv.boolean,
    }
)

SENSOR_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_ZONE): vol.In(ZONES),
        vol.Required(CONF_TYPE, default="dht"): vol.All(
            vol.Lower, vol.In(["dht", "ds18b20"])
        ),
        vol.Optional(CONF_NAME): cv.string,
        # poll interval in minutes per upstream convention — TODO confirm units
        vol.Optional(CONF_POLL_INTERVAL, default=3): vol.All(
            vol.Coerce(int), vol.Range(min=1)
        ),
    }
)

SWITCH_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_ZONE): vol.In(ZONES),
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_ACTIVATION, default=STATE_HIGH): vol.All(
            vol.Lower, vol.In([STATE_HIGH, STATE_LOW])
        ),
        vol.Optional(CONF_MOMENTARY): vol.All(vol.Coerce(int), vol.Range(min=10)),
        vol.Optional(CONF_PAUSE): vol.All(vol.Coerce(int), vol.Range(min=10)),
        vol.Optional(CONF_REPEAT): vol.All(vol.Coerce(int), vol.Range(min=-1)),
    }
)

OPTIONS_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_IO): IO_SCHEMA,
        vol.Optional(CONF_BINARY_SENSORS): vol.All(
            cv.ensure_list, [BINARY_SENSOR_SCHEMA]
        ),
        vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSOR_SCHEMA]),
        vol.Optional(CONF_SWITCHES): vol.All(cv.ensure_list, [SWITCH_SCHEMA]),
        vol.Optional(CONF_BLINK, default=True): cv.boolean,
        vol.Optional(CONF_API_HOST, default=""): vol.Any("", cv.url),
        vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
    },
    extra=vol.REMOVE_EXTRA,
)

CONFIG_ENTRY_SCHEMA = vol.Schema(
    {
        # device id: 12 hex chars (MAC without separators or chipId)
        vol.Required(CONF_ID): cv.matches_regex("[0-9a-f]{12}"),
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_PORT): cv.port,
        vol.Required(CONF_MODEL): vol.Any(*KONN_PANEL_MODEL_NAMES),
        vol.Required(CONF_ACCESS_TOKEN): cv.matches_regex("[a-zA-Z0-9]+"),
        vol.Required(CONF_DEFAULT_OPTIONS): OPTIONS_SCHEMA,
    },
    extra=vol.REMOVE_EXTRA,
)
class KonnectedFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for NEW_NAME."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
# class variable to store/share discovered host information
discovered_hosts = {}
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
    def __init__(self):
        """Initialize the Konnected flow."""
        self.data = {}
        # Start from a valid empty options dict (every zone disabled).
        self.options = OPTIONS_SCHEMA({CONF_IO: {}})
async def async_gen_config(self, host, port):
"""Populate self.data based on panel status.
This will raise CannotConnect if an error occurs
"""
self.data[CONF_HOST] = host
self.data[CONF_PORT] = port
try:
status = await get_status(self.hass, host, port)
self.data[CONF_ID] = status.get("chipId", status["mac"].replace(":", ""))
except (CannotConnect, KeyError):
raise CannotConnect
else:
self.data[CONF_MODEL] = status.get("model", KONN_MODEL)
self.data[CONF_ACCESS_TOKEN] = "".join(
random.choices(f"{string.ascii_uppercase}{string.digits}", k=20)
)
    async def async_step_import(self, device_config):
        """Import a configuration.yaml config.

        This flow is triggered by `async_setup` for configured panels.
        """
        _LOGGER.debug(device_config)

        # save the data and confirm connection via user step
        await self.async_set_unique_id(device_config["id"])
        self.options = device_config[CONF_DEFAULT_OPTIONS]

        # config schema ensures we have port if we have host
        if device_config.get(CONF_HOST):
            # automatically connect if we have host info
            return await self.async_step_user(
                user_input={
                    CONF_HOST: device_config[CONF_HOST],
                    CONF_PORT: device_config[CONF_PORT],
                }
            )

        # if we have no host info wait for it or abort if previously configured
        self._abort_if_unique_id_configured()
        return await self.async_step_import_confirm()
async def async_step_import_confirm(self, user_input=None):
"""Confirm the user wants to import the config entry."""
if user_input is None:
return self.async_show_form(
step_id="import_confirm",
description_placeholders={"id": self.unique_id},
)
# if we have ssdp discovered applicable host info use it
if KonnectedFlowHandler.discovered_hosts.get(self.unique_id):
return await self.async_step_user(
user_input={
CONF_HOST: KonnectedFlowHandler.discovered_hosts[self.unique_id][
CONF_HOST
],
CONF_PORT: KonnectedFlowHandler.discovered_hosts[self.unique_id][
CONF_PORT
],
}
)
return await self.async_step_user()
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered konnected panel.
This flow is triggered by the SSDP component. It will check if the
device is already configured and attempt to finish the config if not.
"""
_LOGGER.debug(discovery_info)
try:
if discovery_info[ATTR_UPNP_MANUFACTURER] != KONN_MANUFACTURER:
return self.async_abort(reason="not_konn_panel")
if not any(
name in discovery_info[ATTR_UPNP_MODEL_NAME]
for name in KONN_PANEL_MODEL_NAMES
):
_LOGGER.warning(
"Discovered unrecognized Konnected device %s",
discovery_info.get(ATTR_UPNP_MODEL_NAME, "Unknown"),
)
return self.async_abort(reason="not_konn_panel")
# If MAC is missing it is a bug in the device fw but we'll guard
# against it since the field is so vital
except KeyError:
_LOGGER.error("Malformed Konnected SSDP info")
else:
# extract host/port from ssdp_location
netloc = urlparse(discovery_info["ssdp_location"]).netloc.split(":")
return await self.async_step_user(
user_input={CONF_HOST: netloc[0], CONF_PORT: int(netloc[1])}
)
return self.async_abort(reason="unknown")
async def async_step_user(self, user_input=None):
"""Connect to panel and get config."""
errors = {}
if user_input:
# build config info and wait for user confirmation
self.data[CONF_HOST] = user_input[CONF_HOST]
self.data[CONF_PORT] = user_input[CONF_PORT]
# brief delay to allow processing of recent status req
await asyncio.sleep(0.1)
try:
status = await get_status(
self.hass, self.data[CONF_HOST], self.data[CONF_PORT]
)
except CannotConnect:
errors["base"] = "cannot_connect"
else:
self.data[CONF_ID] = status.get(
"chipId", status["mac"].replace(":", "")
)
self.data[CONF_MODEL] = status.get("model", KONN_MODEL)
# save off our discovered host info
KonnectedFlowHandler.discovered_hosts[self.data[CONF_ID]] = {
CONF_HOST: self.data[CONF_HOST],
CONF_PORT: self.data[CONF_PORT],
}
return await self.async_step_confirm()
return self.async_show_form(
step_id="user",
description_placeholders={
"host": self.data.get(CONF_HOST, "Unknown"),
"port": self.data.get(CONF_PORT, "Unknown"),
},
data_schema=vol.Schema(
{
vol.Required(CONF_HOST, default=self.data.get(CONF_HOST)): str,
vol.Required(CONF_PORT, default=self.data.get(CONF_PORT)): int,
}
),
errors=errors,
)
async def async_step_confirm(self, user_input=None):
"""Attempt to link with the Konnected panel.
Given a configured host, will ask the user to confirm and finalize
the connection.
"""
if user_input is None:
# abort and update an existing config entry if host info changes
await self.async_set_unique_id(self.data[CONF_ID])
self._abort_if_unique_id_configured(updates=self.data)
return self.async_show_form(
step_id="confirm",
description_placeholders={
"model": KONN_PANEL_MODEL_NAMES[self.data[CONF_MODEL]],
"id": self.unique_id,
"host": self.data[CONF_HOST],
"port": self.data[CONF_PORT],
},
)
# Create access token, attach default options and create entry
self.data[CONF_DEFAULT_OPTIONS] = self.options
self.data[CONF_ACCESS_TOKEN] = self.hass.data.get(DOMAIN, {}).get(
CONF_ACCESS_TOKEN
) or "".join(random.choices(f"{string.ascii_uppercase}{string.digits}", k=20))
return self.async_create_entry(
title=KONN_PANEL_MODEL_NAMES[self.data[CONF_MODEL]], data=self.data,
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Return the Options Flow."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handle an option flow for a Konnected Panel.

    Walks the user through IO assignment (``options_io`` and, for the pro
    model, ``options_io_ext``), then per-zone configuration for binary
    sensors, digital sensors and switches, and finally misc settings.

    Fixes over the previous revision:
    - ``self.active_cfg.upper`` was missing call parentheses in two
      description placeholders, rendering a bound-method repr instead of
      the uppercased zone name.
    - the second ``options_switch`` form validated CONF_ACTIVATION with
      ``vol.In(["low", "high"])`` while the first used
      ``vol.All(vol.Lower, vol.In([STATE_HIGH, STATE_LOW]))``; both now
      use the same (case-insensitive) validator.
    """
    def __init__(self, config_entry: config_entries.ConfigEntry):
        """Initialize options flow."""
        self.entry = config_entry
        self.model = self.entry.data[CONF_MODEL]
        # fall back to the defaults stored in the entry data on first run
        self.current_opt = self.entry.options or self.entry.data[CONF_DEFAULT_OPTIONS]
        # as config proceeds we'll build up new options and then replace what's in the config entry
        self.new_opt = {CONF_IO: {}}
        self.active_cfg = None      # zone currently being configured
        self.io_cfg = {}            # remaining zone -> io-type assignments
        self.current_states = []    # existing switch states for active zone
        self.current_state = 1      # 1-based index of the switch state shown
    @callback
    def get_current_cfg(self, io_type, zone):
        """Get the current zone config."""
        # first existing config for this zone, or {} if none
        return next(
            (
                cfg
                for cfg in self.current_opt.get(io_type, [])
                if cfg[CONF_ZONE] == zone
            ),
            {},
        )
    async def async_step_init(self, user_input=None):
        """Handle options flow."""
        return await self.async_step_options_io()
    async def async_step_options_io(self, user_input=None):
        """Configure legacy panel IO or first half of pro IO."""
        errors = {}
        current_io = self.current_opt.get(CONF_IO, {})
        if user_input is not None:
            # strip out disabled io and save for options cfg
            for key, value in user_input.items():
                if value != CONF_IO_DIS:
                    self.new_opt[CONF_IO][key] = value
            return await self.async_step_options_io_ext()
        if self.model == KONN_MODEL:
            # legacy panel: zones 1-6 plus a dedicated output
            return self.async_show_form(
                step_id="options_io",
                data_schema=vol.Schema(
                    {
                        vol.Required(
                            "1", default=current_io.get("1", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "2", default=current_io.get("2", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "3", default=current_io.get("3", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "4", default=current_io.get("4", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "5", default=current_io.get("5", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "6", default=current_io.get("6", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "out", default=current_io.get("out", CONF_IO_DIS)
                        ): OPTIONS_IO_OUTPUT_ONLY,
                    }
                ),
                description_placeholders={
                    "model": KONN_PANEL_MODEL_NAMES[self.model],
                    "host": self.entry.data[CONF_HOST],
                },
                errors=errors,
            )
        # configure the first half of the pro board io
        if self.model == KONN_MODEL_PRO:
            return self.async_show_form(
                step_id="options_io",
                data_schema=vol.Schema(
                    {
                        vol.Required(
                            "1", default=current_io.get("1", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "2", default=current_io.get("2", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "3", default=current_io.get("3", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "4", default=current_io.get("4", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "5", default=current_io.get("5", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "6", default=current_io.get("6", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "7", default=current_io.get("7", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                    }
                ),
                description_placeholders={
                    "model": KONN_PANEL_MODEL_NAMES[self.model],
                    "host": self.entry.data[CONF_HOST],
                },
                errors=errors,
            )
        return self.async_abort(reason="not_konn_panel")
    async def async_step_options_io_ext(self, user_input=None):
        """Allow the user to configure the extended IO for pro."""
        errors = {}
        current_io = self.current_opt.get(CONF_IO, {})
        if user_input is not None:
            # strip out disabled io and save for options cfg
            for key, value in user_input.items():
                if value != CONF_IO_DIS:
                    self.new_opt[CONF_IO].update({key: value})
            # snapshot the full IO map; per-zone steps pop entries from it
            self.io_cfg = copy.deepcopy(self.new_opt[CONF_IO])
            return await self.async_step_options_binary()
        if self.model == KONN_MODEL:
            # legacy panel has no extended IO; skip straight ahead
            self.io_cfg = copy.deepcopy(self.new_opt[CONF_IO])
            return await self.async_step_options_binary()
        if self.model == KONN_MODEL_PRO:
            return self.async_show_form(
                step_id="options_io_ext",
                data_schema=vol.Schema(
                    {
                        vol.Required(
                            "8", default=current_io.get("8", CONF_IO_DIS)
                        ): OPTIONS_IO_ANY,
                        vol.Required(
                            "9", default=current_io.get("9", CONF_IO_DIS)
                        ): OPTIONS_IO_INPUT_ONLY,
                        vol.Required(
                            "10", default=current_io.get("10", CONF_IO_DIS)
                        ): OPTIONS_IO_INPUT_ONLY,
                        vol.Required(
                            "11", default=current_io.get("11", CONF_IO_DIS)
                        ): OPTIONS_IO_INPUT_ONLY,
                        vol.Required(
                            "12", default=current_io.get("12", CONF_IO_DIS)
                        ): OPTIONS_IO_INPUT_ONLY,
                        vol.Required(
                            "alarm1", default=current_io.get("alarm1", CONF_IO_DIS)
                        ): OPTIONS_IO_OUTPUT_ONLY,
                        vol.Required(
                            "out1", default=current_io.get("out1", CONF_IO_DIS)
                        ): OPTIONS_IO_OUTPUT_ONLY,
                        vol.Required(
                            "alarm2_out2",
                            default=current_io.get("alarm2_out2", CONF_IO_DIS),
                        ): OPTIONS_IO_OUTPUT_ONLY,
                    }
                ),
                description_placeholders={
                    "model": KONN_PANEL_MODEL_NAMES[self.model],
                    "host": self.entry.data[CONF_HOST],
                },
                errors=errors,
            )
        return self.async_abort(reason="not_konn_panel")
    async def async_step_options_binary(self, user_input=None):
        """Allow the user to configure the IO options for binary sensors."""
        errors = {}
        if user_input is not None:
            # persist the submitted zone config and mark the zone done
            zone = {"zone": self.active_cfg}
            zone.update(user_input)
            self.new_opt[CONF_BINARY_SENSORS] = self.new_opt.get(
                CONF_BINARY_SENSORS, []
            ) + [zone]
            self.io_cfg.pop(self.active_cfg)
            self.active_cfg = None
        if self.active_cfg:
            # re-show the form for the zone already in progress
            current_cfg = self.get_current_cfg(CONF_BINARY_SENSORS, self.active_cfg)
            return self.async_show_form(
                step_id="options_binary",
                data_schema=vol.Schema(
                    {
                        vol.Required(
                            CONF_TYPE,
                            default=current_cfg.get(CONF_TYPE, DEVICE_CLASS_DOOR),
                        ): DEVICE_CLASSES_SCHEMA,
                        vol.Optional(
                            CONF_NAME, default=current_cfg.get(CONF_NAME, vol.UNDEFINED)
                        ): str,
                        vol.Optional(
                            CONF_INVERSE, default=current_cfg.get(CONF_INVERSE, False)
                        ): bool,
                    }
                ),
                description_placeholders={
                    # numeric zones read "Zone N"; named IO ("out", "alarm1")
                    # is uppercased.  Fixed: .upper was missing its parens.
                    "zone": f"Zone {self.active_cfg}"
                    if len(self.active_cfg) < 3
                    else self.active_cfg.upper()
                },
                errors=errors,
            )
        # find the next unconfigured binary sensor
        for key, value in self.io_cfg.items():
            if value == CONF_IO_BIN:
                self.active_cfg = key
                current_cfg = self.get_current_cfg(CONF_BINARY_SENSORS, self.active_cfg)
                return self.async_show_form(
                    step_id="options_binary",
                    data_schema=vol.Schema(
                        {
                            vol.Required(
                                CONF_TYPE,
                                default=current_cfg.get(CONF_TYPE, DEVICE_CLASS_DOOR),
                            ): DEVICE_CLASSES_SCHEMA,
                            vol.Optional(
                                CONF_NAME,
                                default=current_cfg.get(CONF_NAME, vol.UNDEFINED),
                            ): str,
                            vol.Optional(
                                CONF_INVERSE,
                                default=current_cfg.get(CONF_INVERSE, False),
                            ): bool,
                        }
                    ),
                    description_placeholders={
                        # Fixed: .upper was missing its call parentheses.
                        "zone": f"Zone {self.active_cfg}"
                        if len(self.active_cfg) < 3
                        else self.active_cfg.upper()
                    },
                    errors=errors,
                )
        # all binary sensors configured; move to digital sensors
        return await self.async_step_options_digital()
    async def async_step_options_digital(self, user_input=None):
        """Allow the user to configure the IO options for digital sensors."""
        errors = {}
        if user_input is not None:
            zone = {"zone": self.active_cfg}
            zone.update(user_input)
            self.new_opt[CONF_SENSORS] = self.new_opt.get(CONF_SENSORS, []) + [zone]
            self.io_cfg.pop(self.active_cfg)
            self.active_cfg = None
        if self.active_cfg:
            current_cfg = self.get_current_cfg(CONF_SENSORS, self.active_cfg)
            return self.async_show_form(
                step_id="options_digital",
                data_schema=vol.Schema(
                    {
                        vol.Required(
                            CONF_TYPE, default=current_cfg.get(CONF_TYPE, "dht")
                        ): vol.All(vol.Lower, vol.In(["dht", "ds18b20"])),
                        vol.Optional(
                            CONF_NAME, default=current_cfg.get(CONF_NAME, vol.UNDEFINED)
                        ): str,
                        vol.Optional(
                            CONF_POLL_INTERVAL,
                            default=current_cfg.get(CONF_POLL_INTERVAL, 3),
                        ): vol.All(vol.Coerce(int), vol.Range(min=1)),
                    }
                ),
                description_placeholders={
                    "zone": f"Zone {self.active_cfg}"
                    if len(self.active_cfg) < 3
                    else self.active_cfg.upper()
                },
                errors=errors,
            )
        # find the next unconfigured digital sensor
        for key, value in self.io_cfg.items():
            if value == CONF_IO_DIG:
                self.active_cfg = key
                current_cfg = self.get_current_cfg(CONF_SENSORS, self.active_cfg)
                return self.async_show_form(
                    step_id="options_digital",
                    data_schema=vol.Schema(
                        {
                            vol.Required(
                                CONF_TYPE, default=current_cfg.get(CONF_TYPE, "dht")
                            ): vol.All(vol.Lower, vol.In(["dht", "ds18b20"])),
                            vol.Optional(
                                CONF_NAME,
                                default=current_cfg.get(CONF_NAME, vol.UNDEFINED),
                            ): str,
                            vol.Optional(
                                CONF_POLL_INTERVAL,
                                default=current_cfg.get(CONF_POLL_INTERVAL, 3),
                            ): vol.All(vol.Coerce(int), vol.Range(min=1)),
                        }
                    ),
                    description_placeholders={
                        "zone": f"Zone {self.active_cfg}"
                        if len(self.active_cfg) < 3
                        else self.active_cfg.upper()
                    },
                    errors=errors,
                )
        return await self.async_step_options_switch()
    async def async_step_options_switch(self, user_input=None):
        """Allow the user to configure the IO options for switches."""
        errors = {}
        if user_input is not None:
            zone = {"zone": self.active_cfg}
            zone.update(user_input)
            # CONF_MORE_STATES only drives flow control; don't persist it
            del zone[CONF_MORE_STATES]
            self.new_opt[CONF_SWITCHES] = self.new_opt.get(CONF_SWITCHES, []) + [zone]
            # iterate through multiple switch states
            if self.current_states:
                self.current_states.pop(0)
            # only go to next zone if all states are entered
            self.current_state += 1
            if user_input[CONF_MORE_STATES] == CONF_NO:
                self.io_cfg.pop(self.active_cfg)
                self.active_cfg = None
        if self.active_cfg:
            current_cfg = next(iter(self.current_states), {})
            return self.async_show_form(
                step_id="options_switch",
                data_schema=vol.Schema(
                    {
                        vol.Optional(
                            CONF_NAME, default=current_cfg.get(CONF_NAME, vol.UNDEFINED)
                        ): str,
                        vol.Optional(
                            CONF_ACTIVATION,
                            default=current_cfg.get(CONF_ACTIVATION, STATE_HIGH),
                        ): vol.All(vol.Lower, vol.In([STATE_HIGH, STATE_LOW])),
                        vol.Optional(
                            CONF_MOMENTARY,
                            default=current_cfg.get(CONF_MOMENTARY, vol.UNDEFINED),
                        ): vol.All(vol.Coerce(int), vol.Range(min=10)),
                        vol.Optional(
                            CONF_PAUSE,
                            default=current_cfg.get(CONF_PAUSE, vol.UNDEFINED),
                        ): vol.All(vol.Coerce(int), vol.Range(min=10)),
                        vol.Optional(
                            CONF_REPEAT,
                            default=current_cfg.get(CONF_REPEAT, vol.UNDEFINED),
                        ): vol.All(vol.Coerce(int), vol.Range(min=-1)),
                        vol.Required(
                            CONF_MORE_STATES,
                            default=CONF_YES
                            if len(self.current_states) > 1
                            else CONF_NO,
                        ): vol.In([CONF_YES, CONF_NO]),
                    }
                ),
                description_placeholders={
                    "zone": f"Zone {self.active_cfg}"
                    if len(self.active_cfg) < 3
                    else self.active_cfg.upper(),
                    "state": str(self.current_state),
                },
                errors=errors,
            )
        # find the next unconfigured switch
        for key, value in self.io_cfg.items():
            if value == CONF_IO_SWI:
                self.active_cfg = key
                self.current_states = [
                    cfg
                    for cfg in self.current_opt.get(CONF_SWITCHES, [])
                    if cfg[CONF_ZONE] == self.active_cfg
                ]
                current_cfg = next(iter(self.current_states), {})
                self.current_state = 1
                return self.async_show_form(
                    step_id="options_switch",
                    data_schema=vol.Schema(
                        {
                            vol.Optional(
                                CONF_NAME,
                                default=current_cfg.get(CONF_NAME, vol.UNDEFINED),
                            ): str,
                            vol.Optional(
                                CONF_ACTIVATION,
                                default=current_cfg.get(CONF_ACTIVATION, STATE_HIGH),
                                # consistency fix: match the validator used by
                                # the other options_switch form above
                            ): vol.All(vol.Lower, vol.In([STATE_HIGH, STATE_LOW])),
                            vol.Optional(
                                CONF_MOMENTARY,
                                default=current_cfg.get(CONF_MOMENTARY, vol.UNDEFINED),
                            ): vol.All(vol.Coerce(int), vol.Range(min=10)),
                            vol.Optional(
                                CONF_PAUSE,
                                default=current_cfg.get(CONF_PAUSE, vol.UNDEFINED),
                            ): vol.All(vol.Coerce(int), vol.Range(min=10)),
                            vol.Optional(
                                CONF_REPEAT,
                                default=current_cfg.get(CONF_REPEAT, vol.UNDEFINED),
                            ): vol.All(vol.Coerce(int), vol.Range(min=-1)),
                            vol.Required(
                                CONF_MORE_STATES,
                                default=CONF_YES
                                if len(self.current_states) > 1
                                else CONF_NO,
                            ): vol.In([CONF_YES, CONF_NO]),
                        }
                    ),
                    description_placeholders={
                        "zone": f"Zone {self.active_cfg}"
                        if len(self.active_cfg) < 3
                        else self.active_cfg.upper(),
                        "state": str(self.current_state),
                    },
                    errors=errors,
                )
        return await self.async_step_options_misc()
    async def async_step_options_misc(self, user_input=None):
        """Allow the user to configure the LED behavior."""
        errors = {}
        if user_input is not None:
            # config schema only does basic schema val so check url here
            try:
                if user_input[CONF_OVERRIDE_API_HOST]:
                    cv.url(user_input.get(CONF_API_HOST, ""))
                else:
                    user_input[CONF_API_HOST] = ""
            except vol.Invalid:
                errors["base"] = "bad_host"
            else:
                # no need to store the override - can infer
                del user_input[CONF_OVERRIDE_API_HOST]
                self.new_opt.update(user_input)
                return self.async_create_entry(title="", data=self.new_opt)
        return self.async_show_form(
            step_id="options_misc",
            data_schema=vol.Schema(
                {
                    vol.Required(
                        CONF_BLINK, default=self.current_opt.get(CONF_BLINK, True)
                    ): bool,
                    vol.Required(
                        CONF_OVERRIDE_API_HOST,
                        default=bool(self.current_opt.get(CONF_API_HOST)),
                    ): bool,
                    vol.Optional(
                        CONF_API_HOST, default=self.current_opt.get(CONF_API_HOST, "")
                    ): str,
                }
            ),
            errors=errors,
        )
|
{
"content_hash": "914c8bf95b1236ef972b1cbb82daa620",
"timestamp": "",
"source": "github",
"line_count": 824,
"max_line_length": 99,
"avg_line_length": 40.19660194174757,
"alnum_prop": 0.49118410723990097,
"repo_name": "pschmitt/home-assistant",
"id": "f545c5f2f2a26de7da10deddcda7288d831fa20a",
"size": "33122",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/konnected/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
}
|
# URL configuration for the fullcalendar demo project.
# NOTE(review): uses the pre-Django-1.8 ``patterns()`` helper with string
# view paths; this API was removed in Django 1.10 -- confirm the pinned
# Django version before upgrading.
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Register every installed app's admin.py with the admin site.
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    url(r'^$', 'demo.views.index', name='index'),
    url(r'^all_events/', 'demo.views.all_events', name='all_events'),
    url(r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "036c9d115bf2edf38ca5a79a622cdad2",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.66875,
"repo_name": "rodrigoamaral/django-fullcalendar",
"id": "512082b787daa3e4fcabf204381640c2e31b73e2",
"size": "320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/demo/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11815"
},
{
"name": "HTML",
"bytes": "1672"
},
{
"name": "JavaScript",
"bytes": "154287"
},
{
"name": "Python",
"bytes": "14526"
}
],
"symlink_target": ""
}
|
"""
APE - a productive environment
"""
from __future__ import print_function
# Tasks specified here are available globally.
#
# WARNING: importing ape.tasks at the module level leads to a cyclic import for
# global tasks in this file, so import it inside the task function.
# The effect is specific to this file - you may import ape.tasks directly
# at the module level in tasks modules of features.
#
# Names of features selected for the current product.  Empty by default;
# presumably populated elsewhere -- TODO confirm, it is not referenced in
# this module.
FEATURE_SELECTION = []
def help(task):
    '''print help on specific task

    Delegates to the global task registry; see the module-level warning
    about why ``ape.tasks`` must be imported inside the function body.
    '''
    from ape import tasks as ape_tasks
    ape_tasks.help(taskname=task)
def explain_feature(featurename):
    '''print the location of single feature and its version

    if the feature is located inside a git repository,
    this will also print the git-rev and modified files

    Fixes over the previous revision:
    - on ImportError the function used to fall through and dereference the
      undefined ``feature_module``, raising a NameError; it now returns
      after printing the error.
    - subprocess output is decoded so the substring check and printing
      behave the same on Python 2 and 3 (``communicate()`` returns bytes
      on Python 3).
    '''
    import os
    import featuremonkey
    import importlib
    import subprocess
    def guess_version(feature_module):
        # prefer an explicit __version__, then a get_version() callable
        if hasattr(feature_module, '__version__'):
            return feature_module.__version__
        if hasattr(feature_module, 'get_version'):
            return feature_module.get_version()
        return ('unable to determine version:'
                ' please add __version__ or get_version()'
                ' to this feature module!')
    def git_rev(module):
        # current HEAD of the repo containing the module, or '-' if none
        stdout, stderr = subprocess.Popen(
            ["git", "rev-parse", "HEAD"],
            cwd=os.path.dirname(module.__file__),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        ).communicate()
        stdout = stdout.decode('utf-8', 'replace')
        stderr = stderr.decode('utf-8', 'replace')
        if 'Not a git repo' in stderr:
            return '-'
        else:
            return stdout.strip()
    def git_changes(module):
        # names of locally modified files, or '-' if the tree is clean
        stdout = subprocess.Popen(
            ["git", "diff", "--name-only"],
            cwd=os.path.dirname(module.__file__),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        ).communicate()[0]
        return stdout.decode('utf-8', 'replace').strip() or '-'
    if featurename in featuremonkey.get_features_from_equation_file(os.environ['PRODUCT_EQUATION_FILENAME']):
        print()
        print(featurename)
        print('-' * 60)
        print()
        is_subfeature = '.features.' in featurename
        try:
            feature_module = importlib.import_module(featurename)
        except ImportError:
            print('Error: unable to import feature "%s"' % featurename)
            # bug fix: previously fell through and hit a NameError on
            # feature_module below; nothing more can be reported, so stop.
            return
        print('Location: %s' % os.path.dirname(feature_module.__file__))
        print()
        if is_subfeature:
            print('Version: see parent feature')
            print()
        else:
            print('Version: %s' % str(guess_version(feature_module)))
            print()
            print('git: %s' % git_rev(feature_module))
            print()
            print('git changed: %s' % '\n\t\t'.join(git_changes(feature_module).split('\n')))
    else:
        print('No feature named ' + featurename)
def explain_features():
    '''print the location of each feature and its version
    if the feature is located inside a git repository, this will also print the git-rev and modified files
    '''
    import os
    import featuremonkey
    from ape import tasks
    equation_file = os.environ['PRODUCT_EQUATION_FILENAME']
    # delegate per-feature reporting to the explain_feature task
    for name in featuremonkey.get_features_from_equation_file(equation_file):
        tasks.explain_feature(name)
def selftest():
    '''run ape tests'''
    from ape import tests
    # fail loudly if any of the bundled tests did not pass
    if not tests.run_all().wasSuccessful():
        raise Exception('Selftests failed! :(')
|
{
"content_hash": "7bf78010dc1e3981c4889c34d91549b6",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 109,
"avg_line_length": 30.394736842105264,
"alnum_prop": 0.6138528138528139,
"repo_name": "henzk/ape",
"id": "42f84c378f4fbfdb40f27677294732afbf7a5afb",
"size": "3465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ape/_tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75028"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
}
|
"""Tests for prompt generation."""
import unittest
import os
from IPython.testing import tools as tt, decorators as dec
from IPython.core.prompts import PromptManager, LazyEvaluate
from IPython.testing.globalipapp import get_ipython
from IPython.utils.tempdir import TemporaryDirectory
from IPython.utils import py3compat
from IPython.utils.py3compat import unicode_type
# Shared fixture: the global test InteractiveShell used by every PromptTests
# method below (module-level so it is created once per test run).
ip = get_ipython()
class PromptTests(unittest.TestCase):
    """Unit tests for PromptManager rendering and LazyEvaluate."""
    def setUp(self):
        # fresh PromptManager per test, bound to the shared global shell
        self.pm = PromptManager(shell=ip, config=ip.config)
    def test_multiline_prompt(self):
        # width/txtwidth must reflect only the last line of the template
        self.pm.in_template = "[In]\n>>>"
        self.pm.render('in')
        self.assertEqual(self.pm.width, 3)
        self.assertEqual(self.pm.txtwidth, 3)
        # a template ending in a newline has an empty last line
        self.pm.in_template = '[In]\n'
        self.pm.render('in')
        self.assertEqual(self.pm.width, 0)
        self.assertEqual(self.pm.txtwidth, 0)
    def test_translate_abbreviations(self):
        # setting in_template triggers translation into templates['in']
        def do_translate(template):
            self.pm.in_template = template
            return self.pm.templates['in']
        pairs = [(r'%n>', '{color.number}{count}{color.prompt}>'),
                 (r'\T', '{time}'),
                 (r'\n', '\n')
                 ]
        tt.check_pairs(do_translate, pairs)
    def test_user_ns(self):
        # names from the user namespace are available in templates
        self.pm.color_scheme = 'NoColor'
        ip.ex("foo='bar'")
        self.pm.in_template = "In [{foo}]"
        prompt = self.pm.render('in')
        self.assertEqual(prompt, u'In [bar]')
    def test_builtins(self):
        # builtins are resolvable too; repr() of the object is inserted
        self.pm.color_scheme = 'NoColor'
        self.pm.in_template = "In [{int}]"
        prompt = self.pm.render('in')
        self.assertEqual(prompt, u"In [%r]" % int)
    def test_undefined(self):
        # unknown names render an inline error instead of raising
        self.pm.color_scheme = 'NoColor'
        self.pm.in_template = "In [{foo_dne}]"
        prompt = self.pm.render('in')
        self.assertEqual(prompt, u"In [<ERROR: 'foo_dne' not found>]")
    def test_render(self):
        # \# expands to the shell's execution count
        self.pm.in_template = r'\#>'
        self.assertEqual(self.pm.render('in',color=False), '%d>' % ip.execution_count)
    @dec.onlyif_unicode_paths
    def test_render_unicode_cwd(self):
        # \w (cwd) must survive non-ASCII directory names
        save = py3compat.getcwd()
        with TemporaryDirectory(u'ünicødé') as td:
            os.chdir(td)
            self.pm.in_template = r'\w [\#]'
            p = self.pm.render('in', color=False)
            self.assertEqual(p, u"%s [%i]" % (py3compat.getcwd(), ip.execution_count))
        os.chdir(save)
    def test_lazy_eval_unicode(self):
        u = u'ünicødé'
        lz = LazyEvaluate(lambda : u)
        # str(lz) would fail
        self.assertEqual(unicode_type(lz), u)
        self.assertEqual(format(lz), u)
    def test_lazy_eval_nonascii_bytes(self):
        u = u'ünicødé'
        b = u.encode('utf8')
        lz = LazyEvaluate(lambda : b)
        # unicode(lz) would fail
        self.assertEqual(str(lz), str(b))
        self.assertEqual(format(lz), str(b))
    def test_lazy_eval_float(self):
        # floats support str/unicode/format, including format specs
        f = 0.503
        lz = LazyEvaluate(lambda : f)
        self.assertEqual(str(lz), str(f))
        self.assertEqual(unicode_type(lz), unicode_type(f))
        self.assertEqual(format(lz), str(f))
        self.assertEqual(format(lz, '.1'), '0.5')
    @dec.skip_win32
    def test_cwd_x(self):
        # \X0 abbreviates the home directory to '~'
        self.pm.in_template = r"\X0"
        save = py3compat.getcwd()
        os.chdir(os.path.expanduser('~'))
        p = self.pm.render('in', color=False)
        try:
            self.assertEqual(p, '~')
        finally:
            # always restore the working directory for later tests
            os.chdir(save)
|
{
"content_hash": "6760b7a8b2eb1da684ce67650b97b1b2",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 86,
"avg_line_length": 32.027027027027025,
"alnum_prop": 0.5727144866385373,
"repo_name": "WillisXChen/django-oscar",
"id": "7226af1209b16b041496ba57b42f67fed5bf8c05",
"size": "3584",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/IPython/core/tests/test_prompts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "78"
},
{
"name": "C",
"bytes": "5979"
},
{
"name": "C++",
"bytes": "572"
},
{
"name": "CSS",
"bytes": "694578"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Groff",
"bytes": "21346"
},
{
"name": "HTML",
"bytes": "708061"
},
{
"name": "JavaScript",
"bytes": "1433937"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Makefile",
"bytes": "6656"
},
{
"name": "Python",
"bytes": "47548581"
},
{
"name": "Shell",
"bytes": "6790"
},
{
"name": "Smarty",
"bytes": "21023"
},
{
"name": "TeX",
"bytes": "56837"
},
{
"name": "XSLT",
"bytes": "24882"
}
],
"symlink_target": ""
}
|
import csv #! Comma-Separated Values - tabular data in plain text.
import os #! Miscellaneous operating system interfaces.
import sys #! System-specific parameters and functions.
from time import sleep #! Sleep - for command-line animation.
#! Function for printing slowly (animation) -- ex: print_slowly ('Hello World!')
def print_slowly(text):
    '''Print text one glyph at a time for a typewriter effect.

    Fix: uses sys.stdout.write() instead of ``print glyph,`` -- the
    print-comma form inserted a space between every character, so the
    banner came out as "f a r m i g o ..." instead of "farmigo...".
    '''
    for glyph in text:
        sys.stdout.write(glyph)
        # stdout is buffered; flush so each glyph appears immediately.
        sys.stdout.flush()
        sleep(0.05)
#! Let the user know it's working, slowly.
# NOTE(review): this script is Python 2 only (statement-form print).
print ''
print_slowly ('farmigotools\n\nft_farmigo_to_route4me.py')
print ''
print ''
sleep(0.3)
#! Make a new folder in the current directory for the outputed files.
output_dir = 'CSVs_4_route4me'
print ''
print ''
print 'Making a new folder called -- ' + output_dir
sleep(0.3)
try:
    os.makedirs(output_dir)
except OSError:
    # Directory already exists, move on.
    pass
#! Loop through every file in the current working directory
for csv_input_filename in os.listdir('.'):
    if not csv_input_filename.endswith('.csv'):
        continue # skip non-csv files
    print ''
    print 'Processing -- ' + csv_input_filename + '...'
    print ''
    #! Print column headers
    # NOTE(review): csv_input_file is never closed -- relies on CPython
    # refcounting; consider a with-block if this is ever revised.
    csv_input_file = open(csv_input_filename)
    # output keeps the same filename, written into output_dir
    csv_output_file = open(os.path.join(output_dir, csv_input_filename), 'w')
    csv_reader = csv.reader(csv_input_file)
    csv_writer = csv.writer(csv_output_file)
    #! create variables from csv
    # NOTE(review): no header row is skipped here -- a header line, if
    # present, would be reformatted as data; confirm input files have none.
    for row in csv_reader:
        # fixed column layout of the Farmigo export (columns 0 and 6 unused)
        bag_type = row[1]
        last_name = row[2]
        first_name = row[3]
        primary_phone = row[4]
        secondary_phone = row[5]
        address = row[7]
        city = row[8]
        state = row[9]
        zipcode = row[10]
        blank = None
        #! output reformated CSV
        # route4me layout: bag, full address, "name | phones", run label
        output_row = [
            bag_type,
            address + ' ' + city + ' ' + state + ' ' + zipcode,
            first_name + ' ' + last_name + ' | ' +
            primary_phone + ' | ' + secondary_phone,
            'Run 1']
        csv_writer.writerow(output_row)
    csv_output_file.close()
#! Let the user know it's done
print_slowly ('## DONE!')
print ('')
print ('')
|
{
"content_hash": "922e39984a4582c9581dde8bd884f5b5",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 30.23611111111111,
"alnum_prop": 0.6067983463481855,
"repo_name": "eliheuer/FarmigoDataTools",
"id": "8f8002dfb5bb00290639b16e747b2218f697e94d",
"size": "2180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f_isolate_pm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9440"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from sentry.models import GroupStatus
from sentry.testutils import TestCase
from sentry.search.utils import parse_query
class ParseQueryTest(TestCase):
    """Exercise sentry.search.utils.parse_query across query shapes."""
    def test_simple(self):
        # plain text: no tags, query passed through untouched
        assert parse_query('foo bar', self.user) == {'tags': {}, 'query': 'foo bar'}
    def test_useless_prefix(self):
        # "key: value" (space after colon) is not a tag
        assert parse_query('foo: bar', self.user) == {'tags': {}, 'query': 'foo: bar'}
    def test_mix_tag_and_query(self):
        expected = {'tags': {'key': 'value'}, 'query': 'foo bar'}
        assert parse_query('foo bar key:value', self.user) == expected
    def test_single_tag(self):
        assert parse_query('key:value', self.user) == {'tags': {'key': 'value'}, 'query': ''}
    def test_tag_with_colon_in_value(self):
        # only the first colon splits key from value
        expected = {'tags': {'url': 'http://example.com'}, 'query': ''}
        assert parse_query('url:http://example.com', self.user) == expected
    def test_multiple_tags(self):
        expected = {'tags': {'key': 'value', 'foo': 'bar'}, 'query': ''}
        assert parse_query('foo:bar key:value', self.user) == expected
    def test_single_tag_with_quotes(self):
        # quotes are stripped from the tag value
        assert parse_query('foo:"bar"', self.user) == {'tags': {'foo': 'bar'}, 'query': ''}
    def test_tag_with_quotes_and_query(self):
        # quoted value may contain spaces; remainder stays in the query
        expected = {'tags': {'key': 'a value'}, 'query': 'hello'}
        assert parse_query('key:"a value" hello', self.user) == expected
    def test_is_resolved(self):
        expected = {'status': GroupStatus.RESOLVED, 'tags': {}, 'query': ''}
        assert parse_query('is:resolved', self.user) == expected
    def test_assigned_me(self):
        # "me" resolves to the requesting user
        expected = {'assigned_to': self.user, 'tags': {}, 'query': ''}
        assert parse_query('assigned:me', self.user) == expected
    def test_assigned_email(self):
        expected = {'assigned_to': self.user, 'tags': {}, 'query': ''}
        assert parse_query('assigned:%s' % (self.user.email,), self.user) == expected
    def test_assigned_unknown_user(self):
        # unknown assignee resolves to a sentinel user with id 0
        assert parse_query('assigned:fake@example.com', self.user)['assigned_to'].id == 0
|
{
"content_hash": "6fa35040890db6faefaa88bdd820b986",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 82,
"avg_line_length": 39.763636363636365,
"alnum_prop": 0.5925925925925926,
"repo_name": "argonemyth/sentry",
"id": "055cd2a9e9a0c0aae8b5abe70ebbdaf57031501e",
"size": "2187",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/sentry/search/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583460"
},
{
"name": "HTML",
"bytes": "316232"
},
{
"name": "JavaScript",
"bytes": "608538"
},
{
"name": "Makefile",
"bytes": "2715"
},
{
"name": "Python",
"bytes": "5918211"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchMSELoss(ModelLayer):
    """Layer producing a mean squared error loss.

    Expects an input record with scalar 'label' and 'prediction' fields
    and outputs a single float32 scalar loss blob.
    """
    def __init__(self, model, input_record, name='batch_mse_loss', **kwargs):
        """Validate the input record shape and declare the scalar output."""
        super(BatchMSELoss, self).__init__(model, name, input_record, **kwargs)
        # require at least 'label' and 'prediction' scalar fields
        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar())
            ),
            input_record
        )
        # loss layers are training-only; skip them at prediction time
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output'))
    def add_ops(self, net):
        """Emit the net ops: squeeze, (cast), stop-gradient, L2, average."""
        # drop the trailing singleton dim so prediction matches label shape
        prediction = net.Squeeze(
            self.input_record.prediction(),
            net.NextScopedBlob('squeezed_prediction'),
            dims=[1]
        )
        label = self.input_record.label.field_blobs()
        # cast label to the prediction dtype when they differ
        if self.input_record.label.field_type().base != (
                self.input_record.prediction.field_type().base):
            label = net.Cast(
                label,
                net.NextScopedBlob('cast_label'),
                to=schema.data_type_for_dtype(
                    self.input_record.prediction.field_type()
                )
            )
        # labels are targets, not parameters: block gradient flow into them
        label = net.StopGradient(
            label,
            net.NextScopedBlob('stopped_label')
        )
        # per-example squared L2 distance, then mean over the batch
        l2dist = net.SquaredL2Distance(
            [label, prediction],
            net.NextScopedBlob('l2')
        )
        net.AveragedLoss(l2dist, self.output_schema.field_blobs())
|
{
"content_hash": "d0fa0cd363f0b81e908d8c6b50daf120",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 28.746031746031747,
"alnum_prop": 0.5681943677526229,
"repo_name": "pietern/caffe2",
"id": "85b1802b5c5c9420683a96da68e38ae09e8edfb1",
"size": "2554",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "caffe2/python/layers/batch_mse_loss.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5415"
},
{
"name": "C",
"bytes": "316608"
},
{
"name": "C++",
"bytes": "4743501"
},
{
"name": "CMake",
"bytes": "139649"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "671183"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Makefile",
"bytes": "1225"
},
{
"name": "Metal",
"bytes": "36752"
},
{
"name": "Objective-C",
"bytes": "6505"
},
{
"name": "Objective-C++",
"bytes": "239139"
},
{
"name": "Python",
"bytes": "2902249"
},
{
"name": "Shell",
"bytes": "31734"
}
],
"symlink_target": ""
}
|
"""Admin application."""
import importlib
import pkgutil
from flask_admin import Admin
from flask_admin.base import AdminIndexView, MenuLink
from flask.ext.admin.contrib.sqla import ModelView
from flask_login import current_user
from raven.contrib.flask import Sentry
from pygotham import factory, filters
__all__ = ('create_app',)
class HomeView(AdminIndexView):
    """Admin index view visible only to authenticated admin users."""

    def is_accessible(self):
        # Flask-Admin consults this before serving the index; anyone
        # without the admin role is denied.
        user_is_admin = current_user.has_role('admin')
        return user_is_admin
def create_app(settings_override=None):
    """Return the PyGotham admin application.

    :param settings_override: a ``dict`` of settings to override.
    """
    app = factory.create_app(__name__, __path__, settings_override)
    Sentry(app)
    app.jinja_env.filters['rst'] = filters.rst_to_html

    # The admin is wrapped inside another app, so it is mounted at /
    # instead of Flask-Admin's default /admin/. Static assets would not
    # resolve under that remapping, hence static_url_path is overridden
    # too.
    admin = Admin(
        app, name='PyGotham',
        static_url_path='/admin',
        subdomain='<event_slug>',
        index_view=HomeView(endpoint='', url='/'),
        template_mode='bootstrap3',
    )

    # Walk every module in this package and register any public
    # Flask-Admin view or menu link it exposes.
    for _, module_name, _ in pkgutil.iter_modules(__path__):
        module = importlib.import_module('{}.{}'.format(__name__, module_name))
        for attr_name in dir(module):
            candidate = getattr(module, attr_name)
            if isinstance(candidate, ModelView):
                admin.add_view(candidate)
            elif isinstance(candidate, MenuLink):
                admin.add_link(candidate)

    return app
|
{
"content_hash": "d06dd5743f03e004faff73d1da836f48",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 72,
"avg_line_length": 31.88135593220339,
"alnum_prop": 0.6608187134502924,
"repo_name": "PyGotham/pygotham",
"id": "7d218855d9b739bbb8f997829c79400842f03488",
"size": "1881",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pygotham/admin/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "56204"
},
{
"name": "HTML",
"bytes": "37411"
},
{
"name": "JavaScript",
"bytes": "116"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "113486"
},
{
"name": "Ruby",
"bytes": "1526"
},
{
"name": "Shell",
"bytes": "587"
}
],
"symlink_target": ""
}
|
from ..Qt import QtGui, QtCore, QT_LIB
import matplotlib

# Pick the matplotlib Qt backend that matches whichever Qt binding
# pyqtgraph detected: qt4agg for PyQt4/PySide, qt5agg for PyQt5.
if QT_LIB != 'PyQt5':
    if QT_LIB == 'PySide':
        # The qt4 backend defaults to PyQt4; tell it to use PySide instead.
        matplotlib.rcParams['backend.qt4']='PySide'
    from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
    try:
        # Older matplotlib releases exported the toolbar under this name.
        from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
    except ImportError:
        from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
else:
    from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
    from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class MatplotlibWidget(QtGui.QWidget):
    """
    A QWidget embedding a Matplotlib figure with its navigation toolbar.

    Use getFigure() to obtain the Figure and draw() to refresh the canvas.

    Example::

        mw = MatplotlibWidget()
        subplot = mw.getFigure().add_subplot(111)
        subplot.plot(x,y)
        mw.draw()
    """

    def __init__(self, size=(5.0, 4.0), dpi=100):
        QtGui.QWidget.__init__(self)
        self.fig = Figure(size, dpi=dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self)
        self.toolbar = NavigationToolbar(self.canvas, self)

        # Stack the toolbar above the canvas.
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.toolbar)
        layout.addWidget(self.canvas)
        self.vbox = layout
        self.setLayout(layout)

    def getFigure(self):
        """Return the underlying matplotlib Figure."""
        return self.fig

    def draw(self):
        """Re-render the canvas after the figure has been modified."""
        self.canvas.draw()
|
{
"content_hash": "dbb3137f577b1af2ad99435ee6acb9ff",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 99,
"avg_line_length": 32.10204081632653,
"alnum_prop": 0.6713286713286714,
"repo_name": "meganbkratz/acq4",
"id": "c5b6c980188d92cf52baec1c331caf9bcb699f94",
"size": "1573",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "acq4/pyqtgraph/widgets/MatplotlibWidget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Arduino",
"bytes": "18651"
},
{
"name": "Batchfile",
"bytes": "64"
},
{
"name": "C",
"bytes": "705091"
},
{
"name": "C++",
"bytes": "321384"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "MATLAB",
"bytes": "1752"
},
{
"name": "Objective-C",
"bytes": "596020"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "5922488"
}
],
"symlink_target": ""
}
|
import roslib
roslib.load_manifest('faa_image_processing')
import sys
import rospy
import cv
import cv2
import numpy
import os
import copy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from faa_utilities import FileTools
from faa_actuation.msg import ActuationState
from faa_image_processing import Tracker, Drawer, Parameters
from faa_image_processing.srv import SaveImage, SaveImageResponse
from faa_image_processing.srv import SetTracking, SetTrackingResponse
from faa_image_processing.srv import GetParameters, GetParametersResponse
from faa_image_processing.srv import SetParameter, SetParameterResponse
from faa_image_processing.srv import SetArrayParameter, SetArrayParameterResponse
from faa_image_processing.srv import SetStatus, SetStatusResponse
from faa_image_processing.msg import TrackingData, TunnelData
file_tools = FileTools()
class ImageProcessor(object):
    """ROS node body for fly-assay image processing.

    Subscribes to conditioned camera frames, optionally runs fly tracking
    and overlay drawing on each frame, and republishes processed images
    plus per-tunnel tracking data. Also exposes services for saving
    background images and toggling tracking/parameters at runtime.
    Written for Python 2 / legacy OpenCV (``cv``) and cv_bridge.
    """
    def __init__(self):
        # Wire up ROS pub/sub, services, and the tracking/drawing helpers.
        self.reusing_bg_images = rospy.get_param('/camera/faa_image_processing/reusing_bg_images')
        self.image_p_pub = rospy.Publisher("image_processed",Image)
        self.image_d_pub = rospy.Publisher("data_image",Image)
        self.tracking_data_pub = rospy.Publisher("tracking_data",TrackingData)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("image_conditioned",Image,self.conditioned_image_callback)
        self.actuation_state_sub = rospy.Subscriber("/faa_actuation/actuation_state",ActuationState,self.actuation_state_callback)
        self.tracking = False
        self.drawing = False
        self.tracker = Tracker()
        self.drawer = Drawer()
        self.parameters = Parameters()
        self.tracker.setParameters(self.parameters)
        self.drawer.setParameters(self.parameters)
        # Debug-only HighGUI windows; disabled by default.
        self.display_images = False
        if self.display_images:
            # cv.NamedWindow("Image Processed", 1)
            # cv.NamedWindow("Image Tracked", 1)
            cv.NamedWindow("Image Data", 1)
        # Latest conditioned frame, kept for save_background_image_callback.
        self.image_conditioned = None
        self.sbi = rospy.Service('/faa_image_processing/save_background_image', SaveImage, self.save_background_image_callback)
        self.st = rospy.Service('/faa_image_processing/set_tracking', SetTracking, self.set_tracking_callback)
        self.gp = rospy.Service('/faa_image_processing/get_parameters', GetParameters, self.get_parameters_callback)
        self.sp = rospy.Service('/faa_image_processing/set_parameter', SetParameter, self.set_parameter_callback)
        self.sap = rospy.Service('/faa_image_processing/set_array_parameter', SetArrayParameter, self.set_array_parameter_callback)
        self.ss = rospy.Service('/faa_image_processing/set_status', SetStatus, self.set_status_callback)
    def conditioned_image_callback(self,data):
        """Per-frame pipeline: track (if enabled), publish data, draw, republish.

        :param data: sensor_msgs/Image with the conditioned camera frame.
        """
        self.parameters.increment_image_count()
        self.tracker.setParameters(self.parameters)
        self.drawer.setParameters(self.parameters)
        try:
            image_cv = self.bridge.imgmsg_to_cv(data, "passthrough")
        except CvBridgeError, e:
            # NOTE(review): on conversion failure image_cv stays undefined and
            # the next line will raise NameError — consider returning here.
            print e
        image_np = numpy.asarray(image_cv)
        self.image_conditioned = numpy.copy(image_np)
        if self.tracking:
            # data_tracked,image_tracked = self.tracker.processImage(image_np)
            data_tracked,data_image = self.tracker.processImage(image_np)
            tracking_data = TrackingData()
            tracking_data.header = data.header
            tracking_data.image_count = self.parameters.image_count
            # rospy.logwarn(str(data_tracked))
            # try:
            #     data_tracked_tunnels = data_tracked['tunnels']
            # except KeyError:
            #     data_tracked_tunnels = []
            # if 0 < len(data_tracked_tunnels):
            #     for tunnel in range(len(data_tracked_tunnels)):
            # Build one TunnelData message per tunnel, defaulting every
            # field and filling in whatever the tracker reported.
            tunnel_data_list = []
            tunnels = range(self.parameters.tunnel_count)
            for tunnel in tunnels:
                tunnel_data = TunnelData()
                enabled = self.parameters.tunnels_enabled[tunnel]
                tunnel_data.tunnel = tunnel
                tunnel_data.enabled = enabled
                tunnel_data.gate0 = ""
                tunnel_data.gate1 = ""
                tunnel_data.gate2 = ""
                tunnel_data.fly_x = 0
                tunnel_data.fly_y = 0
                tunnel_data.fly_angle = 0
                tunnel_data.chamber = ""
                tunnel_data.blob_x = 0
                tunnel_data.blob_y = 0
                tunnel_data.blob_area = 0
                tunnel_data.blob_slope = 0
                tunnel_data.blob_ecc = 0
                if enabled:
                    # Gate states and fly/blob measurements may each be
                    # absent independently; missing keys leave defaults.
                    try:
                        tunnel_data.gate0 = data_tracked[tunnel]['gate0']
                        tunnel_data.gate1 = data_tracked[tunnel]['gate1']
                        tunnel_data.gate2 = data_tracked[tunnel]['gate2']
                    except KeyError:
                        pass
                    try:
                        tunnel_data.fly_x = data_tracked[tunnel]['fly_x']
                        tunnel_data.fly_y = data_tracked[tunnel]['fly_y']
                        tunnel_data.fly_angle = data_tracked[tunnel]['fly_angle']
                        tunnel_data.chamber = data_tracked[tunnel]['chamber']
                        tunnel_data.blob_x = data_tracked[tunnel]['blob_x']
                        tunnel_data.blob_y = data_tracked[tunnel]['blob_y']
                        tunnel_data.blob_area = data_tracked[tunnel]['blob_area']
                        tunnel_data.blob_slope = data_tracked[tunnel]['blob_slope']
                        tunnel_data.blob_ecc = data_tracked[tunnel]['blob_ecc']
                    except KeyError:
                        pass
                tunnel_data_list.append(tunnel_data)
            tracking_data.tunnel_data = tunnel_data_list
            self.tracking_data_pub.publish(tracking_data)
            data_image_cv = cv.fromarray(data_image)
            try:
                data_img = self.bridge.cv_to_imgmsg(data_image_cv, "bgr8")
                data_img.header = data.header
                self.image_d_pub.publish(data_img)
            except CvBridgeError, e:
                print e
            # image_tracked_cv = cv.fromarray(image_tracked)
            # if self.display_images:
            #     cv.ShowImage("Image Tracked", image_tracked_cv)
            #     cv.WaitKey(3)
        else:
            data_tracked = {}
        if self.drawing:
            image_processed = self.drawer.processImage(image_np,data_tracked)
        else:
            # No overlay: just promote grayscale to RGB for the publisher.
            image_processed = cv2.cvtColor(image_np,cv2.COLOR_GRAY2RGB)
        image_cv = cv.fromarray(image_processed)
        # if self.display_images:
        #     cv.ShowImage("Image Processed", image_cv)
        #     cv.WaitKey(3)
        try:
            self.image_p_pub.publish(self.bridge.cv_to_imgmsg(image_cv, "bgr8"))
        except CvBridgeError, e:
            print e
        # image_trimmed = self.trim_image(image_np)
        # image_trimmed_cv = cv.fromarray(image_trimmed)
        # if self.display_images:
        #     cv.ShowImage("Image Data", image_trimmed_cv)
        #     cv.WaitKey(3)
        # try:
        #     self.image_d_pub.publish(self.bridge.cv_to_imgmsg(image_trimmed_cv, "mono8"))
        # except CvBridgeError, e:
        #     print e
    # def trim_image(self,image_o):
    #     image_t = numpy.zeros((ty,tx*self.parameters.tunnel_count),image_o.dtype)
    #     for tunnel in range(self.parameters.tunnel_count):
    #         if self.parameters.tunnels_enabled[tunnel]:
    #             x_offset_o = self.parameters.tunnel_x_offsets[tunnel]
    #             x_offset_t = tunnel*tx
    #             tx0_o = self.parameters.tunnel_mask['x0'] + x_offset_o
    #             ty0_o = self.parameters.tunnel_mask['y0']
    #             tx1_o = self.parameters.tunnel_mask['x1'] + x_offset_o
    #             ty1_o = self.parameters.tunnel_mask['y1']
    #             tx0_t = x_offset_t
    #             ty0_t = 0
    #             tx1_t = self.parameters.tunnel_mask['x1'] + x_offset_t
    #             ty1_t = self.parameters.tunnel_mask['y1']
    #             image_t[ty0_t:ty1_t,tx0_t:tx1_t] = image_o[ty0_o:ty1_o,tx0_o:tx1_o]
    #     return image_t
    def save_background_image_callback(self,req):
        """Service handler: persist a background image and hand it to the tracker.

        Takes the current frame (or re-reads a stored one when
        ``reusing_bg_images`` is set), writes it to ``req.image_path``, and
        registers it as the gates-opened/closed background by filename.
        """
        if self.image_conditioned is not None:
            self.drawing = True
            (path,filename) = os.path.split(req.image_path)
            if not self.reusing_bg_images:
                image_background = numpy.copy(self.image_conditioned)
            else:
                image_background = file_tools.read_image_file(filename)
            file_tools.write_image_file(req.image_path,image_background)
            # rospy.logwarn("save_background_image_callback: " + filename)
            if filename == 'bg_gates_opened.png':
                self.tracker.setBgImageGatesOpened(image_background)
            elif filename == 'bg_gates_closed.png':
                self.tracker.setBgImageGatesClosed(image_background)
        return SaveImageResponse("success")
    def set_tracking_callback(self,req):
        """Service handler: enable/disable tracking and reset the frame count."""
        self.tracking = req.tracking
        self.parameters.reset_image_count()
        self.tracker.setParameters(self.parameters)
        self.drawer.setParameters(self.parameters)
        return SetTrackingResponse("success")
    def actuation_state_callback(self,req):
        """Forward the actuation node's tunnel state to the tracker."""
        self.tracker.setTunnelsState(req.tunnels_state)
    def get_parameters_callback(self,req):
        """Service handler: return all parameters as a JSON string."""
        return GetParametersResponse(self.parameters.get_parameters_json())
    def set_parameter_callback(self,req):
        """Service handler: set one scalar parameter and propagate it."""
        self.parameters.set_parameter(req.name,req.value)
        self.tracker.setParameters(self.parameters)
        self.drawer.setParameters(self.parameters)
        return SetParameterResponse("success")
    def set_array_parameter_callback(self,req):
        """Service handler: set one array parameter and propagate it."""
        self.parameters.set_array_parameter(req.name,req.array)
        self.tracker.setParameters(self.parameters)
        self.drawer.setParameters(self.parameters)
        return SetArrayParameterResponse("success")
    def set_status_callback(self,req):
        """Service handler: update the experiment status string and propagate it."""
        self.parameters.status = req.status
        self.tracker.setParameters(self.parameters)
        self.drawer.setParameters(self.parameters)
        return SetStatusResponse("success")
def main(args):
    """Initialize the faa_image_processing node and spin until shutdown.

    :param args: command-line arguments (unused).
    """
    rospy.init_node('faa_image_processing', anonymous=True)
    ip = ImageProcessor()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    # Tear down HighGUI windows only if the debug displays were enabled.
    if ip.display_images:
        cv.DestroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
|
{
"content_hash": "4f4ec7830a8db37f44313b22aa66a765",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 127,
"avg_line_length": 38.57258064516129,
"alnum_prop": 0.681685134852603,
"repo_name": "JaneliaSciComp/fly-alcohol-assay",
"id": "923854e8f14afd06ec0b2e25537b372fb31fbea2",
"size": "9588",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "faa_image_processing/nodes/image_processing.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "392"
},
{
"name": "C++",
"bytes": "36131"
},
{
"name": "CMake",
"bytes": "26184"
},
{
"name": "CSS",
"bytes": "3706"
},
{
"name": "HTML",
"bytes": "68111"
},
{
"name": "JavaScript",
"bytes": "173781"
},
{
"name": "Makefile",
"bytes": "908"
},
{
"name": "Python",
"bytes": "444569"
},
{
"name": "Shell",
"bytes": "214"
}
],
"symlink_target": ""
}
|
"""
Component to wake up devices sending Wake-On-LAN magic packets.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/wake_on_lan/
"""
from functools import partial
import logging
import voluptuous as vol
from homeassistant.const import CONF_MAC
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['wakeonlan==1.1.6']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'wake_on_lan'
CONF_BROADCAST_ADDRESS = 'broadcast_address'
SERVICE_SEND_MAGIC_PACKET = 'send_magic_packet'
WAKE_ON_LAN_SEND_MAGIC_PACKET_SCHEMA = vol.Schema({
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_BROADCAST_ADDRESS): cv.string,
})
async def async_setup(hass, config):
    """Set up the wake on LAN component."""
    import wakeonlan

    async def send_magic_packet(call):
        """Send magic packet to wake up a device."""
        mac_address = call.data.get(CONF_MAC)
        broadcast_address = call.data.get(CONF_BROADCAST_ADDRESS)

        _LOGGER.info("Send magic packet to mac %s (broadcast: %s)",
                     mac_address, broadcast_address)

        # Only forward ip_address when the caller supplied a broadcast
        # address; wakeonlan uses its own default otherwise. The blocking
        # send runs in the executor via async_add_job.
        wake_kwargs = {}
        if broadcast_address is not None:
            wake_kwargs['ip_address'] = broadcast_address
        await hass.async_add_job(
            partial(wakeonlan.send_magic_packet, mac_address, **wake_kwargs))

    hass.services.async_register(
        DOMAIN, SERVICE_SEND_MAGIC_PACKET, send_magic_packet,
        schema=WAKE_ON_LAN_SEND_MAGIC_PACKET_SCHEMA)

    return True
|
{
"content_hash": "1e8947618d9ee4a1ef0a9f35a00ba139",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 75,
"avg_line_length": 30.660377358490567,
"alnum_prop": 0.6750769230769231,
"repo_name": "PetePriority/home-assistant",
"id": "dba99bf7e3d4e693f10cc6556e2592c1083515f0",
"size": "1625",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/wake_on_lan/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1073"
},
{
"name": "Python",
"bytes": "13985647"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
"""Interface Package Interfaces
$Id: interfaces.py 67803 2006-05-01 15:20:47Z jim $
"""
__docformat__ = 'restructuredtext'
from zope.interface import Interface
from zope.interface.interface import Attribute
class IElement(Interface):
    """Objects that have basic documentation and tagged values.
    """
    __name__ = Attribute('__name__', 'The object name')
    __doc__ = Attribute('__doc__', 'The object doc string')
    def getTaggedValue(tag):
        """Returns the value associated with `tag`.

        Raise a `KeyError` if the tag isn't set.
        """
    def queryTaggedValue(tag, default=None):
        """Returns the value associated with `tag`.

        Return the default value if the tag isn't set.
        """
    def getTaggedValueTags():
        """Returns a list of all tags."""
    def setTaggedValue(tag, value):
        """Associates `value` with `tag`, overwriting any previous value."""
class IAttribute(IElement):
    """Attribute descriptors.

    Describes a plain (non-method) attribute defined by an interface.
    """
    interface = Attribute('interface',
                          'Stores the interface instance in which the '
                          'attribute is located.')
class IMethod(IAttribute):
    """Method attributes.

    Describes a method defined by an interface, including its signature.
    """
    def getSignatureInfo():
        """Returns the signature information.

        This method returns a dictionary with the following keys:

        o `positional` - All positional arguments.

        o `required` - A list of all required arguments.

        o `optional` - A list of all optional arguments.

        o `varargs` - The name of the varargs argument.

        o `kwargs` - The name of the kwargs argument.
        """
    def getSignatureString():
        """Return a signature string suitable for inclusion in documentation.

        This method returns the function signature string. For example, if you
        have `func(a, b, c=1, d='f')`, then the signature string is `(a, b,
        c=1, d='f')`.
        """
class ISpecification(Interface):
    """Object Behavioral specifications"""
    def extends(other, strict=True):
        """Test whether a specification extends another

        The specification extends other if it has other as a base
        interface or if one of its bases extends other.

        If strict is false, then the specification extends itself.
        """
    def isOrExtends(other):
        """Test whether the specification is or extends another
        """
    def weakref(callback=None):
        """Return a weakref to the specification

        This method is, regrettably, needed to allow weakrefs to be
        computed to security-proxied specifications. While the
        zope.interface package does not require zope.security or
        zope.proxy, it has to be able to coexist with it.
        """
    __bases__ = Attribute("""Base specifications
    A tuple if specifications from which this specification is
    directly derived.
    """)
    __sro__ = Attribute("""Specification-resolution order
    A tuple of the specification and all of it's ancestor
    specifications from most specific to least specific.
    (This is similar to the method-resolution order for new-style classes.)
    """)
    def get(name, default=None):
        """Look up the description for a name

        If the named attribute is not defined, the default is
        returned.
        """
class IInterface(ISpecification, IElement):
    """Interface objects

    Interface objects describe the behavior of an object by containing
    useful information about the object. This information includes:

    o Prose documentation about the object. In Python terms, this
      is called the "doc string" of the interface. In this element,
      you describe how the object works in prose language and any
      other useful information about the object.

    o Descriptions of attributes. Attribute descriptions include
      the name of the attribute and prose documentation describing
      the attributes usage.

    o Descriptions of methods. Method descriptions can include:

      - Prose "doc string" documentation about the method and its
        usage.

      - A description of the methods arguments; how many arguments
        are expected, optional arguments and their default values,
        the position of arguments in the signature, whether the
        method accepts arbitrary arguments and whether the method
        accepts arbitrary keyword arguments.

    o Optional tagged data. Interface objects (and their attributes and
      methods) can have optional, application specific tagged data
      associated with them. Examples uses for this are examples,
      security assertions, pre/post conditions, and other possible
      information you may want to associate with an Interface or its
      attributes.

    Not all of this information is mandatory. For example, you may
    only want the methods of your interface to have prose
    documentation and not describe the arguments of the method in
    exact detail. Interface objects are flexible and let you give or
    take any of these components.

    Interfaces are created with the Python class statement using
    either Interface.Interface or another interface, as in::

      from zope.interface import Interface

      class IMyInterface(Interface):
        '''Interface documentation'''

        def meth(arg1, arg2):
            '''Documentation for meth'''

        # Note that there is no self argument

      class IMySubInterface(IMyInterface):
        '''Interface documentation'''

        def meth2():
            '''Documentation for meth2'''

    You use interfaces in two ways:

    o You assert that your object implements the interfaces.

      There are several ways that you can assert that an object
      implements an interface:

      1. Call zope.interface.implements in your class definition.

      2. Call zope.interfaces.directlyProvides on your object.

      3. Call 'zope.interface.classImplements' to assert that instances
         of a class implement an interface.

         For example::

           from zope.interface import classImplements

           classImplements(some_class, some_interface)

         This approach is useful when it is not an option to modify
         the class source. Note that this doesn't affect what the
         class itself implements, but only what its instances
         implement.

    o You query interface meta-data. See the IInterface methods and
      attributes for details.
    """
    def providedBy(object):
        """Test whether the interface is implemented by the object

        Return true if the object asserts that it implements the
        interface, including asserting that it implements an extended
        interface.
        """
    def implementedBy(class_):
        """Test whether the interface is implemented by instances of the class

        Return true if the class asserts that its instances implement the
        interface, including asserting that they implement an extended
        interface.
        """
    def names(all=False):
        """Get the interface attribute names

        Return a sequence of the names of the attributes, including
        methods, included in the interface definition.

        Normally, only directly defined attributes are included. If
        a true positional or keyword argument is given, then
        attributes defined by base classes will be included.
        """
    def namesAndDescriptions(all=False):
        """Get the interface attribute names and descriptions

        Return a sequence of the names and descriptions of the
        attributes, including methods, as name-value pairs, included
        in the interface definition.

        Normally, only directly defined attributes are included. If
        a true positional or keyword argument is given, then
        attributes defined by base classes will be included.
        """
    def __getitem__(name):
        """Get the description for a name

        If the named attribute is not defined, a KeyError is raised.
        """
    def direct(name):
        """Get the description for the name if it was defined by the interface

        If the interface doesn't define the name, returns None.
        """
    def validateInvariants(obj, errors=None):
        """Validate invariants

        Validate object to defined invariants. If errors is None,
        raises first Invalid error; if errors is a list, appends all errors
        to list, then raises Invalid with the errors as the first element
        of the "args" tuple."""
    def __contains__(name):
        """Test whether the name is defined by the interface"""
    def __iter__():
        """Return an iterator over the names defined by the interface

        The names iterated include all of the names defined by the
        interface directly and indirectly by base interfaces.
        """
    __module__ = Attribute("""The name of the module defining the interface""")
class IDeclaration(ISpecification):
    """Interface declaration

    Declarations are used to express the interfaces implemented by
    classes or provided by objects.
    """
    def __contains__(interface):
        """Test whether an interface is in the specification

        Return true if the given interface is one of the interfaces in
        the specification and false otherwise.
        """
    def __iter__():
        """Return an iterator for the interfaces in the specification
        """
    def flattened():
        """Return an iterator of all included and extended interfaces

        An iterator is returned for all interfaces either included in
        or extended by interfaces included in the specifications
        without duplicates. The interfaces are in "interface
        resolution order". The interface resolution order is such that
        base interfaces are listed after interfaces that extend them
        and, otherwise, interfaces are included in the order that they
        were defined in the specification.
        """
    def __sub__(interfaces):
        """Create an interface specification with some interfaces excluded

        The argument can be an interface or an interface
        specification. The interface or interfaces given in a
        specification are subtracted from the interface specification.

        Removing an interface that is not in the specification does
        not raise an error. Doing so has no effect.

        Removing an interface also removes sub-interfaces of the interface.
        """
    def __add__(interfaces):
        """Create an interface specification with some interfaces added

        The argument can be an interface or an interface
        specification. The interface or interfaces given in a
        specification are added to the interface specification.

        Adding an interface that is already in the specification does
        not raise an error. Doing so has no effect.
        """
    def __nonzero__():
        """Return a true value if the interface specification is non-empty
        """
class IInterfaceDeclaration(Interface):
"""Declare and check the interfaces of objects
The functions defined in this interface are used to declare the
interfaces that objects provide and to query the interfaces that have
been declared.
Interfaces can be declared for objects in two ways:
- Interfaces are declared for instances of the object's class
- Interfaces are declared for the object directly.
The interfaces declared for an object are, therefore, the union of
interfaces declared for the object directly and the interfaces
declared for instances of the object's class.
Note that we say that a class implements the interfaces provided
by it's instances. An instance can also provide interfaces
directly. The interfaces provided by an object are the union of
the interfaces provided directly and the interfaces implemented by
the class.
"""
def providedBy(ob):
"""Return the interfaces provided by an object
This is the union of the interfaces directly provided by an
object and interfaces implemented by it's class.
The value returned is an IDeclaration.
"""
def implementedBy(class_):
"""Return the interfaces implemented for a class' instances
The value returned is an IDeclaration.
"""
def classImplements(class_, *interfaces):
"""Declare additional interfaces implemented for instances of a class
The arguments after the class are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) are added to any interfaces previously
declared.
Consider the following example::
class C(A, B):
...
classImplements(C, I1, I2)
Instances of ``C`` provide ``I1``, ``I2``, and whatever interfaces
instances of ``A`` and ``B`` provide.
"""
def implementer(*interfaces):
"""Create a decorator for declaring interfaces implemented by a facory
A callable is returned that makes an implements declaration on
objects passed to it.
"""
def classImplementsOnly(class_, *interfaces):
"""Declare the only interfaces implemented by instances of a class
The arguments after the class are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) replace any previous declarations.
Consider the following example::
class C(A, B):
...
classImplements(C, IA, IB. IC)
classImplementsOnly(C. I1, I2)
Instances of ``C`` provide only ``I1``, ``I2``, and regardless of
whatever interfaces instances of ``A`` and ``B`` implement.
"""
def directlyProvidedBy(object):
"""Return the interfaces directly provided by the given object
The value returned is an IDeclaration.
"""
def directlyProvides(object, *interfaces):
"""Declare interfaces declared directly for an object
The arguments after the object are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) replace interfaces previously
declared for the object.
Consider the following example::
class C(A, B):
...
ob = C()
directlyProvides(ob, I1, I2)
The object, ``ob`` provides ``I1``, ``I2``, and whatever interfaces
instances have been declared for instances of ``C``.
To remove directly provided interfaces, use ``directlyProvidedBy`` and
subtract the unwanted interfaces. For example::
directlyProvides(ob, directlyProvidedBy(ob)-I2)
removes I2 from the interfaces directly provided by
``ob``. The object, ``ob`` no longer directly provides ``I2``,
although it might still provide ``I2`` if it's class
implements ``I2``.
To add directly provided interfaces, use ``directlyProvidedBy`` and
include additional interfaces. For example::
directlyProvides(ob, directlyProvidedBy(ob), I2)
adds I2 to the interfaces directly provided by ob.
"""
def alsoProvides(object, *interfaces):
"""Declare additional interfaces directly for an object::
alsoProvides(ob, I1)
is equivalent to::
directivelyProvides(ob, directlyProvidedBy(ob), I1)
"""
def noLongerProvides(object, interface):
"""Remove an interface from the list of an object's directly
provided interfaces::
noLongerProvides(ob, I1)
is equivalent to::
directlyProvides(ob, directlyProvidedBy(ob)-I1)
with the exception that if ``I1`` is an interface that is
provided by ``ob`` through the class's implementation,
ValueError is raised.
"""
def implements(*interfaces):
"""Declare interfaces implemented by instances of a class
This function is called in a class definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) are added to any interfaces previously
declared.
Previous declarations include declarations for base classes
unless implementsOnly was used.
This function is provided for convenience. It provides a more
convenient way to call classImplements. For example::
implements(I1)
is equivalent to calling::
classImplements(C, I1)
after the class has been created.
Consider the following example::
class C(A, B):
implements(I1, I2)
Instances of ``C`` implement ``I1``, ``I2``, and whatever interfaces
instances of ``A`` and ``B`` implement.
"""
def implementsOnly(*interfaces):
"""Declare the only interfaces implemented by instances of a class
This function is called in a class definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
Previous declarations including declarations for base classes
are overridden.
This function is provided for convenience. It provides a more
convenient way to call classImplementsOnly. For example::
implementsOnly(I1)
is equivalent to calling::
classImplementsOnly(I1)
after the class has been created.
Consider the following example::
class C(A, B):
implementsOnly(I1, I2)
Instances of ``C`` implement ``I1``, ``I2``, regardless of what
instances of ``A`` and ``B`` implement.
"""
def classProvides(*interfaces):
    """Declare interfaces provided directly by a class.

    This function is called in a class definition.  The arguments are
    one or more interfaces or interface specifications (IDeclaration
    objects).

    The given interfaces (including the interfaces in the
    specifications) are used to create the class's direct-object
    interface specification.  An error will be raised if the class
    already has a direct interface specification.  In other words, it
    is an error to call this function more than once in a class
    definition.

    Note that the given interfaces have nothing to do with the
    interfaces implemented by instances of the class.

    This function is provided for convenience.  It provides a more
    convenient way to call directlyProvides for a class.  For example::

        classProvides(I1)

    is equivalent to calling::

        directlyProvides(theclass, I1)

    after the class has been created.
    """
def moduleProvides(*interfaces):
    """Declare interfaces provided by a module.

    This function is used in a module definition.  The arguments are
    one or more interfaces or interface specifications (IDeclaration
    objects).

    The given interfaces (including the interfaces in the
    specifications) are used to create the module's direct-object
    interface specification.  An error will be raised if the module
    already has an interface specification.  In other words, it is an
    error to call this function more than once in a module definition.

    This function is provided for convenience.  It provides a more
    convenient way to call directlyProvides for a module.  For example::

        moduleProvides(I1)

    is equivalent to::

        directlyProvides(sys.modules[__name__], I1)
    """
def Declaration(*interfaces):
    """Create an interface specification.

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    A new interface specification (IDeclaration) with the given
    interfaces is returned.
    """
class IAdapterRegistry(Interface):
    """Provide an interface-based registry for adapters.

    This registry registers objects that are in some sense "from" a
    sequence of specifications to an interface and a name.

    No specific semantics are assumed for the registered objects;
    however, the most common application will be to register factories
    that adapt objects providing required specifications to a provided
    interface.
    """

    def register(required, provided, name, value):
        """Register a value.

        A value is registered for a *sequence* of required
        specifications, a provided interface, and a name.
        """

    def registered(required, provided, name=u''):
        """Return the component registered for the given interfaces and name.

        Unlike the lookup method, this method won't retrieve components
        registered for more specific required interfaces or less
        specific provided interfaces.

        If no component was registered exactly for the given interfaces
        and name, then None is returned.
        """

    def lookup(required, provided, name='', default=None):
        """Lookup a value.

        A value is looked up based on a *sequence* of required
        specifications, a provided interface, and a name.
        """

    def queryMultiAdapter(objects, provided, name=u'', default=None):
        """Adapt a sequence of objects to a named, provided, interface.
        """

    def lookup1(required, provided, name=u'', default=None):
        """Lookup a value using a single required interface.

        A value is looked up based on a single required specification,
        a provided interface, and a name.
        """

    def queryAdapter(object, provided, name=u'', default=None):
        """Adapt an object using a registered adapter factory.
        """

    def adapter_hook(provided, object, name=u'', default=None):
        """Adapt an object using a registered adapter factory.
        """

    def lookupAll(required, provided):
        """Find all adapters from the required to the provided interfaces.

        An iterable object is returned that provides name-value
        two-tuples.
        """

    def names(required, provided):
        """Return the names for which there are registered objects.
        """

    def subscribe(required, provided, subscriber, name=u''):
        """Register a subscriber.

        A subscriber is registered for a *sequence* of required
        specifications, a provided interface, and a name.

        Multiple subscribers may be registered for the same (or
        equivalent) interfaces.
        """

    def subscriptions(required, provided, name=u''):
        """Get a sequence of subscribers.

        Subscribers for a *sequence* of required interfaces, and a
        provided interface, are returned.
        """

    def subscribers(objects, provided, name=u''):
        """Get a sequence of subscription adapters.
        """
|
{
"content_hash": "9e2ef0a64a1e83629c0de9b2122587cd",
"timestamp": "",
"source": "github",
"line_count": 717,
"max_line_length": 79,
"avg_line_length": 32.62900976290098,
"alnum_prop": 0.6626202179952981,
"repo_name": "GetSomeBlocks/ServerStatus",
"id": "6deac8cb60dc7c95bd9e0a46844612f32a42394b",
"size": "24032",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "resources/lib/zope.interface/zope/interface/interfaces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "930"
},
{
"name": "C",
"bytes": "293000"
},
{
"name": "C#",
"bytes": "9664"
},
{
"name": "CSS",
"bytes": "24716"
},
{
"name": "D",
"bytes": "542"
},
{
"name": "HTML",
"bytes": "374176"
},
{
"name": "Java",
"bytes": "206"
},
{
"name": "Objective-C",
"bytes": "9421"
},
{
"name": "Python",
"bytes": "8744725"
},
{
"name": "Ruby",
"bytes": "6773"
},
{
"name": "Shell",
"bytes": "13600"
}
],
"symlink_target": ""
}
|
import imp
import os
import sys
import shutil
import six
from migrate import exceptions
from migrate.versioning import version, repository
from migrate.versioning.script import *
from migrate.versioning.util import *
from migrate.tests import fixture
from migrate.tests.fixture.models import tmp_sql_table
class TestBaseScript(fixture.Pathed):

    def test_all(self):
        """Testing all basic BaseScript operations"""
        # verify / source / run on an empty script file
        script_path = self.tmp()
        with open(script_path, 'w'):
            pass
        script = BaseScript(script_path)
        BaseScript.verify(script_path)
        # An empty file yields an empty source string.
        self.assertEqual(script.source(), '')
        # BaseScript itself does not implement run(); subclasses must.
        self.assertRaises(NotImplementedError, script.run, 'foobar')
class TestPyScript(fixture.Pathed, fixture.DB):
    """Tests for PythonScript: creation, verification, execution, and
    update-script generation from model differences."""
    cls = PythonScript

    def test_create(self):
        """We can create a migration script"""
        path = self.tmp_py()
        # Creating a file that doesn't exist should succeed
        self.cls.create(path)
        self.assertTrue(os.path.exists(path))
        # Created file should be a valid script (If not, raises an error)
        self.cls.verify(path)
        # Can't create it again: it already exists
        self.assertRaises(exceptions.PathFoundError, self.cls.create, path)

    @fixture.usedb(supported='sqlite')
    def test_run(self):
        script_path = self.tmp_py()
        pyscript = PythonScript.create(script_path)
        pyscript.run(self.engine, 1)
        pyscript.run(self.engine, -1)
        self.assertRaises(exceptions.ScriptError, pyscript.run, self.engine, 0)
        self.assertRaises(exceptions.ScriptError, pyscript._func, 'foobar')

        # clean pyc file
        if six.PY3:
            os.remove(imp.cache_from_source(script_path))
        else:
            os.remove(script_path + 'c')

        # test deprecated upgrade/downgrade with no arguments.
        # Use context managers so the file handles are closed promptly
        # (the original leaked the read handle).
        with open(script_path, 'r') as f:
            contents = f.read()
        with open(script_path, 'w') as f:
            f.write(contents.replace("upgrade(migrate_engine)", "upgrade()"))
        pyscript = PythonScript(script_path)
        pyscript._module = None
        try:
            pyscript.run(self.engine, 1)
            pyscript.run(self.engine, -1)
        except exceptions.ScriptError:
            pass
        else:
            self.fail()

    def test_verify_notfound(self):
        """Correctly verify a python migration script: nonexistant file"""
        path = self.tmp_py()
        self.assertFalse(os.path.exists(path))
        # Fails on empty path
        self.assertRaises(exceptions.InvalidScriptError, self.cls.verify, path)
        self.assertRaises(exceptions.InvalidScriptError, self.cls, path)

    def test_verify_invalidpy(self):
        """Correctly verify a python migration script: invalid python file"""
        path = self.tmp_py()
        # Create a file containing broken Python syntax
        self.write_file(path, "def fail")
        self.assertRaises(Exception, self.cls.verify_module, path)
        # script isn't verified on creation, but on module reference
        py = self.cls(path)
        self.assertRaises(Exception, (lambda x: x.module), py)

    def test_verify_nofuncs(self):
        """Correctly verify a python migration script: valid python file; no upgrade func"""
        path = self.tmp_py()
        # Create a syntactically valid script without an upgrade() function
        self.write_file(path, "def zergling():\n\tprint('rush')")
        self.assertRaises(exceptions.InvalidScriptError,
                          self.cls.verify_module, path)
        # script isn't verified on creation, but on module reference
        py = self.cls(path)
        self.assertRaises(exceptions.InvalidScriptError,
                          (lambda x: x.module), py)

    @fixture.usedb(supported='sqlite')
    def test_preview_sql(self):
        """Preview SQL abstract from ORM layer (sqlite)"""
        path = self.tmp_py()
        content = '''
from migrate import *
from sqlalchemy import *
metadata = MetaData()
UserGroup = Table('Link', metadata,
    Column('link1ID', Integer),
    Column('link2ID', Integer),
    UniqueConstraint('link1ID', 'link2ID'))
def upgrade(migrate_engine):
    metadata.create_all(migrate_engine)
'''
        self.write_file(path, content)
        pyscript = self.cls(path)
        SQL = pyscript.preview_sql(self.url, 1)
        self.assertEqualIgnoreWhitespace("""
        CREATE TABLE "Link"
        ("link1ID" INTEGER,
        "link2ID" INTEGER,
        UNIQUE ("link1ID", "link2ID"))
        """, SQL)
        # TODO: test: No SQL should be executed!

    def test_verify_success(self):
        """Correctly verify a python migration script: success"""
        path = self.tmp_py()
        # Succeeds after creating
        self.cls.create(path)
        self.cls.verify(path)

    # test for PythonScript.make_update_script_for_model
    @fixture.usedb()
    def test_make_update_script_for_model(self):
        """Construct script source from differences of two models"""
        self.setup_model_params()
        self.write_file(self.first_model_path, self.base_source)
        self.write_file(self.second_model_path,
                        self.base_source + self.model_source)
        source_script = self.pyscript.make_update_script_for_model(
            engine=self.engine,
            oldmodel=load_model('testmodel_first:meta'),
            model=load_model('testmodel_second:meta'),
            repository=self.repo_path,
        )
        self.assertTrue("['User'].create()" in source_script)
        self.assertTrue("['User'].drop()" in source_script)

    @fixture.usedb()
    def test_make_update_script_for_equal_models(self):
        """Try to make update script from two identical models"""
        self.setup_model_params()
        self.write_file(self.first_model_path,
                        self.base_source + self.model_source)
        self.write_file(self.second_model_path,
                        self.base_source + self.model_source)
        source_script = self.pyscript.make_update_script_for_model(
            engine=self.engine,
            oldmodel=load_model('testmodel_first:meta'),
            model=load_model('testmodel_second:meta'),
            repository=self.repo_path,
        )
        self.assertFalse('User.create()' in source_script)
        self.assertFalse('User.drop()' in source_script)

    @fixture.usedb()
    def test_make_update_script_direction(self):
        """Check update scripts go in the right direction"""
        self.setup_model_params()
        self.write_file(self.first_model_path, self.base_source)
        self.write_file(self.second_model_path,
                        self.base_source + self.model_source)
        source_script = self.pyscript.make_update_script_for_model(
            engine=self.engine,
            oldmodel=load_model('testmodel_first:meta'),
            model=load_model('testmodel_second:meta'),
            repository=self.repo_path,
        )
        # upgrade (and its create) must appear before downgrade (and drop)
        self.assertTrue(0
            < source_script.find('upgrade')
            < source_script.find("['User'].create()")
            < source_script.find('downgrade')
            < source_script.find("['User'].drop()"))

    def setup_model_params(self):
        """Prepare paths, model sources, a repository, and a script used
        by the make_update_script_* tests."""
        self.script_path = self.tmp_py()
        self.repo_path = self.tmp()
        self.first_model_path = os.path.join(self.temp_usable_dir,
                                             'testmodel_first.py')
        self.second_model_path = os.path.join(self.temp_usable_dir,
                                              'testmodel_second.py')
        self.base_source = """from sqlalchemy import *\nmeta = MetaData()\n"""
        self.model_source = """
User = Table('User', meta,
    Column('id', Integer, primary_key=True),
    Column('login', Unicode(40)),
    Column('passwd', String(40)),
)"""
        self.repo = repository.Repository.create(self.repo_path, 'repo')
        self.pyscript = PythonScript.create(self.script_path)
        # Drop stale modules so load_model() re-imports the fresh files.
        sys.modules.pop('testmodel_first', None)
        sys.modules.pop('testmodel_second', None)

    def write_file(self, path, contents):
        """Write *contents* to *path*, closing the handle promptly."""
        with open(path, 'w') as f:
            f.write(contents)
class TestSqlScript(fixture.Pathed, fixture.DB):
    """Tests for SqlScript: error handling and SQL execution."""

    @fixture.usedb()
    def test_error(self):
        """Test if exception is raised on wrong script source"""
        src = self.tmp()
        # Use a context manager so the handle is closed promptly
        # (the original left it to the garbage collector).
        with open(src, 'w') as f:
            f.write("""foobar""")
        sqls = SqlScript(src)
        self.assertRaises(Exception, sqls.run, self.engine)

    @fixture.usedb()
    def test_success(self):
        """Test successful SQL execution"""
        # cleanup and prepare python script
        tmp_sql_table.metadata.drop_all(self.engine, checkfirst=True)
        script_path = self.tmp_py()
        pyscript = PythonScript.create(script_path)

        # populate python script
        with open(script_path, 'r') as f:
            contents = f.read()
        contents = contents.replace("pass",
                                    "tmp_sql_table.create(migrate_engine)")
        contents = ('from migrate.tests.fixture.models import tmp_sql_table\n'
                    + contents)
        with open(script_path, 'w') as f:
            f.write(contents)

        # write SQL script from python script preview
        pyscript = PythonScript(script_path)
        src = self.tmp()
        with open(src, 'w') as f:
            f.write(pyscript.preview_sql(self.url, 1))

        # run the change
        sqls = SqlScript(src)
        sqls.run(self.engine)
        tmp_sql_table.metadata.drop_all(self.engine, checkfirst=True)

    @fixture.usedb()
    def test_transaction_management_statements(self):
        """
        Test that we can successfully execute SQL scripts with transaction
        management statements.
        """
        for script_pattern in (
            "BEGIN TRANSACTION; %s; COMMIT;",
            "BEGIN; %s; END TRANSACTION;",
        ):
            test_statement = ("CREATE TABLE TEST1 (field1 int); "
                              "DROP TABLE TEST1")
            script = script_pattern % test_statement
            src = self.tmp()
            with open(src, 'wt') as f:
                f.write(script)
            sqls = SqlScript(src)
            sqls.run(self.engine)
|
{
"content_hash": "93af135241f6e0920ede3777973122b5",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 92,
"avg_line_length": 34.43150684931507,
"alnum_prop": 0.611597374179431,
"repo_name": "watspidererik/testenv",
"id": "954bc0d881050a87fbe93fdd6ae4fb5fe22b9c6a",
"size": "10101",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "flask/lib/python2.7/site-packages/migrate/tests/versioning/test_script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6349"
},
{
"name": "CSS",
"bytes": "11367"
},
{
"name": "JavaScript",
"bytes": "22141"
},
{
"name": "Python",
"bytes": "10555714"
},
{
"name": "Shell",
"bytes": "3745"
}
],
"symlink_target": ""
}
|
"""
concerts/management/commands/make_matches.py
Uses the Artist.re_string to search for the artist in the
concert lineup (Concert.billing). If a match is found, saves a
ConcertMatch object for lookup later.
"""
import re
import logging
from django.core.management.base import BaseCommand, CommandError
from concerts.models import Artist, Concert, ConcertMatch, Venue
logger = logging.getLogger('concerts.data_management')
class Command(BaseCommand):
    """Scan active concert billings with each active artist's regex and
    persist any matches as ConcertMatch rows."""
    help = 'Looks for tracked artists in the concert billings and saves matches'

    def handle(self, *args, **options):
        # Pull only the columns we need; the full objects are fetched
        # lazily below, and only for actual matches.
        artist_pairs = list(Artist.objects.filter(is_active=True).values_list('id', 're_string'))
        concert_pairs = list(Concert.objects.filter(is_active=True).values_list('id', 'billing'))

        match_count = 0
        for artist_id, regex_string in artist_pairs:
            artist_regex = re.compile(
                r'{}'.format(regex_string),
                flags=re.IGNORECASE | re.MULTILINE | re.DOTALL,
            )
            # Fetch the Artist row at most once per artist, and only
            # when it matches something: the original re-queried it for
            # every matching concert (N+1 queries).
            artist_matched = None
            for concert_id, concert_billing in concert_pairs:
                if not artist_regex.search(concert_billing):
                    continue
                if artist_matched is None:
                    artist_matched = Artist.objects.get(id=artist_id)
                concert_matched = Concert.objects.get(id=concert_id)
                logger.info(
                    "Matched artist {} and concert {}".format(
                        artist_matched, concert_matched
                    )
                )
                # On first match for a concert, create its ConcertMatch
                # entry; subsequent matches only add to the M2M sets.
                # TODO but hacky? --review models--
                match, _created = ConcertMatch.objects.get_or_create(
                    concert=concert_matched
                )
                concert_matched.artists.add(artist_matched)
                concert_matched.save()
                match.artists.add(artist_matched)
                match.save()
                match_count += 1
        logger.info("Saved {} matches".format(match_count))
|
{
"content_hash": "ac21bcbfedb785f26581432ca782cc75",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 97,
"avg_line_length": 37.62068965517241,
"alnum_prop": 0.5962419798350137,
"repo_name": "jravesloot/sift_app",
"id": "9c0d594dd09f1ecf18142dba396a8e69a1f33b84",
"size": "2182",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sift/concerts/management/commands/make_matches.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10007"
},
{
"name": "Python",
"bytes": "82597"
}
],
"symlink_target": ""
}
|
"""Tracing Protocol for tf.function.
TODO(b/202447704): Briefly describe the tracing, retracing, and how trace types
control it.
"""
from tensorflow.core.function.trace_type.signature_builder import make_function_signature
from tensorflow.core.function.trace_type.signature_builder import SignatureContext
from tensorflow.core.function.trace_type.signature_builder import WeakrefDeletionObserver
|
{
"content_hash": "137379a2103d6d891fa43f584dce466f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 89,
"avg_line_length": 36.36363636363637,
"alnum_prop": 0.83,
"repo_name": "Intel-Corporation/tensorflow",
"id": "1e354166515627b8b69d0a759e2bc4a310b4d49b",
"size": "1089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/core/function/trace_type/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
}
|
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, PasswordField, TextAreaField
from wtforms.validators import DataRequired, Length, Email, ValidationError
from app.models import User, Post, Page
from app import bcrypt
from unidecode import unidecode
from sqlalchemy import func
import re
class LoginForm(Form):
    """Form for authenticating an existing user."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password',
                             validators=[DataRequired(), Length(min=8)])
    remember_me = BooleanField('Remember Me', default=False)

    def _find_user(self):
        # Case-insensitive lookup of the user named in the form.
        return User.query.filter(func.lower(User.username) ==
                                 self.username.data.lower()).first()

    def validate_username(self, field):
        """Fail validation when no such user exists."""
        if self._find_user() is None:
            raise ValidationError('User does not exist')

    def validate_password(self, field):
        """Fail validation when the password does not match the stored hash."""
        account = self._find_user()
        if account and not bcrypt.check_password_hash(account.password,
                                                      self.password.data):
            raise ValidationError('Password is incorrect')
class RegisterForm(Form):
    """Form for creating a new user account."""
    first_name = StringField('First Name', validators=[DataRequired()])
    last_name = StringField('Last Name', validators=[DataRequired()])
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('Email Address', validators=[DataRequired(), Email()])
    # Enforce the same minimum length as LoginForm: previously a shorter
    # password could be registered but would never pass login validation.
    password = PasswordField('Password',
                             validators=[DataRequired(), Length(min=8)])

    def validate_username(self, field):
        """Reject usernames already taken (case-insensitive)."""
        if User.query.filter(func.lower(User.username) ==
                             self.username.data.lower()).count() > 0:
            raise ValidationError('Username already exists.')

    def validate_email(self, field):
        """Reject email addresses already in use (case-insensitive)."""
        if User.query.filter(func.lower(User.email) ==
                             self.email.data.lower()).count() > 0:
            raise ValidationError('Email already in use.')
class PostForm(Form):
    """Form for creating a blog post."""
    title = StringField('Title',
        validators=[DataRequired("Please enter a title!")])
    post_short = TextAreaField('Short Post')
    post_body = TextAreaField('Post Body',
        validators=[DataRequired("Please enter a post body!")])
    tags = StringField('Tags',
        validators=[DataRequired("Please enter post tags!")])

    def generate_slug(self, title):
        """Build a URL slug from *title*; raise if a post already uses it."""
        normalized = unidecode(title).lower().rstrip(r' .?!(),[]{}')
        slug = re.sub(r'\W+', '-', normalized)
        if Post.query.filter_by(title_slug=slug).count() > 0:
            raise ValidationError('Title is already taken')
        return slug
class EditUser(Form):
    """Form for editing an existing user's profile."""
    first_name = StringField('First Name',
        validators=[DataRequired("First name is required!")])
    last_name = StringField('Last Name',
        validators=[DataRequired("Last name is required!")])
    username = StringField('Username',
        validators=[DataRequired("Username is required!")])
    email = StringField('Email Address',
        validators=[DataRequired("Email is required!"), Email()])
    about_me = TextAreaField('About Me',
        validators=[Length(max=200, message="About me must be less than 200 characters!")])

    def __init__(self, orig_user, orig_email, *args, **kwargs):
        Form.__init__(self, *args, **kwargs)
        # Remember the current values so an unchanged username/email
        # does not trip the uniqueness checks below.
        self.orig_user = orig_user
        self.orig_email = orig_email

    def validate_username(self, field):
        """Allow the unchanged username; otherwise require uniqueness."""
        new_name = self.username.data.lower()
        if self.orig_user and new_name == self.orig_user.lower():
            return True
        taken = User.query.filter(
            func.lower(User.username) == new_name).count() > 0
        if taken:
            raise ValidationError('Username is taken.')

    def validate_email(self, field):
        """Allow the unchanged email; otherwise require uniqueness."""
        new_email = self.email.data.lower()
        if self.orig_email and new_email == self.orig_email.lower():
            return True
        in_use = User.query.filter(
            func.lower(User.email) == new_email).count() > 0
        if in_use:
            raise ValidationError('Email already exists.')
class PageForm(Form):
    """Form for creating a static page."""
    title = StringField('Page Title',
        validators=[DataRequired("Page must have a title")])
    content = TextAreaField('Page Content',
        validators=[DataRequired("Page must have content")])

    def generate_slug(self, title):
        """Build a URL slug from *title*; raise if a page already uses it."""
        normalized = unidecode(title).lower().rstrip(r' .?!(),[]{}')
        slug = re.sub(r'\W+', '-', normalized)
        if Page.query.filter_by(title_slug=slug).count() > 0:
            raise ValidationError('Title is already taken')
        return slug
|
{
"content_hash": "789b631393c62b2045fda8cb778dee52",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 112,
"avg_line_length": 44.18181818181818,
"alnum_prop": 0.5940329218106996,
"repo_name": "jaymickey/jmickey-blog",
"id": "9deba68cf1e6a0026834d83ed312c347f34be8a4",
"size": "4860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3841"
},
{
"name": "HTML",
"bytes": "26955"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "17415"
}
],
"symlink_target": ""
}
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Species mapper.
"""
from sqlalchemy.orm import relationship
from everest.repositories.rdb.utils import as_slug_expression
from everest.repositories.rdb.utils import mapper
from thelma.entities.gene import Gene
from thelma.entities.species import Species
__docformat__ = 'reStructuredText en'
__all__ = ['create_mapper']
def create_mapper(species_tbl):
    """Create and return the ORM mapper for the Species entity.

    The slug is derived from the species' common name, and genes are
    linked back to their species.
    """
    return mapper(
        Species,
        species_tbl,
        id_attribute='species_id',
        slug_expression=lambda cls: as_slug_expression(cls.common_name),
        properties=dict(
            genes=relationship(Gene, back_populates='species'),
        ),
    )
|
{
"content_hash": "d250e0f005af3c25ead1f27f777fb5c5",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 80,
"avg_line_length": 31.24137931034483,
"alnum_prop": 0.6434878587196468,
"repo_name": "helixyte/TheLMA",
"id": "ee5f901ee342e6d933c9d2ec0a8fa27889013b44",
"size": "906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thelma/repositories/rdb/mappers/species.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3126"
},
{
"name": "Python",
"bytes": "3329729"
},
{
"name": "Shell",
"bytes": "3071"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
from poetry.core.constraints.version import constraint_regions
__all__ = ["constraint_regions"]
|
{
"content_hash": "dac694a94df34f90142a01c1f4696a80",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 62,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.7611940298507462,
"repo_name": "python-poetry/poetry-core",
"id": "eabcc06b08b4735b921c04e1795f584964f7b5b1",
"size": "134",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/poetry/core/semver/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2664"
},
{
"name": "Makefile",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2084191"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
import sys, os
import mysql.connector
"""
Example using MySQL Connector/Python showing:
* sending multiple statements and iterating over the results
"""
def main(config):
    """Run a multi-statement demo against MySQL and collect output lines.

    Demonstrates sending several statements in one execute() call with
    ``multi=True`` and iterating over the per-statement results.

    Args:
        config: dict of keyword arguments for mysql.connector.Connect.

    Returns:
        List of human-readable strings describing each result set.
    """
    output = []
    db = mysql.connector.Connect(**config)
    cursor = db.cursor()

    # Drop table if exists, and create it new
    stmt_drop = "DROP TABLE IF EXISTS names"
    cursor.execute(stmt_drop)
    stmt_create = """
    CREATE TABLE names (
        id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT,
        name VARCHAR(30) DEFAULT '' NOT NULL,
        info TEXT DEFAULT '',
        age TINYINT UNSIGNED DEFAULT '30',
        PRIMARY KEY (id)
    )"""
    cursor.execute(stmt_create)

    # NOTE: the original also built an unused 30000-character string
    # here ("info"); it was dead code and has been removed.
    stmts = [
        "INSERT INTO names (name) VALUES ('Geert')",
        "SELECT COUNT(*) AS cnt FROM names",
        "INSERT INTO names (name) VALUES ('Jan'),('Michel')",
        "SELECT name FROM names",
    ]

    # Note 'multi=True' when calling cursor.execute()
    for result in cursor.execute(' ; '.join(stmts), multi=True):
        if result.with_rows:
            if result.statement == stmts[3]:
                output.append("Names in table: " +
                              ' '.join([name[0] for name in result]))
            else:
                output.append(
                    "Number of rows: {}".format(result.fetchone()[0]))
        else:
            # Non-SELECT statements report the affected-row count.
            output.append("Inserted {} row{}".format(
                result.rowcount, 's' if result.rowcount > 1 else ''))

    cursor.execute(stmt_drop)
    cursor.close()
    db.close()
    return output
if __name__ == '__main__':
    #
    # Configure MySQL login and database to use in config.py
    #
    from config import Config
    config = Config.dbinfo().copy()
    # Run the demo and print one description line per result set.
    out = main(config)
    print('\n'.join(out))
|
{
"content_hash": "be9512200c66da8f2ba5e1b87bdf5c46",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 70,
"avg_line_length": 27.348484848484848,
"alnum_prop": 0.5634349030470914,
"repo_name": "rcosnita/fantastico",
"id": "82e3c2776bac2847801abb289d8f00f65da6cea4",
"size": "2982",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "virtual_env/libs/mysql-connector/python3/examples/multi_resultsets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6802"
},
{
"name": "Python",
"bytes": "2168052"
},
{
"name": "Shell",
"bytes": "13309"
}
],
"symlink_target": ""
}
|
"""Testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-import-order
from tensorflow.python.framework import test_util as _test_util
from tensorflow.python.platform import googletest as _googletest
# pylint: disable=unused-import
from tensorflow.python.framework.test_util import assert_equal_graph_def
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import gpu_device_name
from tensorflow.python.framework.test_util import is_gpu_available
from tensorflow.python.ops.gradient_checker import compute_gradient_error
from tensorflow.python.ops.gradient_checker import compute_gradient
# pylint: enable=unused-import,g-bad-import-order
import functools
import sys
from tensorflow.python.util.tf_export import tf_export
if sys.version_info.major == 2:
import mock # pylint: disable=g-import-not-at-top,unused-import
else:
from unittest import mock # pylint: disable=g-import-not-at-top,g-importing-member
tf_export(v1=['test.mock'])(mock)
# Import Benchmark class
Benchmark = _googletest.Benchmark # pylint: disable=invalid-name
# Import StubOutForTesting class
StubOutForTesting = _googletest.StubOutForTesting # pylint: disable=invalid-name
@tf_export('test.main')
def main(argv=None):
  """Runs all unit tests.

  Args:
    argv: Optional command-line arguments forwarded to the test runner.

  Returns:
    Whatever `_googletest.main` returns.
  """
  # Install the stack trace handler first so crashes during the test run
  # produce readable tracebacks.
  _test_util.InstallStackTraceHandler()
  return _googletest.main(argv)
@tf_export(v1=['test.get_temp_dir'])
def get_temp_dir():
  """Returns a temporary directory for use during tests.

  There is no need to delete the directory after the test.

  Returns:
    The temporary directory.
  """
  # Delegates to googletest, which manages the directory's lifetime.
  return _googletest.GetTempDir()
@tf_export(v1=['test.test_src_dir_path'])
def test_src_dir_path(relative_path):
  """Creates an absolute test srcdir path given a relative path.

  Args:
    relative_path: a path relative to tensorflow root.
      e.g. "core/platform".

  Returns:
    An absolute path to the linked in runfiles.
  """
  # Resolution is delegated to the googletest runfiles machinery.
  return _googletest.test_src_dir_path(relative_path)
@tf_export('test.is_built_with_cuda')
def is_built_with_cuda():
  """Returns whether TensorFlow was built with CUDA (GPU) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with CUDA (GPU).

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_cuda():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is built with CUDA.

  Returns:
    True if CUDA support was enabled at build time.
  """
  return _test_util.IsGoogleCudaEnabled()
@tf_export('test.is_built_with_rocm')
def is_built_with_rocm():
  """Returns whether TensorFlow was built with ROCm (GPU) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with ROCm (GPU).

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_rocm():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is NOT built with ROCm.

  Returns:
    True if ROCm support was enabled at build time.
  """
  return _test_util.IsBuiltWithROCm()
@tf_export('test.disable_with_predicate')
def disable_with_predicate(pred, skip_message):
  """Skips the decorated test method whenever `pred()` is true.

  Args:
    pred: Zero-argument callable, evaluated each time the test runs.
    skip_message: Message passed to `self.skipTest` when skipping.

  Returns:
    A decorator for `tf.test.TestCase` methods.
  """

  def _decorate(func):

    @functools.wraps(func)
    def _maybe_skip(self, *args, **kwargs):
      # Evaluate the predicate at call time, not decoration time.
      if not pred():
        return func(self, *args, **kwargs)
      self.skipTest(skip_message)

    return _maybe_skip

  return _decorate
@tf_export('test.is_built_with_gpu_support')
def is_built_with_gpu_support():
  """Returns whether TensorFlow was built with GPU (CUDA or ROCm) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with GPU.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_gpu_support():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is built with CUDA GPU support.

  Returns:
    True if either CUDA or ROCm support was enabled at build time.
  """
  # True when either backend-specific check succeeds.
  return is_built_with_cuda() or is_built_with_rocm()
@tf_export('test.is_built_with_xla')
def is_built_with_xla():
  """Returns whether TensorFlow was built with XLA support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with XLA.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_xla(self):
  ...     if not tf.test.is_built_with_xla():
  ...       self.skipTest("test is only applicable on XLA")

  ...     @tf.function(jit_compile=True)
  ...     def add(x, y):
  ...       return tf.math.add(x, y)
  ...
  ...     self.assertEqual(add(tf.ones(()), tf.ones(())), 2.0)

  TensorFlow official binary is built with XLA.

  Returns:
    True if XLA support was enabled at build time.
  """
  return _test_util.IsBuiltWithXLA()
|
{
"content_hash": "6353f5b406c54e021f347dd150c221ce",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 85,
"avg_line_length": 30.698863636363637,
"alnum_prop": 0.6947991856376088,
"repo_name": "petewarden/tensorflow",
"id": "ca48f71b2bbec41f129244890ee732d757099289",
"size": "6093",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/platform/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from openerp.osv import fields, osv
class account_move_line_unreconcile_select(osv.osv_memory):
    """Wizard: pick an account, then list its reconciled journal items."""
    _name = "account.move.line.unreconcile.select"
    _description = "Unreconciliation"
    _columns = {
        'account_id': fields.many2one('account.account', 'Account', required=True),
    }

    def action_open_window(self, cr, uid, ids, context=None):
        """Open a move-line view filtered to reconciled, non-draft lines
        of the account chosen in the wizard form."""
        form = self.read(cr, uid, ids, context=context)[0]
        domain = ("[('account_id','=',%d),('reconcile_id','<>',False),"
                  "('state','<>','draft')]" % form['account_id'])
        return {
            'domain': domain,
            'name': 'Unreconciliation',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'view_id': False,
            'res_model': 'account.move.line',
            'type': 'ir.actions.act_window',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "f03067d5a6f0ed365143d862eda6435e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 124,
"avg_line_length": 40.22727272727273,
"alnum_prop": 0.5649717514124294,
"repo_name": "cristianquaglio/odoo",
"id": "31fbeddeea8d74fe71e153fac7e5da6cbe1de022",
"size": "1864",
"binary": false,
"copies": "378",
"ref": "refs/heads/master",
"path": "addons/account/wizard/account_move_line_unreconcile_select.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "671328"
},
{
"name": "HTML",
"bytes": "212829"
},
{
"name": "JavaScript",
"bytes": "5984109"
},
{
"name": "Makefile",
"bytes": "12332"
},
{
"name": "Mako",
"bytes": "561"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "8366254"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "19163"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "92945"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South migration: creates the blog_post table for the Post model."""
    def forwards(self, orm):
        """Create the blog_post table and emit the post-create signal."""
        # Adding model 'Post'
        db.create_table('blog_post', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=40)),
            ('text', self.gf('ckeditor.fields.RichTextField')()),
            ('status', self.gf('django.db.models.fields.CharField')(max_length=9, default='published')),
            ('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')()),
            ('last_modified', self.gf('django.db.models.fields.DateTimeField')()),
        ))
        db.send_create_signal('blog', ['Post'])
    def backwards(self, orm):
        """Reverse of forwards: drop the blog_post table."""
        # Deleting model 'Post'
        db.delete_table('blog_post')
    # Frozen ORM state used by South to reconstruct the models at this point
    # in history; must stay in sync with the operations above.
    models = {
        'blog.post': {
            'Meta': {'ordering': "['-pub_date']", 'object_name': 'Post'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '40'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '9', 'default': "'published'"}),
            'text': ('ckeditor.fields.RichTextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['blog']
|
{
"content_hash": "3f9394be2413bd806ee328059d60c406",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 111,
"avg_line_length": 47,
"alnum_prop": 0.5833745670460169,
"repo_name": "dangerdak/apuniverse",
"id": "bcd70ef1874d58e050acbea82f3d55b24a7a02b8",
"size": "2045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apuniverse/blog/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "148317"
},
{
"name": "JavaScript",
"bytes": "4039056"
},
{
"name": "Makefile",
"bytes": "5612"
},
{
"name": "Python",
"bytes": "81901"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.shortcuts import redirect
from login import auth
class User_Data(object):
    """Mutable bag of profile fields gathered during an OAuth login."""

    def __init__(self):
        # Every field starts out blank; provider callbacks fill them in later.
        self.username = self.email = ""
        self.default_shipping_address = self.phone_number = ""
        self.tw_id = self.real_name = ""
class oauth(object):
    """Base class for provider-specific OAuth login flows.

    NOTE(review): subclasses appear to be expected to set
    ``self.provider_name`` (read by get_callback_uri) and to implement
    login/callback/get_userdata_and_uuid — confirm against subclasses.
    """
    def __init__(self, request):
        # Resolve the provider callback URL once, from the incoming request.
        self.redirect_uri = self.get_callback_uri(request)
    def login(self, request):
        # Provider-specific: begin the OAuth handshake (subclass responsibility).
        pass
    def callback(self, request):
        # Provider-specific: handle the redirect back from the provider.
        pass
    def get_userdata_and_uuid(self, request):
        # Provider-specific: fetch profile data plus a stable unique id.
        pass
    def init_user(self, uuid, access_token):
        """Create an empty local user for *uuid* unless one already exists.

        Returns False when both user and profile already exist, True after
        creating a fresh empty user; raises RuntimeError if creation fails.
        """
        if auth.hasUser(uuid) and auth.hasProfile(uuid):
            return False
        else:
            if auth.create_empty_user(uuid, self.provider_name, access_token):
                return True
            else:
                raise RuntimeError
    def init_session_with_uuid(self, uuid, request):
        """Start a session for *uuid* and route onwards.

        Redirects to the progress page when a profile exists, otherwise to
        the profile form. Raises RuntimeError when the user is unknown or
        the session cannot be created.
        """
        if auth.hasUser(uuid):
            if auth.create_session(request, uuid):
                if auth.hasProfile(uuid):
                    return redirect("digikey.views.progress_page")
                else:
                    return redirect("login.views.profile")
            else:
                raise RuntimeError
        else:
            raise RuntimeError
    def get_callback_uri(self, request):
        """Return the provider callback URL.

        Uses the hard-coded production host unless settings.DEBUG is on, in
        which case the current request's HTTP_HOST is used instead.
        """
        redirect_uri = "http://chiphub.c4labs.xyz/" + self.provider_name + "_callback"
        if settings.DEBUG == True:
            redirect_uri = "http://" + request.META['HTTP_HOST'] + "/" + self.provider_name + "_callback"
        return redirect_uri
|
{
"content_hash": "a9aee8f9063d2de3e52c8998dfdde841",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 105,
"avg_line_length": 28.821428571428573,
"alnum_prop": 0.5656753407682775,
"repo_name": "sonicyang/chiphub",
"id": "13ed9b0f21120c0cc8e6b51865901611b5fe6be2",
"size": "1614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "login/oauth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "298157"
},
{
"name": "HTML",
"bytes": "89822"
},
{
"name": "JavaScript",
"bytes": "285818"
},
{
"name": "Python",
"bytes": "89817"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from djangocms_googlemap import __version__
# Trove classifiers describing the package for PyPI.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Communications',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
]

# Read the long description with a context manager so the file handle is
# closed deterministically (the original bare open() leaked it).
with open('README.md') as readme_file:
    LONG_DESCRIPTION = readme_file.read()

setup(
    name='djangocms-googlemap',
    version=__version__,
    description='Google Maps plugin for django CMS',
    author='Divio AG',
    author_email='info@divio.ch',
    url='https://github.com/divio/djangocms-googlemap',
    packages=['djangocms_googlemap', 'djangocms_googlemap.migrations', 'djangocms_googlemap.migrations_django'],
    # NOTE(review): `license` should be the license *name* (e.g. 'BSD'),
    # not a file path; kept as-is to avoid changing published metadata.
    license='LICENSE.txt',
    platforms=['OS Independent'],
    classifiers=CLASSIFIERS,
    long_description=LONG_DESCRIPTION,
    include_package_data=True,
    zip_safe=False
)
|
{
"content_hash": "34cd55438ddd2a04b823af842d6aa1ee",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 112,
"avg_line_length": 35.06060606060606,
"alnum_prop": 0.6698357821953328,
"repo_name": "mkost/djangocms-googlemap",
"id": "08b3881e0a96b5a354140496002fa0d1b672ae75",
"size": "1203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3892"
},
{
"name": "Python",
"bytes": "26738"
}
],
"symlink_target": ""
}
|
import pytest
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBasicTextFieldEmbedder(AllenNlpTestCase):
    """Tests for BasicTextFieldEmbedder: output-dim aggregation, input-key
    matching, and forward passes over several token-embedder configurations."""
    def setup_method(self):
        """Build a small vocab plus a three-embedder (2+5+3 dim) fixture."""
        super().setup_method()
        self.vocab = Vocabulary()
        self.vocab.add_token_to_namespace("1")
        self.vocab.add_token_to_namespace("2")
        self.vocab.add_token_to_namespace("3")
        self.vocab.add_token_to_namespace("4")
        params = Params(
            {
                "token_embedders": {
                    "words1": {"type": "embedding", "embedding_dim": 2},
                    "words2": {"type": "embedding", "embedding_dim": 5},
                    "words3": {"type": "embedding", "embedding_dim": 3},
                }
            }
        )
        self.token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
        # One (batch=1, seq=4) index tensor per configured embedder key.
        self.inputs = {
            "words1": {"tokens": torch.LongTensor([[0, 2, 3, 5]])},
            "words2": {"tokens": torch.LongTensor([[1, 4, 3, 2]])},
            "words3": {"tokens": torch.LongTensor([[1, 5, 1, 2]])},
        }
    def test_get_output_dim_aggregates_dimension_from_each_embedding(self):
        # 2 + 5 + 3 from the three embedders configured in setup_method.
        assert self.token_embedder.get_output_dim() == 10
    def test_forward_asserts_input_field_match(self):
        """Either a missing or an extra input key must raise ConfigurationError."""
        # Total mismatch
        self.inputs["words4"] = self.inputs["words3"]
        del self.inputs["words3"]
        with pytest.raises(ConfigurationError) as exc:
            self.token_embedder(self.inputs)
        assert exc.match("Mismatched token keys")
        self.inputs["words3"] = self.inputs["words4"]
        # Text field has too many inputs
        with pytest.raises(ConfigurationError) as exc:
            self.token_embedder(self.inputs)
        assert exc.match("Mismatched token keys")
        del self.inputs["words4"]
    def test_forward_concats_resultant_embeddings(self):
        # Output is (batch, seq, summed embedding dims) = (1, 4, 10).
        assert self.token_embedder(self.inputs).size() == (1, 4, 10)
    def test_forward_works_on_higher_order_input(self):
        """Extra leading dims should pass through via num_wrapping_dims."""
        params = Params(
            {
                "token_embedders": {
                    "words": {"type": "embedding", "num_embeddings": 20, "embedding_dim": 2},
                    "characters": {
                        "type": "character_encoding",
                        "embedding": {"embedding_dim": 4, "num_embeddings": 15},
                        "encoder": {
                            "type": "cnn",
                            "embedding_dim": 4,
                            "num_filters": 10,
                            "ngram_filter_sizes": [3],
                        },
                    },
                }
            }
        )
        token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
        inputs = {
            "words": {"tokens": (torch.rand(3, 4, 5, 6) * 20).long()},
            "characters": {"token_characters": (torch.rand(3, 4, 5, 6, 7) * 15).long()},
        }
        # Last dim is 2 (words) + 10 (cnn filters) = 12.
        assert token_embedder(inputs, num_wrapping_dims=2).size() == (3, 4, 5, 6, 12)
    def test_forward_runs_with_forward_params(self):
        """Extra kwargs given to the embedder reach the inner module's forward."""
        class FakeEmbedder(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, tokens: torch.Tensor, extra_arg: int = None):
                # Both the tokens and the pass-through kwarg must arrive.
                assert tokens is not None
                assert extra_arg is not None
                return tokens
        token_embedder = BasicTextFieldEmbedder({"elmo": FakeEmbedder()})
        inputs = {"elmo": {"elmo_tokens": (torch.rand(3, 6, 5) * 2).long()}}
        kwargs = {"extra_arg": 1}
        token_embedder(inputs, **kwargs)
    def test_forward_runs_with_non_bijective_mapping(self):
        """Embedder keys need not map 1:1 onto input-tensor keys (elmo case)."""
        elmo_fixtures_path = self.FIXTURES_ROOT / "elmo"
        options_file = str(elmo_fixtures_path / "options.json")
        weight_file = str(elmo_fixtures_path / "lm_weights.hdf5")
        params = Params(
            {
                "token_embedders": {
                    "words": {"type": "embedding", "num_embeddings": 20, "embedding_dim": 2},
                    "elmo": {
                        "type": "elmo_token_embedder",
                        "options_file": options_file,
                        "weight_file": weight_file,
                    },
                }
            }
        )
        token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
        inputs = {
            "words": {"tokens": (torch.rand(3, 6) * 20).long()},
            "elmo": {"elmo_tokens": (torch.rand(3, 6, 50) * 15).long()},
        }
        token_embedder(inputs)
    def test_forward_runs_with_non_bijective_mapping_with_null(self):
        """Same as above but with elmo as the only embedder."""
        elmo_fixtures_path = self.FIXTURES_ROOT / "elmo"
        options_file = str(elmo_fixtures_path / "options.json")
        weight_file = str(elmo_fixtures_path / "lm_weights.hdf5")
        params = Params(
            {
                "token_embedders": {
                    "elmo": {
                        "type": "elmo_token_embedder",
                        "options_file": options_file,
                        "weight_file": weight_file,
                    }
                }
            }
        )
        token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
        inputs = {"elmo": {"elmo_tokens": (torch.rand(3, 6, 50) * 15).long()}}
        token_embedder(inputs)
    def test_forward_runs_with_non_bijective_mapping_with_dict(self):
        """Non-bijective mapping alongside a plain word embedding."""
        elmo_fixtures_path = self.FIXTURES_ROOT / "elmo"
        options_file = str(elmo_fixtures_path / "options.json")
        weight_file = str(elmo_fixtures_path / "lm_weights.hdf5")
        params = Params(
            {
                "token_embedders": {
                    "words": {"type": "embedding", "num_embeddings": 20, "embedding_dim": 2},
                    "elmo": {
                        "type": "elmo_token_embedder",
                        "options_file": options_file,
                        "weight_file": weight_file,
                    },
                }
            }
        )
        token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
        inputs = {
            "words": {"tokens": (torch.rand(3, 6) * 20).long()},
            "elmo": {"elmo_tokens": (torch.rand(3, 6, 50) * 15).long()},
        }
        token_embedder(inputs)
    def test_forward_runs_with_bijective_and_non_bijective_mapping(self):
        """Mix a transformer embedder (multi-tensor input) with char encoding."""
        params = Params(
            {
                "token_embedders": {
                    "bert": {"type": "pretrained_transformer", "model_name": "bert-base-uncased"},
                    "token_characters": {
                        "type": "character_encoding",
                        "embedding": {"embedding_dim": 5},
                        "encoder": {
                            "type": "cnn",
                            "embedding_dim": 5,
                            "num_filters": 5,
                            "ngram_filter_sizes": [5],
                        },
                    },
                }
            }
        )
        token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
        inputs = {
            "bert": {
                "token_ids": (torch.rand(3, 5) * 10).long(),
                "mask": (torch.rand(3, 5) * 1).bool(),
            },
            "token_characters": {"token_characters": (torch.rand(3, 5, 5) * 1).long()},
        }
        token_embedder(inputs)
|
{
"content_hash": "6b88fedd623ef61c125912bb6377cdac",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 98,
"avg_line_length": 40.61904761904762,
"alnum_prop": 0.5004559072554383,
"repo_name": "allenai/allennlp",
"id": "7381ac6d246a9ead4833915de046999eaeffae48",
"size": "7677",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/modules/text_field_embedders/basic_text_field_embedder_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39870"
},
{
"name": "Dockerfile",
"bytes": "1190"
},
{
"name": "Jsonnet",
"bytes": "4469"
},
{
"name": "Makefile",
"bytes": "5306"
},
{
"name": "Perl",
"bytes": "101"
},
{
"name": "Python",
"bytes": "3575059"
},
{
"name": "Scilab",
"bytes": "4085"
},
{
"name": "Shell",
"bytes": "2092"
}
],
"symlink_target": ""
}
|
import re
import ConfigParser
from devstack import cfg_helpers
from devstack import date
from devstack import env
from devstack import exceptions as excp
from devstack import log as logging
from devstack import settings
from devstack import shell as sh
from devstack import utils
LOG = logging.getLogger("devstack.cfg")
ENV_PAT = re.compile(r"^\s*\$\{([\w\d]+):\-(.*)\}\s*$")
SUB_MATCH = re.compile(r"(?:\$\(([\w\d]+):([\w\d]+))\)")
CACHE_MSG = "(value will now be internally cached)"
def get_config(cfg_fn=None, cfg_cls=None):
    """Instantiate a config parser and load the stack configuration into it.

    :param cfg_fn: path of the ini file to read; defaults to the canonical
        stack config location.
    :param cfg_cls: parser class to instantiate; defaults to StackConfigParser.
    :returns: the populated parser instance.
    """
    chosen_fn = cfg_fn or sh.canon_path(settings.STACK_CONFIG_LOCATION)
    chosen_cls = cfg_cls or StackConfigParser
    parser = chosen_cls()
    parser.read(chosen_fn)
    return parser
class IgnoreMissingConfigParser(ConfigParser.RawConfigParser):
DEF_INT = 0
DEF_FLOAT = 0.0
DEF_BOOLEAN = False
DEF_BASE = None
def __init__(self):
ConfigParser.RawConfigParser.__init__(self)
#make option names case sensitive
self.optionxform = str
def get(self, section, option):
value = IgnoreMissingConfigParser.DEF_BASE
try:
value = ConfigParser.RawConfigParser.get(self, section, option)
except ConfigParser.NoSectionError:
pass
except ConfigParser.NoOptionError:
pass
return value
def getboolean(self, section, option):
if not self.has_option(section, option):
return IgnoreMissingConfigParser.DEF_BOOLEAN
return ConfigParser.RawConfigParser.getboolean(self, section, option)
def getfloat(self, section, option):
if not self.has_option(section, option):
return IgnoreMissingConfigParser.DEF_FLOAT
return ConfigParser.RawConfigParser.getfloat(self, section, option)
def getint(self, section, option):
if not self.has_option(section, option):
return IgnoreMissingConfigParser.DEF_INT
return ConfigParser.RawConfigParser.getint(self, section, option)
class StackConfigParser(IgnoreMissingConfigParser):
    """Config parser that adds per-key caching, bash-like ``${ENV:-default}``
    environment substitution, and ``$(section:option)`` cross references."""
    def __init__(self):
        IgnoreMissingConfigParser.__init__(self)
        # Cache of fully-resolved values, keyed by cfg_helpers.make_id(section, option).
        self.configs_fetched = dict()
    def _resolve_value(self, section, option, value_gotten):
        """Post-process special keys; currently only auto-detects host/ip."""
        if section == 'host' and option == 'ip':
            LOG.debug("Host ip from configuration/environment was empty, programatically attempting to determine it.")
            value_gotten = utils.get_host_ip()
            LOG.debug("Determined your host ip to be: [%s]" % (value_gotten))
        return value_gotten
    def getdefaulted(self, section, option, default_val):
        """Like get(), but fall back to *default_val* for empty/blank values."""
        val = self.get(section, option)
        if not val or not val.strip():
            LOG.debug("Value [%s] found was not good enough, returning provided default [%s]" % (val, default_val))
            return default_val
        return val
    def get(self, section, option):
        """Return the resolved value for (section, option), caching the result."""
        key = cfg_helpers.make_id(section, option)
        if key in self.configs_fetched:
            value = self.configs_fetched.get(key)
            LOG.debug("Fetched cached value [%s] for param [%s]" % (value, key))
        else:
            LOG.debug("Fetching value for param [%s]" % (key))
            # Resolve env-substitution first, then any special-key handling.
            gotten_value = self._get_bashed(section, option)
            value = self._resolve_value(section, option, gotten_value)
            LOG.debug("Fetched [%s] for [%s] %s" % (value, key, CACHE_MSG))
            self.configs_fetched[key] = value
        return value
    def set(self, section, option, value):
        """Set a value and keep the resolved-value cache in sync."""
        key = cfg_helpers.make_id(section, option)
        LOG.audit("Setting config value [%s] for param [%s]" % (value, key))
        self.configs_fetched[key] = value
        IgnoreMissingConfigParser.set(self, section, option, value)
    def _resolve_replacements(self, value):
        """Expand $(section:option) references embedded in *value*."""
        LOG.debug("Performing simple replacement on [%s]", value)
        #allow for our simple replacement to occur
        def replacer(match):
            section = match.group(1)
            option = match.group(2)
            # Unresolvable references collapse to the empty string.
            return self.getdefaulted(section, option, '')
        return SUB_MATCH.sub(replacer, value)
    def _get_bashed(self, section, option):
        """Fetch the raw value, expanding a ``${ENV_KEY:-default}`` pattern.

        The environment variable wins when set; otherwise the default part is
        used (after $(section:option) expansion). Raises BadParamException
        when both the env key and the default are empty.
        """
        value = IgnoreMissingConfigParser.get(self, section, option)
        if value is None:
            return value
        extracted_val = ''
        mtch = ENV_PAT.match(value)
        if mtch:
            env_key = mtch.group(1).strip()
            def_val = mtch.group(2).strip()
            if not def_val and not env_key:
                msg = "Invalid bash-like value [%s]" % (value)
                raise excp.BadParamException(msg)
            env_value = env.get_key(env_key)
            if env_value is None:
                LOG.debug("Extracting value from config provided default value [%s]" % (def_val))
                extracted_val = self._resolve_replacements(def_val)
                LOG.debug("Using config provided default value [%s] (no environment key)" % (extracted_val))
            else:
                extracted_val = env_value
                LOG.debug("Using enviroment provided value [%s]" % (extracted_val))
        else:
            extracted_val = value
            LOG.debug("Using raw config provided value [%s]" % (extracted_val))
        return extracted_val
def add_header(fn, contents):
    """Prefix *contents* with a provenance header (source file, date, user)
    and return the joined result."""
    header = [
        '# Adjusted source file %s' % (fn.strip()),
        "# On %s" % (date.rcf8222date()),
        "# By user %s, group %s" % (sh.getuser(), sh.getgroupname()),
        "# Comments may have been removed (TODO: darn python config writer)",
        # TODO Maybe use https://code.google.com/p/iniparse/ which seems to preserve comments!
        "",
    ]
    # Empty/None contents yields just the header block.
    if contents:
        header.append(contents)
    return utils.joinlinesep(*header)
|
{
"content_hash": "fc82c94beca4119da48b525da551d89a",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 118,
"avg_line_length": 38.59602649006622,
"alnum_prop": 0.6257721345229924,
"repo_name": "hagleitn/Openstack-Devstack2",
"id": "f7f4e1be58924666f53cea549c8bf28f7173b9f3",
"size": "6505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devstack/cfg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "322122"
},
{
"name": "Shell",
"bytes": "19341"
}
],
"symlink_target": ""
}
|
from video_scripts.video_processor import *
# Ad-hoc example script: feed representative script lines through
# parse_line and print what kind of step object each one produces.
step = parse_line('# just a comment')
print('1:', step)
# A file step without options.
step = parse_line('file path/to/file.avi')
print('2:', step)
if isinstance(step, FileSpec):
    print(step.path, step.start, step.end)
# A file step with start (-s) and end (-e) options.
step = parse_line('file path/to/file.avi -s 20 -e 44.5')
print('3:', step)
if isinstance(step, FileSpec):
    print(step.path, step.start, step.end)
# set-video with a known option plus an unknown one (-abc) and trailing space.
step = parse_line('set-video -gamma 1.4 -abc Abc ')
print('4:', step)
if isinstance(step, OptionSpec):
    print(step.what, step.params)
# Several list- and number-valued options at once.
step = parse_line('set-video -crop 1792,896,64,128 -scale 1280,640 -gamma 1.4 -speed 4 -textlist a,bc,d')
print('5:', step)
if isinstance(step, OptionSpec):
    print(step.what, step.params)
# A quoted option value containing spaces.
step = parse_line('set-video -cited "Ala ma kota"')
print('6:', step)
if isinstance(step, OptionSpec):
    print(step.what, step.params)
# A mix of quoted and unquoted option values.
step = parse_line('set-video -cited "Ala ma kota" -normal Ola')
print('7:', step)
if isinstance(step, OptionSpec):
    print(step.what, step.params)
|
{
"content_hash": "f36420c6de7ffdf7bdb961cd9f1b4fab",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 105,
"avg_line_length": 25.897435897435898,
"alnum_prop": 0.6801980198019802,
"repo_name": "patczar/video-scripts",
"id": "407227a15e159fe98eb1f45392a0d1b67087bae4",
"size": "1010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/examples/parsing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2005"
},
{
"name": "Makefile",
"bytes": "239"
},
{
"name": "Python",
"bytes": "9498"
},
{
"name": "Shell",
"bytes": "59833"
}
],
"symlink_target": ""
}
|
import circuits
import rexviewer as r
import naali
import urllib2 #for js_src downloading
"""
first EC handlers were not 'Naali modules' (circuits components),
but apparently they typically need to get Naali events to handle logout etc.
so am making now so that they are registered to the circuits manager automagically. the reference to the manager is not needed though, 'cause circuits supports
registering new components under a component out of the box.
"""
#import modulemanager
#import core.circuits_manager
#modulemanager_instance = core.circuits_manager.ComponentRunner.instance
"""a registry of component handlers, by type"""
handlertypes = {}
def register(compname, handlertype):
handlertypes[compname] = handlertype
import animsync
register(animsync.COMPNAME, animsync.AnimationSync)
#deprecated - see scenes/Door/
#import door
#register(door.COMPNAME, door.DoorHandler)
import rotate
register(rotate.COMPNAME, rotate.RotationHandler)
#import webmoduleloader
#register("pythonmodule", webmoduleloader.WebPythonmoduleLoader)
class ComponenthandlerRegistry(circuits.BaseComponent):
    """Watches new scenes and attaches a registered handler to every entity
    that gains an EC_DynamicComponent whose name appears in `handlertypes`."""
    def __init__(self):
        circuits.BaseComponent.__init__(self)
    @circuits.handler("on_sceneadded")
    def on_sceneadded(self, name):
        """When a scene appears, subscribe to its component-added signal."""
        #print "Scene added:", name#,
        s = naali.getScene(name)
        #s.connect("ComponentInitialized(Foundation::ComponentInterface*)", self.onComponentInitialized)
        s.connect("ComponentAdded(Scene::Entity*, IComponent*, AttributeChange::Type)", self.onComponentAdded)
    #def onComponentInitialized(self, comp):
    #    print "Comp inited:", comp
    def onComponentAdded(self, entity, comp, changetype):
        """Instantiate and register the handler matching a newly added
        EC_DynamicComponent, if one was registered for its name."""
        #print "Comp added:", entity, comp, changetype
        #print comp.className()
        if comp.className() == "EC_DynamicComponent":
            #print "comp Name:", comp.Name
            if comp.name in handlertypes:
                handlertype = handlertypes[comp.name]
                # Handler signature convention: (entity, component, changetype).
                h = handlertype(entity, comp, changetype)
                self += h #so that handlers get circuits events too
# def make_jssrc_handler(entity, comp, changetype):
# #def handle_js():
# class JsHandler(): #need a functor so that can disconnect itself
# def __call__(self):
# jssrc = comp.GetAttribute("js_src")
# #print "JS SRC:", jssrc
# if jssrc is not None:
# apply_js(jssrc, comp)
# comp.disconnect("OnChanged()", self)
# return JsHandler()
# def apply_js(jssrc, comp):
# jscode = loadjs(jssrc)
# #print jscode
# ctx = {
# #'entity'/'this': self.entity
# 'component': comp
# }
# ent = comp.GetParentEntity()
# try:
# ent.touchable
# except AttributeError:
# pass
# else:
# ctx['touchable'] = ent.touchable
# try:
# ent.placeable
# except:
# pass
# else:
# ctx['placeable'] = ent.placeable
# naali.runjs(jscode, ctx)
# #print "-- done with js"
# def loadjs(srcurl):
# #print "js source url:", srcurl
# f = urllib2.urlopen(srcurl)
# return f.read()
|
{
"content_hash": "5d6405920d59bcc81c2e89809746f712",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 159,
"avg_line_length": 32.81818181818182,
"alnum_prop": 0.6421052631578947,
"repo_name": "antont/tundra",
"id": "520193691f3e1a5a2fe70add879b1a7489406577",
"size": "3610",
"binary": false,
"copies": "1",
"ref": "refs/heads/tundra2",
"path": "src/Application/PythonScriptModule/pymodules_old/apitest/componenthandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "110345"
},
{
"name": "C#",
"bytes": "76173"
},
{
"name": "C++",
"bytes": "4959154"
},
{
"name": "CoffeeScript",
"bytes": "2229"
},
{
"name": "JavaScript",
"bytes": "316308"
},
{
"name": "Objective-C",
"bytes": "222359"
},
{
"name": "Python",
"bytes": "999850"
},
{
"name": "Shell",
"bytes": "8224"
},
{
"name": "TypeScript",
"bytes": "230019"
}
],
"symlink_target": ""
}
|
import warnings
import os
########################################################################
# Note: imports must be limited to the maximum here, and under no circumstances
# import a package that creates a background thread at import time. Failure to
# comply will prevent lisa._unshare._do_unshare() to work correctly, as it
# cannot work if the process is multithreaded when it is called.
########################################################################
from lisa.version import __version__
# Raise an exception when a deprecated API is used from within a lisa.*
# submodule. This ensures that we don't use any deprecated APIs internally, so
# they are only kept for external backward compatibility purposes.
warnings.filterwarnings(
action='error',
category=DeprecationWarning,
module=fr'{__name__}\..*',
)
# When the deprecated APIs are used from __main__ (script or notebook), always
# show the warning
warnings.filterwarnings(
action='always',
category=DeprecationWarning,
module=r'__main__',
)
# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
|
{
"content_hash": "d62355529b071c5a797e3a84e5da7bfd",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 35.58064516129032,
"alnum_prop": 0.6473254759746147,
"repo_name": "credp/lisa",
"id": "05c02b5e2d1c68d81aed04abdc13c3257b4a0fdd",
"size": "1128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lisa/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "18748"
},
{
"name": "Jupyter Notebook",
"bytes": "81363929"
},
{
"name": "Makefile",
"bytes": "4003"
},
{
"name": "Perl",
"bytes": "6106"
},
{
"name": "Python",
"bytes": "2309481"
},
{
"name": "Shell",
"bytes": "108055"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import copy
from collections import namedtuple, defaultdict
from . import utils
from .role import Role
from .member import Member, VoiceState
from .activity import create_activity
from .emoji import Emoji
from .permissions import PermissionOverwrite
from .colour import Colour
from .errors import InvalidArgument, ClientException
from .channel import *
from .enums import VoiceRegion, Status, ChannelType, try_enum, VerificationLevel, ContentFilter, NotificationLevel
from .mixins import Hashable
from .user import User
from .invite import Invite
from .iterators import AuditLogIterator
from .webhook import Webhook
from .widget import Widget
from .asset import Asset
# Lightweight record for a guild ban: the audit reason plus the banned user.
BanEntry = namedtuple('BanEntry', ['reason', 'user'])
class Guild(Hashable):
"""Represents a Discord guild.
This is referred to as a "server" in the official Discord UI.
.. container:: operations
.. describe:: x == y
Checks if two guilds are equal.
.. describe:: x != y
Checks if two guilds are not equal.
.. describe:: hash(x)
Returns the guild's hash.
.. describe:: str(x)
Returns the guild's name.
Attributes
----------
name: :class:`str`
The guild name.
emojis
A :class:`tuple` of :class:`Emoji` that the guild owns.
region: :class:`VoiceRegion`
The region the guild belongs on. There is a chance that the region
will be a :class:`str` if the value is not recognised by the enumerator.
afk_timeout: :class:`int`
The timeout to get sent to the AFK channel.
afk_channel: Optional[:class:`VoiceChannel`]
The channel that denotes the AFK channel. None if it doesn't exist.
icon: Optional[:class:`str`]
The guild's icon.
id: :class:`int`
The guild's ID.
owner_id: :class:`int`
The guild owner's ID. Use :attr:`Guild.owner` instead.
unavailable: :class:`bool`
Indicates if the guild is unavailable. If this is ``True`` then the
reliability of other attributes outside of :meth:`Guild.id` is slim and they might
all be None. It is best to not do anything with the guild if it is unavailable.
Check the :func:`on_guild_unavailable` and :func:`on_guild_available` events.
max_presences: Optional[:class:`int`]
The maximum amount of presences for the guild.
max_members: Optional[:class:`int`]
The maximum amount of members for the guild.
banner: Optional[:class:`str`]
The guild's banner.
description: Optional[:class:`str`]
The guild's description.
mfa_level: :class:`int`
Indicates the guild's two factor authorisation level. If this value is 0 then
the guild does not require 2FA for their administrative members. If the value is
1 then they do.
verification_level: :class:`VerificationLevel`
The guild's verification level.
explicit_content_filter: :class:`ContentFilter`
The guild's explicit content filter.
default_notifications: :class:`NotificationLevel`
The guild's notification settings.
features: List[:class:`str`]
A list of features that the guild has. They are currently as follows:
- ``VIP_REGIONS``: Guild has VIP voice regions
- ``VANITY_URL``: Guild has a vanity invite URL (e.g. discord.gg/discord-api)
- ``INVITE_SPLASH``: Guild's invite page has a special splash.
- ``VERIFIED``: Guild is a "verified" server.
- ``MORE_EMOJI``: Guild is allowed to have more than 50 custom emoji.
splash: Optional[:class:`str`]
The guild's invite splash.
"""
__slots__ = ('afk_timeout', 'afk_channel', '_members', '_channels', 'icon',
'name', 'id', 'unavailable', 'banner', 'region', '_state',
'_default_role', '_roles', '_member_count', '_large',
'owner_id', 'mfa_level', 'emojis', 'features',
'verification_level', 'explicit_content_filter', 'splash',
'_voice_states', '_system_channel_id', 'default_notifications',
'description', 'max_presences', 'max_members', 'premium_tier')
    def __init__(self, *, data, state):
        """Build a Guild from a raw guild payload.

        Parameters are keyword-only: ``data`` is the raw guild dict from the
        gateway/HTTP layer and ``state`` is the shared connection state used
        for HTTP access and caching.
        """
        # ID-keyed caches; populated by _from_data below.
        self._channels = {}
        self._members = {}
        self._voice_states = {}
        self._state = state
        # parse last so the helpers it calls can rely on the caches above
        self._from_data(data)
def _add_channel(self, channel):
self._channels[channel.id] = channel
def _remove_channel(self, channel):
self._channels.pop(channel.id, None)
def _voice_state_for(self, user_id):
return self._voice_states.get(user_id)
def _add_member(self, member):
self._members[member.id] = member
def _remove_member(self, member):
self._members.pop(member.id, None)
    def __str__(self):
        # str(guild) is simply the guild's name
        return self.name
def __repr__(self):
return '<Guild id={0.id} name={0.name!r} chunked={0.chunked}>'.format(self)
    def _update_voice_state(self, data, channel_id):
        # Apply a voice-state payload to the cache and return the tuple
        # (member, before, after) used for event dispatch. ``before`` is a
        # shallow copy of the pre-update state, ``after`` the updated one.
        user_id = int(data['user_id'])
        channel = self.get_channel(channel_id)
        try:
            # check if we should remove the voice state from cache
            if channel is None:
                after = self._voice_states.pop(user_id)
            else:
                after = self._voice_states[user_id]
            before = copy.copy(after)
            after._update(data, channel)
        except KeyError:
            # if we're here then we're getting added into the cache
            after = VoiceState(data=data, channel=channel)
            before = VoiceState(data=data, channel=None)
            self._voice_states[user_id] = after
        member = self.get_member(user_id)
        return member, before, after
def _add_role(self, role):
# roles get added to the bottom (position 1, pos 0 is @everyone)
# so since self.roles has the @everyone role, we can't increment
# its position because it's stuck at position 0. Luckily x += False
# is equivalent to adding 0. So we cast the position to a bool and
# increment it.
for r in self._roles.values():
r.position += (not r.is_default())
self._roles[role.id] = role
def _remove_role(self, role_id):
# this raises KeyError if it fails..
role = self._roles.pop(role_id)
# since it didn't, we can change the positions now
# basically the same as above except we only decrement
# the position if we're above the role we deleted.
for r in self._roles.values():
r.position -= r.position > role.position
return role
    def _from_data(self, guild):
        # Populate all attributes from a raw guild payload. Also invoked on
        # guild updates, where keys may be missing — hence the .get defaults.
        # according to Stan, this is always available even if the guild is unavailable
        # I don't have this guarantee when someone updates the guild.
        member_count = guild.get('member_count', None)
        if member_count:
            self._member_count = member_count
        self.name = guild.get('name')
        self.region = try_enum(VoiceRegion, guild.get('region'))
        self.verification_level = try_enum(VerificationLevel, guild.get('verification_level'))
        self.default_notifications = try_enum(NotificationLevel, guild.get('default_message_notifications'))
        self.explicit_content_filter = try_enum(ContentFilter, guild.get('explicit_content_filter', 0))
        self.afk_timeout = guild.get('afk_timeout')
        self.icon = guild.get('icon')
        self.banner = guild.get('banner')
        self.unavailable = guild.get('unavailable', False)
        self.id = int(guild['id'])
        self._roles = {}
        state = self._state  # speed up attribute access
        for r in guild.get('roles', []):
            role = Role(guild=self, data=r, state=state)
            self._roles[role.id] = role
        self.mfa_level = guild.get('mfa_level')
        self.emojis = tuple(map(lambda d: state.store_emoji(self, d), guild.get('emojis', [])))
        self.features = guild.get('features', [])
        self.splash = guild.get('splash')
        self._system_channel_id = utils._get_as_snowflake(guild, 'system_channel_id')
        self.description = guild.get('description')
        self.max_presences = guild.get('max_presences')
        self.max_members = guild.get('max_members')
        self.premium_tier = guild.get('premium_tier')
        for mdata in guild.get('members', []):
            member = Member(data=mdata, guild=self, state=state)
            self._add_member(member)
        # channels/presences/large ride along in the same payload
        self._sync(guild)
        # 250 is the library's fixed large_threshold (see Guild.large)
        self._large = None if member_count is None else self._member_count >= 250
        self.owner_id = utils._get_as_snowflake(guild, 'owner_id')
        self.afk_channel = self.get_channel(utils._get_as_snowflake(guild, 'afk_channel_id'))
        for obj in guild.get('voice_states', []):
            self._update_voice_state(obj, int(obj['channel_id']))
    def _sync(self, data):
        # Merge 'large', presence, and channel payloads into the caches.
        try:
            self._large = data['large']
        except KeyError:
            pass
        empty_tuple = tuple()
        for presence in data.get('presences', []):
            user_id = int(presence['user']['id'])
            member = self.get_member(user_id)
            if member is not None:
                member._presence_update(presence, empty_tuple)
        # channel payloads are dispatched on their raw integer 'type'
        if 'channels' in data:
            channels = data['channels']
            for c in channels:
                c_type = c['type']
                if c_type in (ChannelType.text.value, ChannelType.news.value):
                    self._add_channel(TextChannel(guild=self, data=c, state=self._state))
                elif c_type == ChannelType.voice.value:
                    self._add_channel(VoiceChannel(guild=self, data=c, state=self._state))
                elif c_type == ChannelType.category.value:
                    self._add_channel(CategoryChannel(guild=self, data=c, state=self._state))
                elif c_type == ChannelType.store.value:
                    self._add_channel(StoreChannel(guild=self, data=c, state=self._state))
@property
def channels(self):
"""List[:class:`abc.GuildChannel`]: A list of channels that belongs to this guild."""
return list(self._channels.values())
@property
def large(self):
""":class:`bool`: Indicates if the guild is a 'large' guild.
A large guild is defined as having more than ``large_threshold`` count
members, which for this library is set to the maximum of 250.
"""
if self._large is None:
try:
return self._member_count >= 250
except AttributeError:
return len(self._members) >= 250
return self._large
@property
def voice_channels(self):
"""List[:class:`VoiceChannel`]: A list of voice channels that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, VoiceChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
@property
def me(self):
"""Similar to :attr:`Client.user` except an instance of :class:`Member`.
This is essentially used to get the member version of yourself.
"""
self_id = self._state.user.id
return self.get_member(self_id)
@property
def voice_client(self):
"""Returns the :class:`VoiceClient` associated with this guild, if any."""
return self._state._get_voice_client(self.id)
@property
def text_channels(self):
"""List[:class:`TextChannel`]: A list of text channels that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, TextChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
@property
def categories(self):
"""List[:class:`CategoryChannel`]: A list of categories that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, CategoryChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
def by_category(self):
"""Returns every :class:`CategoryChannel` and their associated channels.
These channels and categories are sorted in the official Discord UI order.
If the channels do not have a category, then the first element of the tuple is
``None``.
Returns
--------
List[Tuple[Optional[:class:`CategoryChannel`], List[:class:`abc.GuildChannel`]]]:
The categories and their associated channels.
"""
grouped = defaultdict(list)
for channel in self._channels.values():
if isinstance(channel, CategoryChannel):
continue
grouped[channel.category_id].append(channel)
def key(t):
k, v = t
return ((k.position, k.id) if k else (-1, -1), v)
_get = self._channels.get
as_list = [(_get(k), v) for k, v in grouped.items()]
as_list.sort(key=key)
for _, channels in as_list:
channels.sort(key=lambda c: (c._sorting_bucket, c.position, c.id))
return as_list
def get_channel(self, channel_id):
"""Returns a :class:`abc.GuildChannel` with the given ID. If not found, returns None."""
return self._channels.get(channel_id)
@property
def system_channel(self):
"""Optional[:class:`TextChannel`]: Returns the guild's channel used for system messages.
Currently this is only for new member joins. If no channel is set, then this returns ``None``.
"""
channel_id = self._system_channel_id
return channel_id and self._channels.get(channel_id)
@property
def members(self):
"""List[:class:`Member`]: A list of members that belong to this guild."""
return list(self._members.values())
def get_member(self, user_id):
"""Returns a :class:`Member` with the given ID. If not found, returns None."""
return self._members.get(user_id)
@property
def roles(self):
"""Returns a :class:`list` of the guild's roles in hierarchy order.
The first element of this list will be the lowest role in the
hierarchy.
"""
return sorted(self._roles.values())
def get_role(self, role_id):
"""Returns a :class:`Role` with the given ID. If not found, returns None."""
return self._roles.get(role_id)
@utils.cached_slot_property('_default_role')
def default_role(self):
"""Gets the @everyone role that all members have by default."""
return utils.find(lambda r: r.is_default(), self._roles.values())
    @property
    def owner(self):
        """:class:`Member`: The member that owns the guild.

        Resolved from :attr:`owner_id` via the member cache; ``None`` when
        the owner is not in the cache.
        """
        return self.get_member(self.owner_id)
    @property
    def icon_url(self):
        """Returns the URL version of the guild's icon. Returns an empty string if it has no icon.

        Shorthand for :meth:`icon_url_as` with its default parameters
        (``webp`` format, size 1024).
        """
        return self.icon_url_as()
    def icon_url_as(self, *, format='webp', size=1024):
        """Returns a friendly URL version of the guild's icon. Returns an empty string if it has no icon.
        The format must be one of 'webp', 'jpeg', 'jpg', or 'png'. The
        size must be a power of 2 between 16 and 4096.
        Parameters
        -----------
        format: :class:`str`
            The format to attempt to convert the icon to.
        size: :class:`int`
            The size of the image to display.
        Raises
        ------
        InvalidArgument
            Bad image format passed to ``format`` or invalid ``size``.
        Returns
        --------
        :class:`Asset`
            The resulting CDN asset.
        """
        # NOTE(review): format/size validation presumably happens inside
        # Asset._from_guild_image — not visible from this file.
        return Asset._from_guild_image(self._state, self.id, self.icon, 'icons', format=format, size=size)
    @property
    def banner_url(self):
        """Returns the URL version of the guild's banner. Returns an empty string if it has no banner.

        Shorthand for :meth:`banner_url_as` with its default parameters
        (``webp`` format, size 2048).
        """
        return self.banner_url_as()
    def banner_url_as(self, *, format='webp', size=2048):
        """Returns a friendly URL version of the guild's banner. Returns an empty string if it has no banner.
        The format must be one of 'webp', 'jpeg', or 'png'. The
        size must be a power of 2 between 16 and 4096.
        Parameters
        -----------
        format: :class:`str`
            The format to attempt to convert the banner to.
        size: :class:`int`
            The size of the image to display.
        Raises
        ------
        InvalidArgument
            Bad image format passed to ``format`` or invalid ``size``.
        Returns
        --------
        :class:`Asset`
            The resulting CDN asset.
        """
        # NOTE(review): format/size validation presumably happens inside
        # Asset._from_guild_image — not visible from this file.
        return Asset._from_guild_image(self._state, self.id, self.banner, 'banners', format=format, size=size)
    @property
    def splash_url(self):
        """Returns the URL version of the guild's invite splash. Returns an empty string if it has no splash.

        Shorthand for :meth:`splash_url_as` with its default parameters
        (``webp`` format, size 2048).
        """
        return self.splash_url_as()
    def splash_url_as(self, *, format='webp', size=2048):
        """Returns a friendly URL version of the guild's invite splash. Returns an empty string if it has no splash.
        The format must be one of 'webp', 'jpeg', 'jpg', or 'png'. The
        size must be a power of 2 between 16 and 4096.
        Parameters
        -----------
        format: :class:`str`
            The format to attempt to convert the splash to.
        size: :class:`int`
            The size of the image to display.
        Raises
        ------
        InvalidArgument
            Bad image format passed to ``format`` or invalid ``size``.
        Returns
        --------
        :class:`Asset`
            The resulting CDN asset.
        """
        # NOTE(review): format/size validation presumably happens inside
        # Asset._from_guild_image — not visible from this file.
        return Asset._from_guild_image(self._state, self.id, self.splash, 'splashes', format=format, size=size)
    @property
    def member_count(self):
        """Returns the true member count regardless of it being loaded fully or not.

        .. note::
            ``_member_count`` is only set when the gateway supplied a
            ``member_count`` (see ``_from_data``); if it never did, this
            raises :exc:`AttributeError`.
        """
        return self._member_count
@property
def chunked(self):
"""Returns a boolean indicating if the guild is "chunked".
A chunked guild means that :attr:`member_count` is equal to the
number of members stored in the internal :attr:`members` cache.
If this value returns ``False``, then you should request for
offline members.
"""
count = getattr(self, '_member_count', None)
if count is None:
return False
return count == len(self._members)
@property
def shard_id(self):
"""Returns the shard ID for this guild if applicable."""
count = self._state.shard_count
if count is None:
return None
return (self.id >> 22) % count
    @property
    def created_at(self):
        """Returns the guild's creation time in UTC.

        Derived from the timestamp embedded in the guild's snowflake ID.
        """
        return utils.snowflake_time(self.id)
def get_member_named(self, name):
"""Returns the first member found that matches the name provided.
The name can have an optional discriminator argument, e.g. "Jake#0001"
or "Jake" will both do the lookup. However the former will give a more
precise result. Note that the discriminator must have all 4 digits
for this to work.
If a nickname is passed, then it is looked up via the nickname. Note
however, that a nickname + discriminator combo will not lookup the nickname
but rather the username + discriminator combo due to nickname + discriminator
not being unique.
If no member is found, ``None`` is returned.
Parameters
-----------
name: :class:`str`
The name of the member to lookup with an optional discriminator.
Returns
--------
:class:`Member`
The member in this guild with the associated name. If not found
then ``None`` is returned.
"""
result = None
members = self.members
if len(name) > 5 and name[-5] == '#':
# The 5 length is checking to see if #0000 is in the string,
# as a#0000 has a length of 6, the minimum for a potential
# discriminator lookup.
potential_discriminator = name[-4:]
# do the actual lookup and return if found
# if it isn't found then we'll do a full name lookup below.
result = utils.get(members, name=name[:-5], discriminator=potential_discriminator)
if result is not None:
return result
def pred(m):
return m.nick == name or m.name == name
return utils.find(pred, members)
def _create_channel(self, name, overwrites, channel_type, category=None, **options):
if overwrites is None:
overwrites = {}
elif not isinstance(overwrites, dict):
raise InvalidArgument('overwrites parameter expects a dict.')
perms = []
for target, perm in overwrites.items():
if not isinstance(perm, PermissionOverwrite):
raise InvalidArgument('Expected PermissionOverwrite received {0.__name__}'.format(type(perm)))
allow, deny = perm.pair()
payload = {
'allow': allow.value,
'deny': deny.value,
'id': target.id
}
if isinstance(target, Role):
payload['type'] = 'role'
else:
payload['type'] = 'member'
perms.append(payload)
try:
options['rate_limit_per_user'] = options.pop('slowmode_delay')
except KeyError:
pass
parent_id = category.id if category else None
return self._state.http.create_channel(self.id, channel_type.value, name=name, parent_id=parent_id,
permission_overwrites=perms, **options)
async def create_text_channel(self, name, *, overwrites=None, category=None, reason=None, **options):
"""|coro|
Creates a :class:`TextChannel` for the guild.
Note that you need the :attr:`~Permissions.manage_channels` permission
to create the channel.
The ``overwrites`` parameter can be used to create a 'secret'
channel upon creation. This parameter expects a :class:`dict` of
overwrites with the target (either a :class:`Member` or a :class:`Role`)
as the key and a :class:`PermissionOverwrite` as the value.
.. note::
Creating a channel of a specified position will not update the position of
other channels to follow suit. A follow-up call to :meth:`~TextChannel.edit`
will be required to update the position of the channel in the channel list.
Examples
----------
Creating a basic channel:
.. code-block:: python3
channel = await guild.create_text_channel('cool-channel')
Creating a "secret" channel:
.. code-block:: python3
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
channel = await guild.create_text_channel('secret', overwrites=overwrites)
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
topic: Optional[:class:`str`]
The new channel's topic.
slowmode_delay: :class:`int`
Specifies the slowmode rate limit for user in this channel, in seconds.
The maximum value possible is `21600`.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`TextChannel`
The channel that was just created.
"""
data = await self._create_channel(name, overwrites, ChannelType.text, category, reason=reason, **options)
channel = TextChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_voice_channel(self, name, *, overwrites=None, category=None, reason=None, **options):
"""|coro|
This is similar to :meth:`create_text_channel` except makes a :class:`VoiceChannel` instead, in addition
to having the following new parameters.
Parameters
-----------
bitrate: :class:`int`
The channel's preferred audio bitrate in bits per second.
user_limit: :class:`int`
The channel's limit for number of members that can be in a voice channel.
"""
data = await self._create_channel(name, overwrites, ChannelType.voice, category, reason=reason, **options)
channel = VoiceChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_category(self, name, *, overwrites=None, reason=None):
"""|coro|
Same as :meth:`create_text_channel` except makes a :class:`CategoryChannel` instead.
.. note::
The ``category`` parameter is not supported in this function since categories
cannot have categories.
"""
data = await self._create_channel(name, overwrites, ChannelType.category, reason=reason)
channel = CategoryChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
create_category_channel = create_category
    async def leave(self):
        """|coro|
        Leaves the guild.
        .. note::
            You cannot leave the guild that you own, you must delete it instead
            via :meth:`delete`.
        Raises
        --------
        HTTPException
            Leaving the guild failed.
        """
        # single HTTP call — cache cleanup presumably happens when the
        # corresponding gateway event arrives (not visible here)
        await self._state.http.leave_guild(self.id)
    async def delete(self):
        """|coro|
        Deletes the guild. You must be the guild owner to delete the
        guild.
        Raises
        --------
        HTTPException
            Deleting the guild failed.
        Forbidden
            You do not have permissions to delete the guild.
        """
        # single HTTP call — cache cleanup presumably happens when the
        # corresponding gateway event arrives (not visible here)
        await self._state.http.delete_guild(self.id)
    async def edit(self, *, reason=None, **fields):
        """|coro|
        Edits the guild.
        You must have the :attr:`~Permissions.manage_guild` permission
        to edit the guild.
        Parameters
        ----------
        name: :class:`str`
            The new name of the guild.
        description: :class:`str`
            The new description of the guild. This is only available to guilds that
            contain `VERIFIED` in :attr:`Guild.features`.
        icon: :class:`bytes`
            A :term:`py:bytes-like object` representing the icon. Only PNG/JPEG supported.
            Could be ``None`` to denote removal of the icon.
        banner: :class:`bytes`
            A :term:`py:bytes-like object` representing the banner.
            Could be ``None`` to denote removal of the banner.
        splash: :class:`bytes`
            A :term:`py:bytes-like object` representing the invite splash.
            Only PNG/JPEG supported. Could be ``None`` to denote removing the
            splash. Only available for partnered guilds with ``INVITE_SPLASH``
            feature.
        region: :class:`VoiceRegion`
            The new region for the guild's voice communication.
        afk_channel: Optional[:class:`VoiceChannel`]
            The new channel that is the AFK channel. Could be ``None`` for no AFK channel.
        afk_timeout: :class:`int`
            The number of seconds until someone is moved to the AFK channel.
        owner: :class:`Member`
            The new owner of the guild to transfer ownership to. Note that you must
            be owner of the guild to do this.
        verification_level: :class:`VerificationLevel`
            The new verification level for the guild.
        default_notifications: :class:`NotificationLevel`
            The new default notification level for the guild.
        explicit_content_filter: :class:`ContentFilter`
            The new explicit content filter for the guild.
        vanity_code: :class:`str`
            The new vanity code for the guild.
        system_channel: Optional[:class:`TextChannel`]
            The new channel that is used for the system channel. Could be ``None`` for no system channel.
        reason: Optional[:class:`str`]
            The reason for editing this guild. Shows up on the audit log.
        Raises
        -------
        Forbidden
            You do not have permissions to edit the guild.
        HTTPException
            Editing the guild failed.
        InvalidArgument
            The image format passed in to ``icon`` is invalid. It must be
            PNG or JPG. This is also raised if you are not the owner of the
            guild and request an ownership transfer.
        """
        http = self._state.http
        # image fields: absent -> keep current value; None -> remove;
        # bytes -> encode to a base64 data URI for the API
        try:
            icon_bytes = fields['icon']
        except KeyError:
            icon = self.icon
        else:
            if icon_bytes is not None:
                icon = utils._bytes_to_base64_data(icon_bytes)
            else:
                icon = None
        try:
            banner_bytes = fields['banner']
        except KeyError:
            banner = self.banner
        else:
            if banner_bytes is not None:
                banner = utils._bytes_to_base64_data(banner_bytes)
            else:
                banner = None
        # vanity codes go through their own endpoint, not edit_guild
        try:
            vanity_code = fields['vanity_code']
        except KeyError:
            pass
        else:
            await http.change_vanity_code(self.id, vanity_code, reason=reason)
        try:
            splash_bytes = fields['splash']
        except KeyError:
            splash = self.splash
        else:
            if splash_bytes is not None:
                splash = utils._bytes_to_base64_data(splash_bytes)
            else:
                splash = None
        fields['icon'] = icon
        fields['banner'] = banner
        fields['splash'] = splash
        # translate friendly keyword names into the wire-format field names
        try:
            default_message_notifications = int(fields.pop('default_notifications'))
        except (TypeError, KeyError):
            pass
        else:
            fields['default_message_notifications'] = default_message_notifications
        try:
            afk_channel = fields.pop('afk_channel')
        except KeyError:
            pass
        else:
            if afk_channel is None:
                fields['afk_channel_id'] = afk_channel
            else:
                fields['afk_channel_id'] = afk_channel.id
        try:
            system_channel = fields.pop('system_channel')
        except KeyError:
            pass
        else:
            if system_channel is None:
                fields['system_channel_id'] = system_channel
            else:
                fields['system_channel_id'] = system_channel.id
        if 'owner' in fields:
            # ownership transfer is owner-only; fail fast client-side
            if self.owner != self.me:
                raise InvalidArgument('To transfer ownership you must be the owner of the guild.')
            fields['owner_id'] = fields['owner'].id
        if 'region' in fields:
            fields['region'] = str(fields['region'])
        # enum-typed fields are validated then collapsed to their raw values
        level = fields.get('verification_level', self.verification_level)
        if not isinstance(level, VerificationLevel):
            raise InvalidArgument('verification_level field must be of type VerificationLevel')
        fields['verification_level'] = level.value
        explicit_content_filter = fields.get('explicit_content_filter', self.explicit_content_filter)
        if not isinstance(explicit_content_filter, ContentFilter):
            raise InvalidArgument('explicit_content_filter field must be of type ContentFilter')
        fields['explicit_content_filter'] = explicit_content_filter.value
        await http.edit_guild(self.id, reason=reason, **fields)
    async def fetch_member(self, member_id):
        """|coro|
        Retrieves a :class:`Member` from a guild ID, and a member ID.
        .. note::
            This method is an API call. For general usage, consider :meth:`get_member` instead.
        Parameters
        -----------
        member_id: :class:`int`
            The member's ID to fetch from.
        Raises
        -------
        Forbidden
            You do not have access to the guild.
        HTTPException
            Getting the guild failed.
        Returns
        --------
        :class:`Member`
            The member from the member ID.
        """
        # note: the fetched member is NOT inserted into the member cache
        data = await self._state.http.get_member(self.id, member_id)
        return Member(data=data, state=self._state, guild=self)
async def fetch_ban(self, user):
"""|coro|
Retrieves the :class:`BanEntry` for a user, which is a namedtuple
with a ``user`` and ``reason`` field. See :meth:`bans` for more
information.
You must have the :attr:`~Permissions.ban_members` permission
to get this information.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to get ban information from.
Raises
------
Forbidden
You do not have proper permissions to get the information.
NotFound
This user is not banned.
HTTPException
An error occurred while fetching the information.
Returns
-------
BanEntry
The BanEntry object for the specified user.
"""
data = await self._state.http.get_ban(user.id, self.id)
return BanEntry(
user=User(state=self._state, data=data['user']),
reason=data['reason']
)
async def bans(self):
"""|coro|
Retrieves all the users that are banned from the guild.
This coroutine returns a :class:`list` of BanEntry objects, which is a
namedtuple with a ``user`` field to denote the :class:`User`
that got banned along with a ``reason`` field specifying
why the user was banned that could be set to ``None``.
You must have the :attr:`~Permissions.ban_members` permission
to get this information.
Raises
-------
Forbidden
You do not have proper permissions to get the information.
HTTPException
An error occurred while fetching the information.
Returns
--------
List[BanEntry]
A list of BanEntry objects.
"""
data = await self._state.http.get_bans(self.id)
return [BanEntry(user=User(state=self._state, data=e['user']),
reason=e['reason'])
for e in data]
async def prune_members(self, *, days, compute_prune_count=True, reason=None):
r"""|coro|
Prunes the guild from its inactive members.
The inactive members are denoted if they have not logged on in
``days`` number of days and they have no roles.
You must have the :attr:`~Permissions.kick_members` permission
to use this.
To check how many members you would prune without actually pruning,
see the :meth:`estimate_pruned_members` function.
Parameters
-----------
days: :class:`int`
The number of days before counting as inactive.
reason: Optional[:class:`str`]
The reason for doing this action. Shows up on the audit log.
compute_prune_count: :class:`bool`
Whether to compute the prune count. This defaults to ``True``
which makes it prone to timeouts in very large guilds. In order
to prevent timeouts, you must set this to ``False``. If this is
set to ``False``\, then this function will always return ``None``.
Raises
-------
Forbidden
You do not have permissions to prune members.
HTTPException
An error occurred while pruning members.
InvalidArgument
An integer was not passed for ``days``.
Returns
---------
Optional[:class:`int`]
The number of members pruned. If ``compute_prune_count`` is ``False``
then this returns ``None``.
"""
if not isinstance(days, int):
raise InvalidArgument('Expected int for ``days``, received {0.__class__.__name__} instead.'.format(days))
data = await self._state.http.prune_members(self.id, days, compute_prune_count=compute_prune_count, reason=reason)
return data['pruned']
async def webhooks(self):
"""|coro|
Gets the list of webhooks from this guild.
Requires :attr:`~.Permissions.manage_webhooks` permissions.
Raises
-------
Forbidden
You don't have permissions to get the webhooks.
Returns
--------
List[:class:`Webhook`]
The webhooks for this guild.
"""
data = await self._state.http.guild_webhooks(self.id)
return [Webhook.from_state(d, state=self._state) for d in data]
async def estimate_pruned_members(self, *, days):
"""|coro|
Similar to :meth:`prune_members` except instead of actually
pruning members, it returns how many members it would prune
from the guild had it been called.
Parameters
-----------
days: :class:`int`
The number of days before counting as inactive.
Raises
-------
Forbidden
You do not have permissions to prune members.
HTTPException
An error occurred while fetching the prune members estimate.
InvalidArgument
An integer was not passed for ``days``.
Returns
---------
:class:`int`
The number of members estimated to be pruned.
"""
if not isinstance(days, int):
raise InvalidArgument('Expected int for ``days``, received {0.__class__.__name__} instead.'.format(days))
data = await self._state.http.estimate_pruned_members(self.id, days)
return data['pruned']
async def invites(self):
"""|coro|
Returns a list of all active instant invites from the guild.
You must have the :attr:`~Permissions.manage_guild` permission to get
this information.
Raises
-------
Forbidden
You do not have proper permissions to get the information.
HTTPException
An error occurred while fetching the information.
Returns
-------
List[:class:`Invite`]
The list of invites that are currently active.
"""
data = await self._state.http.invites_from(self.id)
result = []
for invite in data:
channel = self.get_channel(int(invite['channel']['id']))
invite['channel'] = channel
invite['guild'] = self
result.append(Invite(state=self._state, data=invite))
return result
async def fetch_emojis(self):
r"""|coro|
Retrieves all custom :class:`Emoji`\s from the guild.
.. note::
This method is an API call. For general usage, consider :attr:`emojis` instead.
Raises
---------
HTTPException
An error occurred fetching the emojis.
Returns
--------
List[:class:`Emoji`]
The retrieved emojis.
"""
data = await self._state.http.get_all_custom_emojis(self.id)
return [Emoji(guild=self, state=self._state, data=d) for d in data]
async def fetch_emoji(self, emoji_id):
    """|coro|

    Fetches a single custom :class:`Emoji` from the guild by ID.

    .. note::

        This method is an API call.
        For general usage, consider iterating over :attr:`emojis` instead.

    Parameters
    -------------
    emoji_id: :class:`int`
        The emoji's ID.

    Raises
    ---------
    NotFound
        The emoji requested could not be found.
    HTTPException
        An error occurred fetching the emoji.

    Returns
    --------
    :class:`Emoji`
        The retrieved emoji.
    """
    payload = await self._state.http.get_custom_emoji(self.id, emoji_id)
    return Emoji(guild=self, state=self._state, data=payload)
async def create_custom_emoji(self, *, name, image, roles=None, reason=None):
    r"""|coro|

    Creates a custom :class:`Emoji` for the guild.

    There is currently a limit of 50 static and animated emojis
    respectively per guild, unless the guild has the ``MORE_EMOJI``
    feature which extends the limit to 200.

    You must have the :attr:`~Permissions.manage_emojis` permission to
    do this.

    Parameters
    -----------
    name: :class:`str`
        The emoji name. Must be at least 2 characters.
    image: :class:`bytes`
        The :term:`py:bytes-like object` representing the image data to use.
        Only JPG, PNG and GIF images are supported.
    roles: Optional[List[:class:`Role`]]
        A :class:`list` of :class:`Role`\s that can use this emoji.
        Leave empty to make it available to everyone.
    reason: Optional[:class:`str`]
        The reason for creating this emoji. Shows up on the audit log.

    Raises
    -------
    Forbidden
        You are not allowed to create emojis.
    HTTPException
        An error occurred creating an emoji.

    Returns
    --------
    :class:`Emoji`
        The created emoji.
    """
    image_data = utils._bytes_to_base64_data(image)
    # Collapse Role objects down to their ids; a falsy value is passed through.
    role_ids = [role.id for role in roles] if roles else roles
    payload = await self._state.http.create_custom_emoji(
        self.id, name, image_data, roles=role_ids, reason=reason)
    return self._state.store_emoji(self, payload)
async def create_role(self, *, reason=None, **fields):
    """|coro|

    Creates a :class:`Role` for the guild. All fields are optional.

    You must have the :attr:`~Permissions.manage_roles` permission to
    do this.

    Parameters
    -----------
    name: :class:`str`
        The role name. Defaults to 'new role'.
    permissions: :class:`Permissions`
        The permissions to have. Defaults to no permissions.
    colour: :class:`Colour`
        The colour for the role. Defaults to :meth:`Colour.default`.
        This is aliased to ``color`` as well.
    hoist: :class:`bool`
        Indicates if the role should be shown separately in the member list.
        Defaults to False.
    mentionable: :class:`bool`
        Indicates if the role should be mentionable by others.
        Defaults to False.
    reason: Optional[:class:`str`]
        The reason for creating this role. Shows up on the audit log.

    Raises
    -------
    Forbidden
        You do not have permissions to create the role.
    HTTPException
        Creating the role failed.
    InvalidArgument
        An invalid keyword argument was given.

    Returns
    --------
    :class:`Role`
        The newly created role.
    """
    # Normalise the Permissions object down to its raw integer value.
    if 'permissions' in fields:
        fields['permissions'] = fields.pop('permissions').value
    else:
        fields['permissions'] = 0

    # ``colour`` is the canonical keyword; fall back to the ``color``
    # alias, then the default colour. The API itself expects ``color``.
    if 'colour' in fields:
        colour = fields.pop('colour')
    else:
        colour = fields.get('color', Colour.default())
    fields['color'] = colour.value

    allowed = ('name', 'permissions', 'color', 'hoist', 'mentionable')
    for key in fields:
        if key not in allowed:
            raise InvalidArgument('%r is not a valid field.' % key)

    payload = await self._state.http.create_role(self.id, reason=reason, **fields)
    role = Role(guild=self, data=payload, state=self._state)

    # TODO: add to cache
    return role
async def kick(self, user, *, reason=None):
    """|coro|

    Kicks a user from the guild.

    The user must meet the :class:`abc.Snowflake` abc, and you must
    have the :attr:`~Permissions.kick_members` permission.

    Parameters
    -----------
    user: :class:`abc.Snowflake`
        The user to kick from their guild.
    reason: Optional[:class:`str`]
        The reason the user got kicked.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to kick.
    HTTPException
        Kicking failed.
    """
    http = self._state.http
    await http.kick(user.id, self.id, reason=reason)
async def ban(self, user, *, reason=None, delete_message_days=1):
    """|coro|

    Bans a user from the guild.

    The user must meet the :class:`abc.Snowflake` abc, and you must
    have the :attr:`~Permissions.ban_members` permission.

    Parameters
    -----------
    user: :class:`abc.Snowflake`
        The user to ban from their guild.
    delete_message_days: :class:`int`
        The number of days worth of messages to delete from the user
        in the guild. The minimum is 0 and the maximum is 7.
    reason: Optional[:class:`str`]
        The reason the user got banned.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to ban.
    HTTPException
        Banning failed.
    """
    http = self._state.http
    await http.ban(user.id, self.id, delete_message_days, reason=reason)
async def unban(self, user, *, reason=None):
    """|coro|

    Unbans a user from the guild.

    The user must meet the :class:`abc.Snowflake` abc, and you must
    have the :attr:`~Permissions.ban_members` permission.

    Parameters
    -----------
    user: :class:`abc.Snowflake`
        The user to unban.
    reason: Optional[:class:`str`]
        The reason for doing this action. Shows up on the audit log.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to unban.
    HTTPException
        Unbanning failed.
    """
    http = self._state.http
    await http.unban(user.id, self.id, reason=reason)
async def vanity_invite(self):
    """|coro|

    Returns the guild's special vanity invite.

    The guild must be partnered, i.e. have 'VANITY_URL' in
    :attr:`~Guild.features`, and you must have the
    :attr:`~Permissions.manage_guild` permission.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to get this.
    HTTPException
        Retrieving the vanity invite failed.

    Returns
    --------
    :class:`Invite`
        The special vanity invite.
    """
    state = self._state

    # The vanity endpoint only returns { code: abc }; fetch the full
    # invite to discover the channel, since default channels aren't
    # reliable or a thing anymore.
    payload = await state.http.get_vanity_code(self.id)
    invite_data = await state.http.get_invite(payload['code'])

    payload['guild'] = self
    payload['channel'] = self.get_channel(int(invite_data['channel']['id']))
    # Fill in the fields the partial payload lacks with sane constants.
    payload.update(revoked=False, temporary=False, max_uses=0, max_age=0)
    return Invite(state=state, data=payload)
def ack(self):
    """|coro|

    Marks every message in this guild as read.

    The user must not be a bot user.

    Raises
    -------
    HTTPException
        Acking failed.
    ClientException
        You must not be a bot user.
    """
    if self._state.is_bot:
        raise ClientException('Must not be a bot account to ack messages.')
    return self._state.http.ack_guild(self.id)
def audit_logs(self, *, limit=100, before=None, after=None, oldest_first=None, user=None, action=None):
    """Return an :class:`AsyncIterator` that enables receiving the guild's audit logs.

    You must have the :attr:`~Permissions.view_audit_log` permission to use this.

    Examples
    ----------

    Getting the first 100 entries: ::

        async for entry in guild.audit_logs(limit=100):
            print('{0.user} did {0.action} to {0.target}'.format(entry))

    Getting entries for a specific action: ::

        async for entry in guild.audit_logs(action=discord.AuditLogAction.ban):
            print('{0.user} banned {0.target}'.format(entry))

    Getting entries made by a specific user: ::

        entries = await guild.audit_logs(limit=None, user=guild.me).flatten()
        await channel.send('I made {} moderation actions.'.format(len(entries)))

    Parameters
    -----------
    limit: Optional[:class:`int`]
        The number of entries to retrieve. If ``None`` retrieve all entries.
    before: Union[:class:`abc.Snowflake`, datetime]
        Retrieve entries before this date or entry.
        If a date is provided it must be a timezone-naive datetime representing UTC time.
    after: Union[:class:`abc.Snowflake`, datetime]
        Retrieve entries after this date or entry.
        If a date is provided it must be a timezone-naive datetime representing UTC time.
    oldest_first: :class:`bool`
        If set to true, return entries in oldest->newest order. Defaults to True if
        ``after`` is specified, otherwise False.
    user: :class:`abc.Snowflake`
        The moderator to filter entries from.
    action: :class:`AuditLogAction`
        The action to filter with.

    Raises
    -------
    Forbidden
        You are not allowed to fetch audit logs
    HTTPException
        An error occurred while fetching the audit logs.

    Yields
    --------
    :class:`AuditLogEntry`
        The audit log entry.
    """
    # The iterator wants raw ids/values, not the model objects.
    user_id = user.id if user else user
    action_type = action.value if action else action

    return AuditLogIterator(self, before=before, after=after, limit=limit,
                            oldest_first=oldest_first, user_id=user_id,
                            action_type=action_type)
async def widget(self):
    """|coro|

    Fetches the widget of the guild.

    .. note::

        The guild must have the widget enabled to get this information.

    Raises
    -------
    Forbidden
        The widget for this guild is disabled.
    HTTPException
        Retrieving the widget failed.

    Returns
    --------
    :class:`Widget`
        The guild's widget.
    """
    payload = await self._state.http.get_widget(self.id)
    return Widget(state=self._state, data=payload)
|
{
"content_hash": "917106bec7317ad5ce8b543e8b5fd9dc",
"timestamp": "",
"source": "github",
"line_count": 1543,
"max_line_length": 122,
"avg_line_length": 34.78483473752431,
"alnum_prop": 0.588508188474652,
"repo_name": "gnmiller/craig-bot",
"id": "3dd28533e1e2a3f35c19104ec49aead65811a7f5",
"size": "53698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "craig-bot/lib/python3.6/site-packages/discord/guild.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259967"
},
{
"name": "C++",
"bytes": "670"
},
{
"name": "Python",
"bytes": "5770206"
}
],
"symlink_target": ""
}
|
from distutils.core import setup

from catkin_pkg.python_setup import generate_distutils_setup

# Package metadata (version, maintainer, etc.) is pulled from package.xml
# by catkin; only the package layout is declared here.
setup(**generate_distutils_setup(
    packages=['kobuki_noros'],
    package_dir={'': 'src'},
))
|
{
"content_hash": "862656ce313e6185ddf02a7cabe60129",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 60,
"avg_line_length": 24.8,
"alnum_prop": 0.7338709677419355,
"repo_name": "git-afsantos/kobukium",
"id": "76e9ccfaca39577394f2145cddf9608ed8c8646c",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kobuki_noros/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "1720"
},
{
"name": "CSS",
"bytes": "7238"
},
{
"name": "HTML",
"bytes": "4945"
},
{
"name": "JavaScript",
"bytes": "25204"
},
{
"name": "Python",
"bytes": "54729"
}
],
"symlink_target": ""
}
|
"""
Created on Tir 29 Oct 2013.
Uses the Crank Nicolson scheme to solve the time dependent Schrodinger equation
for a center potential spike (delta potential).
Animation is done using the matplotlib.pyplot library.
Usage:
python CrankNicolsonPotentialSpike.py
or equivalent:
./CrankNicolsonPotentialSpike.py
No commandline arguments are needed.
Note that some of the probability "tunnels" through the barrier.
@author Benedicte Emilie Braekken
"""
# Tools for sparse matrices
import scipy.sparse as sparse
import scipy.sparse.linalg
# Numerical tools
from numpy import *
# Plotting library
from matplotlib.pyplot import *
"""Physical constants"""
_E0p = 938.27 # Rest energy for a proton [MeV]
_hbarc = 0.1973 # [MeV pm]
_c = 3.0e2 # Spees of light [pm / as]
def Psi0(x):
    '''
    Initial state for a travelling gaussian wave packet.

    @param x Position(s) [pm] at which to evaluate the wave function.
    @return Complex amplitude(s) of the normalised packet at x.
    '''
    center = -0.100        # Initial center of the packet [pm]
    width = 0.0050         # Gaussian width parameter [pm]
    wavenumber = 200000.0  # Wave number giving the packet momentum [1 / pm]

    norm = (1. / (2 * pi * width**2))**0.25
    envelope = exp(-(x - center)**2 / (4. * width**2))
    phase = exp(1j * wavenumber * x)
    return norm * envelope * phase
def deltaPotential(x, height=75):
    """
    A potential spike or delta potential in the center of the grid.

    @param x      Grid positions; only its length is used.
    @param height Defines the height of the barrier / spike. This should be
                  chosen to be high "enough".
    @return Array of zeros with a single spike at the middle index.
    """
    # Declare new empty array with same length as x
    potential = zeros(len(x))

    # Middle point has high potential. Use integer floor division:
    # the original float index (0.5 * len(...)) is rejected by
    # modern NumPy (and is non-integral for odd lengths).
    potential[len(x) // 2] = height

    return potential
if __name__ == '__main__':
    # Spatial grid parameters
    nx = 1001 # Number of points in x direction
    dx = 0.001 # Distance between x points [pm]

    # Use zero as center, same amount of points each side
    a = - 0.5 * nx * dx
    b = 0.5 * nx * dx
    x = linspace( a, b, nx )

    # Time parameters
    T = 0.005 # How long to run simulation [as]
    dt = 1e-5 # The time step [as]
    t = 0
    time_steps = int( T / dt ) # Number of time steps

    # Constants - save time by calculating outside of loop
    k1 = - ( 1j * _hbarc * _c) / (2. * _E0p )
    k2 = ( 1j * _c ) / _hbarc

    # Create the initial state Psi
    Psi = Psi0(x)

    # Create the matrix containing central differences. It is used to
    # approximate the second derivative.
    data = ones((3, nx))
    data[1] = -2*data[1]
    diags = [-1,0,1]
    D2 = k1 / dx**2 * sparse.spdiags(data,diags,nx,nx)

    # Identity Matrix
    I = sparse.identity(nx)

    # Create the diagonal matrix containing the potential.
    V_data = deltaPotential(x)
    V_diags = [0]
    V = k2 * sparse.spdiags(V_data, V_diags, nx, nx)

    # Put matplotlib in interactive mode for animation
    ion()

    # Setup the figure before starting animation
    fig = figure() # Create window
    ax = fig.add_subplot(111) # Add axes
    line, = ax.plot( x, abs(Psi)**2, label='$|\Psi(x,t)|^2$' ) # Fetch the line object

    # Also draw a line illustrating the potential
    ax.plot( x, V_data, label='$V(x)$' )

    # Add other properties to the plot to make it elegant
    fig.suptitle("Solution of Schrodinger's equation with delta potential") # Title of plot
    ax.grid('on') # Square grid lines in plot
    ax.set_xlabel('$x$ [pm]') # X label of axes
    ax.set_ylabel('$|\Psi(x, t)|^2$ [1/pm] and $V(x)$ [MeV]') # Y label of axes
    ax.legend(loc='best') # Adds labels of the lines to the window
    draw() # Draws first window

    # Time loop: advance the wave function with Crank-Nicolson steps
    while t < T:
        """
        For each iteration: Solve the system of linear equations:
        (I - k/2*D2) u_new = (I + k/2*D2)*u_old
        """
        # Set the elements of the equation
        A = (I - dt/2*(D2 + V))
        b = (I + dt/2. * (D2 + V)) * Psi

        # Calculate the new Psi
        Psi = sparse.linalg.spsolve(A,b)

        # Update time
        t += dt

        # Plot this new state
        line.set_ydata( abs(Psi)**2 ) # Update the y values of the Psi line
        draw() # Update the plot

    # Turn off interactive mode
    ioff()

    # Add show so that windows do not automatically close
    show()
|
{
"content_hash": "edaec43ecb40c627d664c29ad898f951",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 91,
"avg_line_length": 28.097902097902097,
"alnum_prop": 0.6070184171229467,
"repo_name": "ahye/FYS2140-Resources",
"id": "cd5ef667fd1fb64aede0e8018d637b1c3916286b",
"size": "4040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/CrankNicolson/CrankNicolsonPotentialSpike.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45359"
}
],
"symlink_target": ""
}
|
try:
from django.conf.urls import patterns, url
except ImportError:
from django.conf.urls.defaults import patterns, url
from django.views.generic import TemplateView
from django.core.urlresolvers import reverse
from hyperadmin.apirequests import InternalAPIRequest
import logging
class Client(object):
    """Base class for hyperadmin clients bound to a single API endpoint.

    Provides URL namespace bookkeeping and reversal; subclasses supply
    the actual url patterns via :meth:`get_urls`.
    """
    default_namespace = 'hyper-client'
    default_app_name = 'client'

    def __init__(self, api_endpoint, name=None, app_name=None):
        self.api_endpoint = api_endpoint
        self.name = name if name else self.default_namespace
        self.app_name = app_name if app_name else self.default_app_name

    def get_logger(self):
        return logging.getLogger(__name__)

    def get_urls(self):
        # Subclasses override this to return their urlpatterns.
        pass

    @property
    def urls(self):
        # Triple suitable for inclusion in a Django urlconf.
        return self, self.app_name, self.name

    @property
    def urlpatterns(self):
        return self.get_urls()

    def reverse(self, name, *args, **kwargs):
        namespaced = '%s:%s' % (self.name, name)
        return reverse(namespaced, args=args, kwargs=kwargs, current_app=self.app_name)
class SimpleTemplateClientView(TemplateView):
    """TemplateView that merges its client's context into the template context."""
    client = None  # set by the owning client via as_view(client=...)

    def get_context_data(self, **kwargs):
        ctx = super(SimpleTemplateClientView, self).get_context_data(**kwargs)
        ctx.update(self.client.get_context_data())
        return ctx
class SimpleTemplateClient(Client):
    """Client that serves a single template at its index URL."""
    template_name = None
    template_view = SimpleTemplateClientView

    def get_media(self):
        pass #TODO

    def get_context_data(self):
        # Fork the endpoint with an internal api request so get_url()
        # resolves against this client's site.
        api_request = InternalAPIRequest(site=self.api_endpoint)
        endpoint = self.api_endpoint.fork(api_request=api_request)
        return {
            'media': self.get_media(),
            'api_endpoint': endpoint.get_url(),
            'client': self,
        }

    def get_urls(self):
        index_view = self.template_view.as_view(
            template_name=self.template_name, client=self)
        return patterns('',
            url(r'^$', index_view, name='index'),
        )
|
{
"content_hash": "0744db897278330d7ccf7e580691baa1",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 104,
"avg_line_length": 29.785714285714285,
"alnum_prop": 0.6345323741007194,
"repo_name": "webcube/django-hyperadmin",
"id": "66163af54b1896927aec80322b30839ed6597b2d",
"size": "2085",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hyperadmin/clients/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "291331"
}
],
"symlink_target": ""
}
|
"""
One of the most controversial issues in the US educational system is the efficacy of standardized
tests, and whether they are unfair to certain groups. Given our prior knowledge about this topic,
investigating the correlations between SAT scores and demographic factors might be an interesting
angle to take. We could correlate SAT scores with factors like race, gender, income, and more.
The NYC Open Data website has a plethora of data on NYC public schools, including SAT data. But the
data of interest is spread over many different data sets.
First we need to read in and clean multiple datasets and then merge them into a single useful dataset.
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
from mpl_toolkits.basemap import Basemap
# Directory containing all of the datasets
data_dir= "../data/schools"

# All of the CSV-format datasets
data_files = [
    "ap_2010.csv",
    "class_size.csv",
    "demographics.csv",
    "graduation.csv",
    "hs_directory.csv",
    "sat_results.csv"
]

# Dictionary of Pandas DataFrames for all of the datasets
data = {}

# Read each of the files in the list data_files into a Pandas Dataframe using the read_csv function.
# Add each of the Dataframes to the dictionary data, using the base of the filename as the key.
for data_file in data_files:
    df = pd.read_csv(os.path.join(data_dir, data_file))
    data[os.path.splitext(data_file)[0]] = df

## Reading in the Survey Data

# Read in survey_all.txt (tab-delimited, Windows-1252 encoded)
all_survey = pd.read_csv(os.path.join(data_dir, 'survey_all.txt'), delimiter='\t', encoding='windows-1252')

# Read in survey_d75.txt (District 75 schools, same format)
d75_survey = pd.read_csv(os.path.join(data_dir, 'survey_d75.txt'), delimiter='\t', encoding='windows-1252')

# Combine the d75_survey and all_survey into a single DataFrame (stacked row-wise)
survey = pd.concat([d75_survey, all_survey], axis=0)

## Cleaning Up The Surveys

# Copy the data from the dbn column of survey into a new column in survey called DBN
# so the key column name matches the other datasets
survey['DBN'] = survey['dbn']

# List of relevant columns
rel_cols = ["DBN", "rr_s", "rr_t", "rr_p", "N_s", "N_t", "N_p", "saf_p_11", "com_p_11", "eng_p_11",
            "aca_p_11", "saf_t_11", "com_t_11", "eng_t_11", "aca_t_11", "saf_s_11", "com_s_11",
            "eng_s_11", "aca_s_11", "saf_tot_11", "com_tot_11", "eng_tot_11", "aca_tot_11",]

# Filter survey so it only contains the relevant columns we care about
filtered_survey = survey[rel_cols]

# Assign the Dataframe survey to the key survey in the dictionary data
data['survey'] = filtered_survey

## Inserting DBN Fields

# Copy the dbn column in hs_directory into a new column called DBN
data['hs_directory']['DBN'] = data['hs_directory']['dbn']
def pad_two_digits(an_int):
    """Return *an_int* as a string left-padded with '0' to two characters."""
    return str(an_int).zfill(2)
# Create a new column called padded_csd in the class_size dataset
# (district numbers zero-padded to two digits so they concatenate cleanly)
data['class_size']['padded_csd'] = data['class_size']['CSD'].apply(pad_two_digits)

# Use the + operator along with the padded_csd and SCHOOL CODE columns of class_size, then
# assign the result to the DBN column of class_size
data['class_size']['DBN'] = data['class_size']['padded_csd'] + data['class_size']['SCHOOL CODE']

## Combining The SAT Scores

sat_results = data['sat_results']

# Convert the three SAT score columns from string data type to numeric datatype
# (errors='coerce' turns unparsable values into NaN)
sat_results['SAT Math Avg. Score'] = pd.to_numeric(sat_results['SAT Math Avg. Score'], errors='coerce')
sat_results['SAT Critical Reading Avg. Score'] = pd.to_numeric(sat_results['SAT Critical Reading Avg. Score'], errors='coerce')
sat_results['SAT Writing Avg. Score'] = pd.to_numeric(sat_results['SAT Writing Avg. Score'], errors='coerce')

# Create a column called sat_score that is the combined SAT score
sat_results['sat_score'] = sat_results['SAT Math Avg. Score'] + sat_results['SAT Critical Reading Avg. Score'] + sat_results['SAT Writing Avg. Score']

## Parsing Coordinates For Each School

# Extracting the latitude
def get_latitude(in_str):
    """Pull the latitude out of a '(lat, lon)' pair embedded in *in_str*.

    Returns the latitude as a string, or None when no coordinate pair
    is present.
    """
    found = re.findall("\(.+, .+\)", in_str)
    if not found:
        return None
    coords = found[0].replace('(', '').replace(')', '')
    return coords.split(',')[0]
# Use the apply method with above function to get latitude from Location 1 column
data['hs_directory']['lat'] = data['hs_directory']['Location 1'].apply(get_latitude)

# Extracting the longitude
def get_longitude(in_str):
    """Pull the longitude out of a '(lat, lon)' pair embedded in *in_str*.

    Returns the longitude as a string (any leading space after the comma
    is kept; pd.to_numeric tolerates it), or None when no coordinate
    pair is present.
    """
    found = re.findall("\(.+, .+\)", in_str)
    if not found:
        return None
    coords = found[0].replace('(', '').replace(')', '')
    return coords.split(',')[1]
# Use the apply method with above function to get longitude from Location 1 column
data['hs_directory']['lon'] = data['hs_directory']['Location 1'].apply(get_longitude)

# Convert lat and lon columns to numeric (bad parses become NaN)
data['hs_directory']['lat'] = pd.to_numeric(data['hs_directory']['lat'], errors='coerce')
data['hs_directory']['lon'] = pd.to_numeric(data['hs_directory']['lon'], errors='coerce')

## The next step we will need to take is to condense some of the data we have
# First we will need to make sure every value in the DBN column is unique

## Condensing Class Size

# Create a new variable called class_size and assign the value of data['class_size']
class_size = data['class_size']

# Filter class_size so the 'GRADE ' column only contains the value 09-12
# (note the trailing space in the column name is in the raw data)
class_size = class_size[class_size['GRADE '] == '09-12']

# Filter class_size so that the 'PROGRAM TYPE' column only contains the value 'GEN ED'
class_size = class_size[class_size['PROGRAM TYPE'] == 'GEN ED']

## Computing Average Class Sizes

# Find the average values for each column for each DBN in class_size
class_size = class_size.groupby('DBN').agg(np.mean)

# DBN is now the index. Reset the index, making DBN a column again
class_size.reset_index(inplace=True)
data['class_size'] = class_size

## Condensing Demographics

# Filter demographics and only select rows where schoolyear is 20112012
data['demographics'] = data['demographics'][data['demographics']['schoolyear'] == 20112012]

## Condensing Graduation

# Filter graduation and only select rows where the Cohort column equals 2006
data['graduation'] = data['graduation'][data['graduation']['Cohort'] == '2006']

# Filter graduation and only select rows where the Demographic column equals 'Total Cohort'
data['graduation'] = data['graduation'][data['graduation']['Demographic'] == 'Total Cohort']

## Converting AP Test Scores

cols = ['AP Test Takers ', 'Total Exams Taken', 'Number of Exams with scores 3 4 or 5']

# Convert columns in ap_2010 to numeric values
for col in cols:
    data['ap_2010'][col] = pd.to_numeric(data['ap_2010'][col], errors='coerce')

### Now it is finally time to start merging the disparate datasets

## Performing The Left Joins

# Both the ap_2010 and the graduation datasets have many missing DBN values, so we'll use a left
# join when we join the sat_results dataset with them. A left join means that our final Dataframe
# will have all the same DBN values as the original sat_results Dataframe.

# Merge sat_results, ap_2010, and graduation using left joins
combined = data["sat_results"]
combined = combined.merge(data['ap_2010'], how='left', on='DBN')
combined = combined.merge(data['graduation'], how='left', on='DBN')

## Performing the Inner Joins

# Now that we've done the left joins, we still have class_size, demographics, survey, and
# hs_directory left to merge into combined. Because these files contain information that's more
# valuable to our analysis, and because they have fewer missing DBN values, we'll use the inner join
# type when merging these into combined.
combined = combined.merge(data['class_size'], how='inner', on='DBN')
combined = combined.merge(data['demographics'], how='inner', on='DBN')
combined = combined.merge(data['survey'], how='inner', on='DBN')
combined = combined.merge(data['hs_directory'], how='inner', on='DBN')

## Filling In Missing values

# Since we did a number of left joins, we have a number of columns with missing data. There are many
# ways to deal with this, one is to replace missing values with the column mean.
# Some analyses can deal with missing values (plotting), but other analyses cannot (correlation).

# Compute the means of all the columns in combined
means = combined.mean()

# Fill in any missing values in combined with the column means
combined = combined.fillna(means)

# Fill in any remaining missing values in combined with 0
combined = combined.fillna(0)

### We've finished cleaning and combining our data! We now have a clean dataset on which we can base our analysis.

## Adding A School District Column

# One type of analysis that we might want to do is mapping out statistics on a school district level.
# In order to help us do this, it will be useful to add a column that specifies the school district to the dataset.
# The school district is just the first two characters of the DBN.
def first_2_chars(s):
    """Return the first two characters of *s* (the district code of a DBN).

    @param s : str - input string
    @return str - first 2 characters in s
    """
    return s[:2]
# Apply the function to the DBN column of combined, and assign result to a new column
combined['school_dist'] = combined['DBN'].apply(first_2_chars)

### --------------- Analyzing Data

# It is time to find correlations, make plots, and make maps.
# The first step that we'll take is to find correlations between every column and sat_score.
# This will help us figure out what columns might be interesting to investigate more or plot out.

## Finding correlations

# Correlations tell us how closely related two columns are. We'll be using the r value, also called
# Pearson's correlation coefficient, which measures how closely two sequences of numbers are correlated.
# An r value falls between -1 and 1, and tells you if the two columns are positively correlated, not
# correlated, or negatively correlated. The closer to 1 the r value is, the more strongly positively
# correlated the columns are. The closer to -1 the r value is, the more strongly negatively correlated
# the columns are. The closer to 0 the r value is, the less the columns are correlated.

# In general r-values:
# - above .25 or below -.25 are enough to qualify a correlation as interesting (potentially relevant)
# - above .45 or below -.45 tend to be significant correlations (usually relevant)
# - above .65 or below -.65 tend to be strong correlations (almost always relevant)
# Find all possible correlations in the "combined" DataFrame
correlations = combined.corr()
# Filter correltaions so that only correlations for the column "sat_score" are shown
correlations = correlations['sat_score']
# Drop NaN values
correlations = correlations.dropna()
# Sort by correlation value
correlations.sort(ascending=False, inplace=True)
# Diplay all the rows in correlations with a correlation above 0.25 or below -0.25
print('Significant correlations with overall SAT score:')
print(correlations[abs(correlations) > 0.25])
# Note the extremely high negative correlation (< -0.7) between SAT Scores and the percentage of
# students receiving a free or reduced-cost lunch (frl_percent). The frl_percent is a direct measure
# of the percentage of students living in (or near) poverty.

## Plotting Enrollment

# Enable matplotlib interactive mode so plots update without blocking
plt.ion()

# Plot Total Enrollment vs SAT Score
combined.plot.scatter(x='sat_score', y='total_enrollment')
plt.show()
## Exploring Schools With Low SAT Scores And Enrollment

# From looking at the plot we just generated, it doesn't appear that there's a significant
# correlation between SAT Score and total enrollment. However, there is an interesting cluster of
# points at the bottom left where total_enrollment and sat_score are both low.

# Filter the combined Dataframe, and only keep rows with low sat_score AND low total_enrollment.
# The original code assigned the total_enrollment filter and then immediately overwrote it with
# the sat_score filter, silently dropping the enrollment condition; both must be applied together.
low_enrollment = combined[(combined['total_enrollment'] < 1000)
                          & (combined['sat_score'] < 1000)]

# Display all the items in the School Name column of low enrollment
print('\nLow Enrollment schools with Low SAT Scores')
print(low_enrollment['School Name'])
# All of these schools appear to be international schools intended for recent
# immigrants from a foreign country who speak English as a second language.

## Plotting Language Learning Percentage

# From our research in the last screen, we found that most of the high schools with low total
# enrollment and low SAT scores are actually schools with a high percentage of English language
# learners enrolled. This indicates that it's actually ell_percent that correlates strongly with
# sat_score instead of total_enrollment
combined.plot.scatter(x='sat_score', y='ell_percent')
plt.show()

## Mapping the Schools

# It looks like ell_percent correlates with sat_score more strongly, because the scatterplot is
# more linear. However, there's still the cluster with very high ell_percent and low sat_score,
# which is the same group of international high schools that we investigated earlier.
# In order to explore this relationship, we'll want to map out ell_percent by school district,
# so we can more easily see which parts of the city have a lot of English language learners.

# Setup the Matplotlib Basemap centered on New York City (Mercator projection,
# corners chosen to bound the five boroughs)
plt.figure()
m = Basemap(projection='merc',
            llcrnrlat=40.496044,
            urcrnrlat=40.915256,
            llcrnrlon=-74.255735,
            urcrnrlon=-73.700272,
            resolution='i')
m.drawmapboundary(fill_color='#85A6D9')
m.drawcoastlines(color='#6D5F47', linewidth=.4)
m.drawrivers(color='#6D5F47', linewidth=.4)

# Convert the lat and lon columns of combined to lists
longitudes = combined['lon'].tolist()
latitudes = combined['lat'].tolist()

# Plot the locations
m.scatter(longitudes, latitudes, s=20, zorder=2, latlon=True)
plt.show()

## Plotting Out Statistics

# Now that we can plot out the positions of the schools, we can start to display meaningful
# information on maps, such as the percentage of English language learners by area.
#
# We can shade each point in the scatterplot by passing the keyword argument c into the scatter
# method. The c keyword argument will accept a sequence of numbers, and will shade points
# corresponding to lower numbers or higher numbers differently.
#
# Whatever sequence of numbers we pass into the c keyword argument will be converted to a range
# from 0 to 1. These values will then be mapped onto a color map.
plt.figure()
m = Basemap(projection='merc',
            llcrnrlat=40.496044,
            urcrnrlat=40.915256,
            llcrnrlon=-74.255735,
            urcrnrlon=-73.700272,
            resolution='i')
m.drawmapboundary(fill_color='#85A6D9')
m.drawcoastlines(color='#6D5F47', linewidth=.4)
m.drawrivers(color='#6D5F47', linewidth=.4)

# Convert the lat and lon columns of combined to lists
longitudes = combined['lon'].tolist()
latitudes = combined['lat'].tolist()

# Plot the locations, shaded by ell_percent
m.scatter(longitudes, latitudes, s=20, zorder=2, latlon=True, c=combined['ell_percent'], cmap='summer')
plt.show()

## Calculating District Level Statistics

# Unfortunately, due to the number of schools, it's hard to interpret the map we made in the last
# screen. It looks like uptown Manhattan and parts of Queens have a higher ell_percent, but we can't
# be sure. One way to make it easier to read very granular statistics is to aggregate them. In this
# case, we can aggregate based on district, which will enable us to plot ell_percent district by
# district instead of school by school.

# Find the average values for each column for each school_dist in combined
districts = combined.groupby('school_dist').agg(np.mean)

# Reset the index of districts, making school_dist a column again
districts.reset_index(inplace=True)

## Plotting ell_percent by District

# Now that we've taken the mean of all the columns, we can plot out ell_percent by district. Not
# only did we find the mean of ell_percent, we also took the mean of the lon and lat columns, which
# will give us the coordinates for the center of each district.

# Setup the Matplotlib Basemap centered on New York City
plt.figure()
m = Basemap(projection='merc',
            llcrnrlat=40.496044,
            urcrnrlat=40.915256,
            llcrnrlon=-74.255735,
            urcrnrlon=-73.700272,
            resolution='i')
m.drawmapboundary(fill_color='#85A6D9')
m.drawcoastlines(color='#6D5F47', linewidth=.4)
m.drawrivers(color='#6D5F47', linewidth=.4)

# Convert the lat and lon columns of districts to lists
longitudes = districts['lon'].tolist()
latitudes = districts['lat'].tolist()

# Plot the district centers, shaded by district-average ell_percent
m.scatter(longitudes, latitudes, s=50, zorder=2, latlon=True, c=districts['ell_percent'], cmap='summer')
plt.show()
|
{
"content_hash": "4e6c7398a3138831cb7cb58f584aecef",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 150,
"avg_line_length": 43.225641025641025,
"alnum_prop": 0.7295052793925733,
"repo_name": "tleonhardt/CodingPlayground",
"id": "04311252a6f9b0efcbb1f363596c9bfe7dec5b62",
"size": "16880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataquest/DataCleaning/cleaning_walkthrough.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "30533"
},
{
"name": "C++",
"bytes": "2514"
},
{
"name": "CMake",
"bytes": "3607"
},
{
"name": "Cython",
"bytes": "3972"
},
{
"name": "HTML",
"bytes": "1700"
},
{
"name": "Jupyter Notebook",
"bytes": "2056095"
},
{
"name": "Makefile",
"bytes": "161"
},
{
"name": "Python",
"bytes": "244507"
},
{
"name": "Ruby",
"bytes": "296"
},
{
"name": "SWIG",
"bytes": "1120"
},
{
"name": "Shell",
"bytes": "893"
}
],
"symlink_target": ""
}
|
"""
Class for PXE bare-metal nodes.
"""
import datetime
import os
import jinja2
from oslo.config import cfg
from nova.compute import flavors
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import timeutils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import db
from nova.virt.baremetal import utils as bm_utils
# Configuration options for the PXE bare-metal driver.  They are registered
# below under the [baremetal] group of nova.conf.
pxe_opts = [
    cfg.StrOpt('deploy_kernel',
               help='Default kernel image ID used in deployment phase'),
    cfg.StrOpt('deploy_ramdisk',
               help='Default ramdisk image ID used in deployment phase'),
    cfg.StrOpt('net_config_template',
               default='$pybasedir/nova/virt/baremetal/'
                       'net-dhcp.ubuntu.template',
               help='Template file for injected network config'),
    cfg.StrOpt('pxe_append_params',
               default='nofb nomodeset vga=normal',
               help='additional append parameters for baremetal PXE boot'),
    cfg.StrOpt('pxe_config_template',
               default='$pybasedir/nova/virt/baremetal/pxe_config.template',
               help='Template file for PXE configuration'),
    cfg.BoolOpt('use_file_injection',
                help='If True, enable file injection for network info, '
                     'files and admin password',
                default=True),
    cfg.IntOpt('pxe_deploy_timeout',
               help='Timeout for PXE deployments. Default: 0 (unlimited)',
               default=0),
    cfg.BoolOpt('pxe_network_config',
                help='If set, pass the network configuration details to the '
                     'initramfs via cmdline.',
                default=False),
    cfg.StrOpt('pxe_bootfile_name',
               help='This gets passed to Neutron as the bootfile dhcp '
                    'parameter when the dhcp_options_enabled is set.',
               default='pxelinux.0'),
    ]

# Module-level logger.
LOG = logging.getLogger(__name__)

# Register the options above under the [baremetal] group, and pull in the
# global use_ipv6 flag defined in nova.netconf.
baremetal_group = cfg.OptGroup(name='baremetal',
                               title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(pxe_opts, baremetal_group)
CONF.import_opt('use_ipv6', 'nova.netconf')
def build_pxe_network_config(network_info):
    """Build the kernel-cmdline network configuration string.

    Renders one ``ip=...`` fragment per interface (IPv6 or IPv4 form,
    depending on CONF.use_ipv6) and joins them with spaces.
    """
    interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
    if CONF.use_ipv6:
        fmt = ("ip=[%(address_v6)s]::[%(gateway_v6)s]:"
               "[%(netmask_v6)s]::%(name)s:off")
    else:
        fmt = "ip=%(address)s::%(gateway)s:%(netmask)s::%(name)s:off"
    return ' '.join(fmt % iface for iface in interfaces)
def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
                     deployment_aki_path, deployment_ari_path,
                     aki_path, ari_path, network_info):
    """Build the PXE config file for a node

    This method builds the PXE boot configuration file for a node,
    given all the required parameters.

    The resulting file has both a "deploy" and "boot" label, which correspond
    to the two phases of booting. This may be extended later.

    :param deployment_id: baremetal node id, used as the PXE deployment id
    :param deployment_key: random key checked by the deploy ramdisk
    :param deployment_iscsi_iqn: iSCSI IQN exposed during deployment
    :param deployment_aki_path: path to the deploy kernel
    :param deployment_ari_path: path to the deploy ramdisk
    :param aki_path: path to the instance kernel
    :param ari_path: path to the instance ramdisk
    :param network_info: network info used for the optional cmdline config
    :returns: the rendered PXE config file contents as a string
    """
    # Pass the argument lazily instead of eagerly %-formatting it, so the
    # interpolation is skipped entirely when debug logging is disabled.
    LOG.debug(_("Building PXE config for deployment %s."), deployment_id)

    network_config = None
    if network_info and CONF.baremetal.pxe_network_config:
        network_config = build_pxe_network_config(network_info)

    pxe_options = {
        'deployment_id': deployment_id,
        'deployment_key': deployment_key,
        'deployment_iscsi_iqn': deployment_iscsi_iqn,
        'deployment_aki_path': deployment_aki_path,
        'deployment_ari_path': deployment_ari_path,
        'aki_path': aki_path,
        'ari_path': ari_path,
        'pxe_append_params': CONF.baremetal.pxe_append_params,
        'pxe_network_config': network_config,
    }
    tmpl_path, tmpl_file = os.path.split(CONF.baremetal.pxe_config_template)
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
    template = env.get_template(tmpl_file)
    # ${ROOT} is passed through literally so the bootloader (not jinja)
    # expands it at boot time.
    return template.render({'pxe_options': pxe_options,
                            'ROOT': '${ROOT}'})
def build_network_config(network_info):
    """Render the network configuration file injected into the instance."""
    interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
    tmpl_dir, tmpl_name = os.path.split(CONF.baremetal.net_config_template)
    loader = jinja2.FileSystemLoader(tmpl_dir)
    env = jinja2.Environment(loader=loader)
    return env.get_template(tmpl_name).render({'interfaces': interfaces,
                                               'use_ipv6': CONF.use_ipv6})
def get_deploy_aki_id(flavor):
    """Return the deploy kernel id for *flavor*, or the configured default."""
    extra_specs = flavor.get('extra_specs', {})
    return extra_specs.get('baremetal:deploy_kernel_id',
                           CONF.baremetal.deploy_kernel)
def get_deploy_ari_id(flavor):
    """Return the deploy ramdisk id for *flavor*, or the configured default."""
    extra_specs = flavor.get('extra_specs', {})
    return extra_specs.get('baremetal:deploy_ramdisk_id',
                           CONF.baremetal.deploy_ramdisk)
def get_image_dir_path(instance):
    """Return the directory holding this instance's disk image."""
    instance_name = instance['name']
    return os.path.join(CONF.instances_path, instance_name)
def get_image_file_path(instance):
    """Return the full path of this instance's disk image file."""
    instance_name = instance['name']
    return os.path.join(CONF.instances_path, instance_name, 'disk')
def get_pxe_config_file_path(instance):
    """Return the path of this instance's PXE config file."""
    instance_uuid = instance['uuid']
    return os.path.join(CONF.baremetal.tftp_root, instance_uuid, 'config')
def get_partition_sizes(instance):
    """Return ``(root_mb, swap_mb, ephemeral_mb)`` for *instance*'s flavor."""
    flavor = flavors.extract_flavor(instance)
    root_mb = flavor['root_gb'] * 1024
    ephemeral_mb = flavor['ephemeral_gb'] * 1024
    # NOTE(deva): For simpler code paths on the deployment side,
    #             we always create a swap partition. If the flavor
    #             does not specify any swap, we default to 1MB
    swap_mb = max(flavor['swap'], 1)
    return root_mb, swap_mb, ephemeral_mb
def get_pxe_mac_path(mac):
    """Convert a MAC address into a PXE config file name."""
    # pxelinux expects "01-" plus the dash-separated, lowercased MAC.
    cfg_name = "01-" + mac.replace(":", "-").lower()
    return os.path.join(CONF.baremetal.tftp_root, 'pxelinux.cfg', cfg_name)
def get_tftp_image_info(instance, flavor):
    """Generate the paths for tftp files for this instance

    Raises NovaException if
    - instance does not contain kernel_id or ramdisk_id
    - deploy_kernel_id or deploy_ramdisk_id can not be read from
      flavor['extra_specs'] and defaults are not set
    """
    # Each label maps to a mutable [image_uuid, tftp_path] pair.
    image_info = {label: [None, None]
                  for label in ('kernel', 'ramdisk',
                                'deploy_kernel', 'deploy_ramdisk')}
    try:
        image_info['kernel'][0] = str(instance['kernel_id'])
        image_info['ramdisk'][0] = str(instance['ramdisk_id'])
        image_info['deploy_kernel'][0] = get_deploy_aki_id(flavor)
        image_info['deploy_ramdisk'][0] = get_deploy_ari_id(flavor)
    except KeyError:
        # A missing key simply leaves that label's uuid as None; it is
        # reported below together with any other missing labels.
        pass

    missing_labels = []
    for label in image_info.keys():
        uuid = image_info[label][0]
        if uuid:
            image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
                                                instance['uuid'], label)
        else:
            missing_labels.append(label)

    if missing_labels:
        raise exception.NovaException(_(
            "Can not activate PXE bootloader. The following boot parameters "
            "were not passed to baremetal driver: %s") % missing_labels)
    return image_info
class PXE(base.NodeDriver):
    """PXE bare metal driver.

    Deploys instances to bare-metal nodes over PXE: caches the deploy and
    instance kernels/ramdisks under the TFTP root, writes the node's PXE
    configuration and per-MAC symlinks, optionally injects last-mile
    configuration into the instance image, and polls the baremetal DB until
    the deployment completes.
    """

    def __init__(self, virtapi):
        super(PXE, self).__init__(virtapi)

    def _collect_mac_addresses(self, context, node):
        """Return a sorted list of the MAC addresses registered for *node*."""
        macs = set()
        for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
            if nic['address']:
                macs.add(nic['address'])
        return sorted(macs)

    def _cache_tftp_images(self, context, instance, image_info):
        """Fetch the necessary kernels and ramdisks for the instance."""
        fileutils.ensure_tree(
            os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
        # NOTE: log arguments are passed lazily (not pre-%-formatted) so the
        # interpolation is skipped when debug logging is disabled.
        LOG.debug(_("Fetching kernel and ramdisk for instance %s"),
                  instance['name'])
        for label in image_info.keys():
            (uuid, path) = image_info[label]
            bm_utils.cache_image(
                context=context,
                target=path,
                image_id=uuid,
                user_id=instance['user_id'],
                project_id=instance['project_id'],
            )

    def _cache_image(self, context, instance, image_meta):
        """Fetch the instance's image from Glance

        This method pulls the relevant AMI and associated kernel and ramdisk,
        and the deploy kernel and ramdisk from Glance, and writes them
        to the appropriate places on local disk.

        Both sets of kernel and ramdisk are needed for PXE booting, so these
        are stored under CONF.baremetal.tftp_root.

        At present, the AMI is cached and certain files are injected.
        Debian/ubuntu-specific assumptions are made regarding the injected
        files. In a future revision, this functionality will be replaced by a
        more scalable and os-agnostic approach: the deployment ramdisk will
        fetch from Glance directly, and write its own last-mile configuration.

        :returns: list of [image_id, local_image_path]
        """
        fileutils.ensure_tree(get_image_dir_path(instance))
        image_path = get_image_file_path(instance)
        LOG.debug(_("Fetching image %(ami)s for instance %(name)s"),
                  {'ami': image_meta['id'], 'name': instance['name']})
        bm_utils.cache_image(context=context,
                             target=image_path,
                             image_id=image_meta['id'],
                             user_id=instance['user_id'],
                             project_id=instance['project_id']
                             )
        return [image_meta['id'], image_path]

    def _inject_into_image(self, context, node, instance, network_info,
                           injected_files=None, admin_password=None):
        """Inject last-mile configuration into instances image

        Much of this method is a hack around DHCP and cloud-init
        not working together with baremetal provisioning yet.
        """
        # NOTE(deva): We assume that if we're not using a kernel,
        #             then the target partition is the first partition
        partition = None
        if not instance['kernel_id']:
            partition = "1"

        ssh_key = None
        if 'key_data' in instance and instance['key_data']:
            ssh_key = str(instance['key_data'])

        if injected_files is None:
            injected_files = []
        else:
            # NOTE(deva): copy so we dont modify the original
            injected_files = list(injected_files)

        net_config = build_network_config(network_info)

        if instance['hostname']:
            injected_files.append(('/etc/hostname', instance['hostname']))

        LOG.debug(_("Injecting files into image for instance %(name)s"),
                  {'name': instance['name']})

        bm_utils.inject_into_image(
            image=get_image_file_path(instance),
            key=ssh_key,
            net=net_config,
            metadata=instance['metadata'],
            admin_password=admin_password,
            files=injected_files,
            partition=partition,
        )

    def cache_images(self, context, node, instance,
                     admin_password, image_meta, injected_files, network_info):
        """Prepare all the images for this instance."""
        flavor = self.virtapi.flavor_get(context, instance['instance_type_id'])
        tftp_image_info = get_tftp_image_info(instance, flavor)
        self._cache_tftp_images(context, instance, tftp_image_info)

        self._cache_image(context, instance, image_meta)
        if CONF.baremetal.use_file_injection:
            self._inject_into_image(context, node, instance, network_info,
                                    injected_files, admin_password)

    def destroy_images(self, context, node, instance):
        """Delete instance's image file."""
        bm_utils.unlink_without_raise(get_image_file_path(instance))
        bm_utils.rmtree_without_raise(get_image_dir_path(instance))

    def dhcp_options_for_instance(self, instance):
        """Return the DHCP options needed to PXE-boot this instance."""
        return [{'opt_name': 'bootfile-name',
                 'opt_value': CONF.baremetal.pxe_bootfile_name},
                {'opt_name': 'server-ip-address',
                 'opt_value': CONF.my_ip},
                {'opt_name': 'tftp-server',
                 'opt_value': CONF.my_ip}
                ]

    def activate_bootloader(self, context, node, instance, network_info):
        """Configure PXE boot loader for an instance

        Kernel and ramdisk images are downloaded by cache_tftp_images,
        and stored in /tftpboot/{uuid}/

        This method writes the instances config file, and then creates
        symlinks for each MAC address in the instance.

        By default, the complete layout looks like this:

        /tftpboot/
            ./{uuid}/
                 kernel
                 ramdisk
                 deploy_kernel
                 deploy_ramdisk
                 config
            ./pxelinux.cfg/
                 {mac} -> ../{uuid}/config
        """
        flavor = self.virtapi.flavor_get(context, instance['instance_type_id'])
        image_info = get_tftp_image_info(instance, flavor)
        (root_mb, swap_mb, ephemeral_mb) = get_partition_sizes(instance)
        pxe_config_file_path = get_pxe_config_file_path(instance)
        image_file_path = get_image_file_path(instance)

        # Persist everything the deploy ramdisk and later teardown need.
        deployment_key = bm_utils.random_alnum(32)
        deployment_iscsi_iqn = "iqn-%s" % instance['uuid']
        db.bm_node_update(context, node['id'],
                          {'deploy_key': deployment_key,
                           'image_path': image_file_path,
                           'pxe_config_path': pxe_config_file_path,
                           'root_mb': root_mb,
                           'swap_mb': swap_mb,
                           'ephemeral_mb': ephemeral_mb})
        pxe_config = build_pxe_config(
            node['id'],
            deployment_key,
            deployment_iscsi_iqn,
            image_info['deploy_kernel'][1],
            image_info['deploy_ramdisk'][1],
            image_info['kernel'][1],
            image_info['ramdisk'][1],
            network_info,
        )
        bm_utils.write_to_file(pxe_config_file_path, pxe_config)

        # One pxelinux.cfg symlink per MAC, all pointing at the same config.
        macs = self._collect_mac_addresses(context, node)
        for mac in macs:
            mac_path = get_pxe_mac_path(mac)
            bm_utils.unlink_without_raise(mac_path)
            bm_utils.create_link_without_raise(pxe_config_file_path, mac_path)

    def deactivate_bootloader(self, context, node, instance):
        """Delete PXE bootloader images and config."""
        try:
            # NOTE(review): activate_bootloader also sets ephemeral_mb, which
            # is not reset here -- presumably intentional; confirm before
            # changing the DB update.
            db.bm_node_update(context, node['id'],
                              {'deploy_key': None,
                               'image_path': None,
                               'pxe_config_path': None,
                               'root_mb': 0,
                               'swap_mb': 0})
        except exception.NodeNotFound:
            pass

        # NOTE(danms): the flavor extra_specs do not need to be
        # present/correct at deactivate time, so pass something empty
        # to avoid an extra lookup
        flavor = dict(extra_specs={
            'baremetal:deploy_ramdisk_id': 'ignore',
            'baremetal:deploy_kernel_id': 'ignore'})
        try:
            image_info = get_tftp_image_info(instance, flavor)
        except exception.NovaException:
            pass
        else:
            for label in image_info.keys():
                (uuid, path) = image_info[label]
                bm_utils.unlink_without_raise(path)

        bm_utils.unlink_without_raise(get_pxe_config_file_path(instance))
        try:
            macs = self._collect_mac_addresses(context, node)
        except db_exc.DBError:
            pass
        else:
            for mac in macs:
                bm_utils.unlink_without_raise(get_pxe_mac_path(mac))

        bm_utils.rmtree_without_raise(
            os.path.join(CONF.baremetal.tftp_root, instance['uuid']))

    def activate_node(self, context, node, instance):
        """Wait for PXE deployment to complete."""
        # Shared mutable state for the polling closure below.
        # (Renamed from ``locals`` to stop shadowing the builtin.)
        progress = {'error': '', 'started': False}

        def _wait_for_deploy():
            """Called at an interval until the deployment completes."""
            try:
                row = db.bm_node_get(context, node['id'])
                if instance['uuid'] != row.get('instance_uuid'):
                    progress['error'] = _("Node associated with another "
                                          "instance while waiting for deploy "
                                          "of %s")
                    raise loopingcall.LoopingCallDone()

                status = row.get('task_state')
                if (status == baremetal_states.DEPLOYING
                        and not progress['started']):
                    LOG.info(_("PXE deploy started for instance %s"),
                             instance['uuid'])
                    progress['started'] = True
                elif status in (baremetal_states.DEPLOYDONE,
                                baremetal_states.ACTIVE):
                    LOG.info(_("PXE deploy completed for instance %s"),
                             instance['uuid'])
                    raise loopingcall.LoopingCallDone()
                elif status == baremetal_states.DEPLOYFAIL:
                    progress['error'] = _("PXE deploy failed for instance %s")
            except exception.NodeNotFound:
                progress['error'] = _("Baremetal node deleted while waiting "
                                      "for deployment of instance %s")

            if (CONF.baremetal.pxe_deploy_timeout and
                    timeutils.utcnow() > expiration):
                progress['error'] = _("Timeout reached while waiting for "
                                      "PXE deploy of instance %s")
            if progress['error']:
                raise loopingcall.LoopingCallDone()

        # A zero timeout means "unlimited"; the closure only consults
        # ``expiration`` when pxe_deploy_timeout is truthy.
        expiration = timeutils.utcnow() + datetime.timedelta(
            seconds=CONF.baremetal.pxe_deploy_timeout)
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy)
        timer.start(interval=1).wait()

        if progress['error']:
            raise exception.InstanceDeployFailure(
                progress['error'] % instance['uuid'])

    def deactivate_node(self, context, node, instance):
        """No teardown is required at node deactivation time."""
        pass
|
{
"content_hash": "7aa3f1fa42cd07135cfb7d9412e2ba45",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 79,
"avg_line_length": 39.37344398340249,
"alnum_prop": 0.5804615871008536,
"repo_name": "sacharya/nova",
"id": "8e0514167d81a4f3876a316b2f9db5e2bedc8829",
"size": "19718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/baremetal/pxe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13505239"
},
{
"name": "Shell",
"bytes": "16239"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.