body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
9e92f898a900c3e01b6002d8c06e2ac2a4d78ed344a9a9d6c7e2f8f072ec6fff
|
def dict_isect_combine(dict1, dict2, combine_op=op.add):
' Intersection of dict keys and combination of dict values '
keys3 = set(dict1.keys()).intersection(set(dict2.keys()))
dict3 = {key: combine_op(dict1[key], dict2[key]) for key in keys3}
return dict3
|
Intersection of dict keys and combination of dict values
|
utool/util_dict.py
|
dict_isect_combine
|
Erotemic/utool
| 8
|
python
|
def dict_isect_combine(dict1, dict2, combine_op=op.add):
' '
keys3 = set(dict1.keys()).intersection(set(dict2.keys()))
dict3 = {key: combine_op(dict1[key], dict2[key]) for key in keys3}
return dict3
|
def dict_isect_combine(dict1, dict2, combine_op=op.add):
' '
keys3 = set(dict1.keys()).intersection(set(dict2.keys()))
dict3 = {key: combine_op(dict1[key], dict2[key]) for key in keys3}
return dict3<|docstring|>Intersection of dict keys and combination of dict values<|endoftext|>
|
9567f5f129bcd6c82bbfa677ad311c5b2e0101f18555ebc7e5cebf9ae6c9bdeb
|
def dict_union_combine(dict1, dict2, combine_op=op.add, default=util_const.NoParam, default2=util_const.NoParam):
'\n Combine of dict keys and uses dfault value when key does not exist\n\n CAREFUL WHEN USING THIS WITH REDUCE. Use dict_stack2 instead\n '
keys3 = set(dict1.keys()).union(set(dict2.keys()))
if (default is util_const.NoParam):
dict3 = {key: combine_op(dict1[key], dict2[key]) for key in keys3}
else:
if (default2 is util_const.NoParam):
default2 = default
dict3 = {key: combine_op(dict1.get(key, default), dict2.get(key, default2)) for key in keys3}
return dict3
|
Combine of dict keys and uses dfault value when key does not exist
CAREFUL WHEN USING THIS WITH REDUCE. Use dict_stack2 instead
|
utool/util_dict.py
|
dict_union_combine
|
Erotemic/utool
| 8
|
python
|
def dict_union_combine(dict1, dict2, combine_op=op.add, default=util_const.NoParam, default2=util_const.NoParam):
'\n Combine of dict keys and uses dfault value when key does not exist\n\n CAREFUL WHEN USING THIS WITH REDUCE. Use dict_stack2 instead\n '
keys3 = set(dict1.keys()).union(set(dict2.keys()))
if (default is util_const.NoParam):
dict3 = {key: combine_op(dict1[key], dict2[key]) for key in keys3}
else:
if (default2 is util_const.NoParam):
default2 = default
dict3 = {key: combine_op(dict1.get(key, default), dict2.get(key, default2)) for key in keys3}
return dict3
|
def dict_union_combine(dict1, dict2, combine_op=op.add, default=util_const.NoParam, default2=util_const.NoParam):
'\n Combine of dict keys and uses dfault value when key does not exist\n\n CAREFUL WHEN USING THIS WITH REDUCE. Use dict_stack2 instead\n '
keys3 = set(dict1.keys()).union(set(dict2.keys()))
if (default is util_const.NoParam):
dict3 = {key: combine_op(dict1[key], dict2[key]) for key in keys3}
else:
if (default2 is util_const.NoParam):
default2 = default
dict3 = {key: combine_op(dict1.get(key, default), dict2.get(key, default2)) for key in keys3}
return dict3<|docstring|>Combine of dict keys and uses dfault value when key does not exist
CAREFUL WHEN USING THIS WITH REDUCE. Use dict_stack2 instead<|endoftext|>
|
04bc3d8eadff3f4743541f9df4f0ae5728f11e7ed6f4f8db899ed1aef859973f
|
def dict_filter_nones(dict_):
"\n Removes None values\n\n Args:\n dict_ (dict): a dictionary\n\n Returns:\n dict:\n\n CommandLine:\n python -m utool.util_dict --exec-dict_filter_nones\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> # UNSTABLE_DOCTEST\n >>> # fails on python 3 because of dict None order\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: None, 2: 'blue', 3: 'four', None: 'fun'}\n >>> dict2_ = dict_filter_nones(dict_)\n >>> result = ut.repr4(dict2_, nl=False)\n >>> print(result)\n {None: 'fun', 2: 'blue', 3: 'four'}\n "
dict2_ = {key: val for (key, val) in six.iteritems(dict_) if (val is not None)}
return dict2_
|
Removes None values
Args:
dict_ (dict): a dictionary
Returns:
dict:
CommandLine:
python -m utool.util_dict --exec-dict_filter_nones
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> # fails on python 3 because of dict None order
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: None, 2: 'blue', 3: 'four', None: 'fun'}
>>> dict2_ = dict_filter_nones(dict_)
>>> result = ut.repr4(dict2_, nl=False)
>>> print(result)
{None: 'fun', 2: 'blue', 3: 'four'}
|
utool/util_dict.py
|
dict_filter_nones
|
Erotemic/utool
| 8
|
python
|
def dict_filter_nones(dict_):
"\n Removes None values\n\n Args:\n dict_ (dict): a dictionary\n\n Returns:\n dict:\n\n CommandLine:\n python -m utool.util_dict --exec-dict_filter_nones\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> # UNSTABLE_DOCTEST\n >>> # fails on python 3 because of dict None order\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: None, 2: 'blue', 3: 'four', None: 'fun'}\n >>> dict2_ = dict_filter_nones(dict_)\n >>> result = ut.repr4(dict2_, nl=False)\n >>> print(result)\n {None: 'fun', 2: 'blue', 3: 'four'}\n "
dict2_ = {key: val for (key, val) in six.iteritems(dict_) if (val is not None)}
return dict2_
|
def dict_filter_nones(dict_):
"\n Removes None values\n\n Args:\n dict_ (dict): a dictionary\n\n Returns:\n dict:\n\n CommandLine:\n python -m utool.util_dict --exec-dict_filter_nones\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> # UNSTABLE_DOCTEST\n >>> # fails on python 3 because of dict None order\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: None, 2: 'blue', 3: 'four', None: 'fun'}\n >>> dict2_ = dict_filter_nones(dict_)\n >>> result = ut.repr4(dict2_, nl=False)\n >>> print(result)\n {None: 'fun', 2: 'blue', 3: 'four'}\n "
dict2_ = {key: val for (key, val) in six.iteritems(dict_) if (val is not None)}
return dict2_<|docstring|>Removes None values
Args:
dict_ (dict): a dictionary
Returns:
dict:
CommandLine:
python -m utool.util_dict --exec-dict_filter_nones
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> # fails on python 3 because of dict None order
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: None, 2: 'blue', 3: 'four', None: 'fun'}
>>> dict2_ = dict_filter_nones(dict_)
>>> result = ut.repr4(dict2_, nl=False)
>>> print(result)
{None: 'fun', 2: 'blue', 3: 'four'}<|endoftext|>
|
79f04e0782e90a4e6cbc0d91da3982fb2fa9c1e374d46dc200eb98391aec9ef4
|
def groupby_tags(item_list, tags_list):
"\n case where an item can belong to multiple groups\n\n Args:\n item_list (list):\n tags_list (list):\n\n Returns:\n dict: groupid_to_items\n\n CommandLine:\n python -m utool.util_dict --test-groupby_tags\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> tagged_item_list = {\n >>> 'spam': ['meat', 'protein', 'food'],\n >>> 'eggs': ['protein', 'food'],\n >>> 'cheese': ['dairy', 'protein', 'food'],\n >>> 'jam': ['fruit', 'food'],\n >>> 'banana': ['weapon', 'fruit', 'food'],\n >>> }\n >>> item_list = list(tagged_item_list.keys())\n >>> tags_list = list(tagged_item_list.values())\n >>> groupid_to_items = groupby_tags(item_list, tags_list)\n >>> groupid_to_items = ut.map_vals(sorted, groupid_to_items)\n >>> result = ('groupid_to_items = %s' % (ut.repr4(groupid_to_items),))\n >>> print(result)\n groupid_to_items = {\n 'dairy': ['cheese'],\n 'food': ['banana', 'cheese', 'eggs', 'jam', 'spam'],\n 'fruit': ['banana', 'jam'],\n 'meat': ['spam'],\n 'protein': ['cheese', 'eggs', 'spam'],\n 'weapon': ['banana'],\n }\n\n "
groupid_to_items = defaultdict(list)
for (tags, item) in zip(tags_list, item_list):
for tag in tags:
groupid_to_items[tag].append(item)
return groupid_to_items
|
case where an item can belong to multiple groups
Args:
item_list (list):
tags_list (list):
Returns:
dict: groupid_to_items
CommandLine:
python -m utool.util_dict --test-groupby_tags
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> tagged_item_list = {
>>> 'spam': ['meat', 'protein', 'food'],
>>> 'eggs': ['protein', 'food'],
>>> 'cheese': ['dairy', 'protein', 'food'],
>>> 'jam': ['fruit', 'food'],
>>> 'banana': ['weapon', 'fruit', 'food'],
>>> }
>>> item_list = list(tagged_item_list.keys())
>>> tags_list = list(tagged_item_list.values())
>>> groupid_to_items = groupby_tags(item_list, tags_list)
>>> groupid_to_items = ut.map_vals(sorted, groupid_to_items)
>>> result = ('groupid_to_items = %s' % (ut.repr4(groupid_to_items),))
>>> print(result)
groupid_to_items = {
'dairy': ['cheese'],
'food': ['banana', 'cheese', 'eggs', 'jam', 'spam'],
'fruit': ['banana', 'jam'],
'meat': ['spam'],
'protein': ['cheese', 'eggs', 'spam'],
'weapon': ['banana'],
}
|
utool/util_dict.py
|
groupby_tags
|
Erotemic/utool
| 8
|
python
|
def groupby_tags(item_list, tags_list):
"\n case where an item can belong to multiple groups\n\n Args:\n item_list (list):\n tags_list (list):\n\n Returns:\n dict: groupid_to_items\n\n CommandLine:\n python -m utool.util_dict --test-groupby_tags\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> tagged_item_list = {\n >>> 'spam': ['meat', 'protein', 'food'],\n >>> 'eggs': ['protein', 'food'],\n >>> 'cheese': ['dairy', 'protein', 'food'],\n >>> 'jam': ['fruit', 'food'],\n >>> 'banana': ['weapon', 'fruit', 'food'],\n >>> }\n >>> item_list = list(tagged_item_list.keys())\n >>> tags_list = list(tagged_item_list.values())\n >>> groupid_to_items = groupby_tags(item_list, tags_list)\n >>> groupid_to_items = ut.map_vals(sorted, groupid_to_items)\n >>> result = ('groupid_to_items = %s' % (ut.repr4(groupid_to_items),))\n >>> print(result)\n groupid_to_items = {\n 'dairy': ['cheese'],\n 'food': ['banana', 'cheese', 'eggs', 'jam', 'spam'],\n 'fruit': ['banana', 'jam'],\n 'meat': ['spam'],\n 'protein': ['cheese', 'eggs', 'spam'],\n 'weapon': ['banana'],\n }\n\n "
groupid_to_items = defaultdict(list)
for (tags, item) in zip(tags_list, item_list):
for tag in tags:
groupid_to_items[tag].append(item)
return groupid_to_items
|
def groupby_tags(item_list, tags_list):
"\n case where an item can belong to multiple groups\n\n Args:\n item_list (list):\n tags_list (list):\n\n Returns:\n dict: groupid_to_items\n\n CommandLine:\n python -m utool.util_dict --test-groupby_tags\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> tagged_item_list = {\n >>> 'spam': ['meat', 'protein', 'food'],\n >>> 'eggs': ['protein', 'food'],\n >>> 'cheese': ['dairy', 'protein', 'food'],\n >>> 'jam': ['fruit', 'food'],\n >>> 'banana': ['weapon', 'fruit', 'food'],\n >>> }\n >>> item_list = list(tagged_item_list.keys())\n >>> tags_list = list(tagged_item_list.values())\n >>> groupid_to_items = groupby_tags(item_list, tags_list)\n >>> groupid_to_items = ut.map_vals(sorted, groupid_to_items)\n >>> result = ('groupid_to_items = %s' % (ut.repr4(groupid_to_items),))\n >>> print(result)\n groupid_to_items = {\n 'dairy': ['cheese'],\n 'food': ['banana', 'cheese', 'eggs', 'jam', 'spam'],\n 'fruit': ['banana', 'jam'],\n 'meat': ['spam'],\n 'protein': ['cheese', 'eggs', 'spam'],\n 'weapon': ['banana'],\n }\n\n "
groupid_to_items = defaultdict(list)
for (tags, item) in zip(tags_list, item_list):
for tag in tags:
groupid_to_items[tag].append(item)
return groupid_to_items<|docstring|>case where an item can belong to multiple groups
Args:
item_list (list):
tags_list (list):
Returns:
dict: groupid_to_items
CommandLine:
python -m utool.util_dict --test-groupby_tags
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> tagged_item_list = {
>>> 'spam': ['meat', 'protein', 'food'],
>>> 'eggs': ['protein', 'food'],
>>> 'cheese': ['dairy', 'protein', 'food'],
>>> 'jam': ['fruit', 'food'],
>>> 'banana': ['weapon', 'fruit', 'food'],
>>> }
>>> item_list = list(tagged_item_list.keys())
>>> tags_list = list(tagged_item_list.values())
>>> groupid_to_items = groupby_tags(item_list, tags_list)
>>> groupid_to_items = ut.map_vals(sorted, groupid_to_items)
>>> result = ('groupid_to_items = %s' % (ut.repr4(groupid_to_items),))
>>> print(result)
groupid_to_items = {
'dairy': ['cheese'],
'food': ['banana', 'cheese', 'eggs', 'jam', 'spam'],
'fruit': ['banana', 'jam'],
'meat': ['spam'],
'protein': ['cheese', 'eggs', 'spam'],
'weapon': ['banana'],
}<|endoftext|>
|
da93930f53a4ff0396757171b08a10a3564101223ae0765cc376e5e619f7231d
|
def group_pairs(pair_list):
'\n Groups a list of items using the first element in each pair as the item and\n the second element as the groupid.\n\n Args:\n pair_list (list): list of 2-tuples (item, groupid)\n\n Returns:\n dict: groupid_to_items: maps a groupid to a list of items\n\n SeeAlso:\n group_items\n '
groupid_to_items = defaultdict(list)
for (item, groupid) in pair_list:
groupid_to_items[groupid].append(item)
return groupid_to_items
|
Groups a list of items using the first element in each pair as the item and
the second element as the groupid.
Args:
pair_list (list): list of 2-tuples (item, groupid)
Returns:
dict: groupid_to_items: maps a groupid to a list of items
SeeAlso:
group_items
|
utool/util_dict.py
|
group_pairs
|
Erotemic/utool
| 8
|
python
|
def group_pairs(pair_list):
'\n Groups a list of items using the first element in each pair as the item and\n the second element as the groupid.\n\n Args:\n pair_list (list): list of 2-tuples (item, groupid)\n\n Returns:\n dict: groupid_to_items: maps a groupid to a list of items\n\n SeeAlso:\n group_items\n '
groupid_to_items = defaultdict(list)
for (item, groupid) in pair_list:
groupid_to_items[groupid].append(item)
return groupid_to_items
|
def group_pairs(pair_list):
'\n Groups a list of items using the first element in each pair as the item and\n the second element as the groupid.\n\n Args:\n pair_list (list): list of 2-tuples (item, groupid)\n\n Returns:\n dict: groupid_to_items: maps a groupid to a list of items\n\n SeeAlso:\n group_items\n '
groupid_to_items = defaultdict(list)
for (item, groupid) in pair_list:
groupid_to_items[groupid].append(item)
return groupid_to_items<|docstring|>Groups a list of items using the first element in each pair as the item and
the second element as the groupid.
Args:
pair_list (list): list of 2-tuples (item, groupid)
Returns:
dict: groupid_to_items: maps a groupid to a list of items
SeeAlso:
group_items<|endoftext|>
|
4236ab8b92a5190559710a076584e7dda306a7b8ff7f2a1ba8ab6d8cc4b1aacc
|
def group_items(items, by=None, sorted_=True):
"\n Groups a list of items by group id.\n\n Args:\n items (list): a list of the values to be grouped.\n if `by` is None, then each item is assumed to be a\n (groupid, value) pair.\n by (list): a corresponding list to group items by.\n if specified, these are used as the keys to group values\n in `items`\n sorted_ (bool): if True preserves the ordering of items within groups\n (default = True) FIXME. the opposite is true\n\n Returns:\n dict: groupid_to_items: maps a groupid to a list of items\n\n SeeAlso:\n group_indices - first part of a a more fine grained grouping algorithm\n apply_gropuing - second part of a more fine grained grouping algorithm\n\n CommandLine:\n python -m utool.util_dict --test-group_items\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> items = ['ham', 'jam', 'spam', 'eggs', 'cheese', 'bannana']\n >>> by = ['protein', 'fruit', 'protein', 'protein', 'dairy', 'fruit']\n >>> groupid_to_items = ut.group_items(items, iter(by))\n >>> result = ut.repr2(groupid_to_items)\n >>> print(result)\n {'dairy': ['cheese'], 'fruit': ['jam', 'bannana'], 'protein': ['ham', 'spam', 'eggs']}\n "
if (by is not None):
pairs = list(zip(by, items))
if sorted_:
try:
pairs = sorted(pairs, key=op.itemgetter(0))
except TypeError:
pairs = sorted(pairs, key=(lambda tup: str(tup[0])))
else:
pairs = items
groupid_to_items = defaultdict(list)
for (groupid, item) in pairs:
groupid_to_items[groupid].append(item)
return groupid_to_items
|
Groups a list of items by group id.
Args:
items (list): a list of the values to be grouped.
if `by` is None, then each item is assumed to be a
(groupid, value) pair.
by (list): a corresponding list to group items by.
if specified, these are used as the keys to group values
in `items`
sorted_ (bool): if True preserves the ordering of items within groups
(default = True) FIXME. the opposite is true
Returns:
dict: groupid_to_items: maps a groupid to a list of items
SeeAlso:
group_indices - first part of a a more fine grained grouping algorithm
apply_gropuing - second part of a more fine grained grouping algorithm
CommandLine:
python -m utool.util_dict --test-group_items
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> items = ['ham', 'jam', 'spam', 'eggs', 'cheese', 'bannana']
>>> by = ['protein', 'fruit', 'protein', 'protein', 'dairy', 'fruit']
>>> groupid_to_items = ut.group_items(items, iter(by))
>>> result = ut.repr2(groupid_to_items)
>>> print(result)
{'dairy': ['cheese'], 'fruit': ['jam', 'bannana'], 'protein': ['ham', 'spam', 'eggs']}
|
utool/util_dict.py
|
group_items
|
Erotemic/utool
| 8
|
python
|
def group_items(items, by=None, sorted_=True):
"\n Groups a list of items by group id.\n\n Args:\n items (list): a list of the values to be grouped.\n if `by` is None, then each item is assumed to be a\n (groupid, value) pair.\n by (list): a corresponding list to group items by.\n if specified, these are used as the keys to group values\n in `items`\n sorted_ (bool): if True preserves the ordering of items within groups\n (default = True) FIXME. the opposite is true\n\n Returns:\n dict: groupid_to_items: maps a groupid to a list of items\n\n SeeAlso:\n group_indices - first part of a a more fine grained grouping algorithm\n apply_gropuing - second part of a more fine grained grouping algorithm\n\n CommandLine:\n python -m utool.util_dict --test-group_items\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> items = ['ham', 'jam', 'spam', 'eggs', 'cheese', 'bannana']\n >>> by = ['protein', 'fruit', 'protein', 'protein', 'dairy', 'fruit']\n >>> groupid_to_items = ut.group_items(items, iter(by))\n >>> result = ut.repr2(groupid_to_items)\n >>> print(result)\n {'dairy': ['cheese'], 'fruit': ['jam', 'bannana'], 'protein': ['ham', 'spam', 'eggs']}\n "
if (by is not None):
pairs = list(zip(by, items))
if sorted_:
try:
pairs = sorted(pairs, key=op.itemgetter(0))
except TypeError:
pairs = sorted(pairs, key=(lambda tup: str(tup[0])))
else:
pairs = items
groupid_to_items = defaultdict(list)
for (groupid, item) in pairs:
groupid_to_items[groupid].append(item)
return groupid_to_items
|
def group_items(items, by=None, sorted_=True):
"\n Groups a list of items by group id.\n\n Args:\n items (list): a list of the values to be grouped.\n if `by` is None, then each item is assumed to be a\n (groupid, value) pair.\n by (list): a corresponding list to group items by.\n if specified, these are used as the keys to group values\n in `items`\n sorted_ (bool): if True preserves the ordering of items within groups\n (default = True) FIXME. the opposite is true\n\n Returns:\n dict: groupid_to_items: maps a groupid to a list of items\n\n SeeAlso:\n group_indices - first part of a a more fine grained grouping algorithm\n apply_gropuing - second part of a more fine grained grouping algorithm\n\n CommandLine:\n python -m utool.util_dict --test-group_items\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> items = ['ham', 'jam', 'spam', 'eggs', 'cheese', 'bannana']\n >>> by = ['protein', 'fruit', 'protein', 'protein', 'dairy', 'fruit']\n >>> groupid_to_items = ut.group_items(items, iter(by))\n >>> result = ut.repr2(groupid_to_items)\n >>> print(result)\n {'dairy': ['cheese'], 'fruit': ['jam', 'bannana'], 'protein': ['ham', 'spam', 'eggs']}\n "
if (by is not None):
pairs = list(zip(by, items))
if sorted_:
try:
pairs = sorted(pairs, key=op.itemgetter(0))
except TypeError:
pairs = sorted(pairs, key=(lambda tup: str(tup[0])))
else:
pairs = items
groupid_to_items = defaultdict(list)
for (groupid, item) in pairs:
groupid_to_items[groupid].append(item)
return groupid_to_items<|docstring|>Groups a list of items by group id.
Args:
items (list): a list of the values to be grouped.
if `by` is None, then each item is assumed to be a
(groupid, value) pair.
by (list): a corresponding list to group items by.
if specified, these are used as the keys to group values
in `items`
sorted_ (bool): if True preserves the ordering of items within groups
(default = True) FIXME. the opposite is true
Returns:
dict: groupid_to_items: maps a groupid to a list of items
SeeAlso:
group_indices - first part of a a more fine grained grouping algorithm
apply_gropuing - second part of a more fine grained grouping algorithm
CommandLine:
python -m utool.util_dict --test-group_items
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> items = ['ham', 'jam', 'spam', 'eggs', 'cheese', 'bannana']
>>> by = ['protein', 'fruit', 'protein', 'protein', 'dairy', 'fruit']
>>> groupid_to_items = ut.group_items(items, iter(by))
>>> result = ut.repr2(groupid_to_items)
>>> print(result)
{'dairy': ['cheese'], 'fruit': ['jam', 'bannana'], 'protein': ['ham', 'spam', 'eggs']}<|endoftext|>
|
6719b3ba44449bfcbe6a96ac0300ec8f8ec79707c390531cee8bbee59fbc43f4
|
def hierarchical_group_items(item_list, groupids_list):
"\n Generalization of group_item. Convert a flast list of ids into a heirarchical dictionary.\n\n TODO: move to util_dict\n\n Reference:\n http://stackoverflow.com/questions/10193235/python-translate-a-table-to-a-hierarchical-dictionary\n\n Args:\n item_list (list):\n groupids_list (list):\n\n CommandLine:\n python -m utool.util_dict --exec-hierarchical_group_items\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4]\n >>> groupids_list = [[1, 1, 2, 2]]\n >>> tree = hierarchical_group_items(item_list, groupids_list)\n >>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))\n >>> print(result)\n tree = {1: [1, 2], 2: [3, 4]}\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4, 5, 6, 7, 8]\n >>> groupids_list = [[1, 2, 1, 2, 1, 2, 1, 2], [3, 2, 2, 2, 3, 1, 1, 1]]\n >>> tree = hierarchical_group_items(item_list, groupids_list)\n >>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))\n >>> print(result)\n tree = {\n 1: {1: [7], 2: [3], 3: [1, 5]},\n 2: {1: [6, 8], 2: [2, 4]},\n }\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4]\n >>> groupids_list = [[1, 1, 1, 2], [1, 2, 2, 2], [1, 3, 1, 1]]\n >>> tree = hierarchical_group_items(item_list, groupids_list)\n >>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))\n >>> print(result)\n tree = {\n 1: {\n 1: {1: [1]},\n 2: {1: [3], 3: [2]},\n },\n 2: {\n 2: {1: [4]},\n },\n }\n\n "
num_groups = len(groupids_list)
leaf_type = partial(defaultdict, list)
if (num_groups > 1):
node_type = leaf_type
for _ in range((len(groupids_list) - 2)):
node_type = partial(defaultdict, node_type)
root_type = node_type
elif (num_groups == 1):
root_type = list
else:
raise ValueError('must suply groupids')
tree = defaultdict(root_type)
groupid_tuple_list = list(zip(*groupids_list))
for (groupid_tuple, item) in zip(groupid_tuple_list, item_list):
node = tree
for groupid in groupid_tuple:
node = node[groupid]
node.append(item)
return tree
|
Generalization of group_item. Convert a flast list of ids into a heirarchical dictionary.
TODO: move to util_dict
Reference:
http://stackoverflow.com/questions/10193235/python-translate-a-table-to-a-hierarchical-dictionary
Args:
item_list (list):
groupids_list (list):
CommandLine:
python -m utool.util_dict --exec-hierarchical_group_items
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 3, 4]
>>> groupids_list = [[1, 1, 2, 2]]
>>> tree = hierarchical_group_items(item_list, groupids_list)
>>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))
>>> print(result)
tree = {1: [1, 2], 2: [3, 4]}
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 3, 4, 5, 6, 7, 8]
>>> groupids_list = [[1, 2, 1, 2, 1, 2, 1, 2], [3, 2, 2, 2, 3, 1, 1, 1]]
>>> tree = hierarchical_group_items(item_list, groupids_list)
>>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))
>>> print(result)
tree = {
1: {1: [7], 2: [3], 3: [1, 5]},
2: {1: [6, 8], 2: [2, 4]},
}
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 3, 4]
>>> groupids_list = [[1, 1, 1, 2], [1, 2, 2, 2], [1, 3, 1, 1]]
>>> tree = hierarchical_group_items(item_list, groupids_list)
>>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))
>>> print(result)
tree = {
1: {
1: {1: [1]},
2: {1: [3], 3: [2]},
},
2: {
2: {1: [4]},
},
}
|
utool/util_dict.py
|
hierarchical_group_items
|
Erotemic/utool
| 8
|
python
|
def hierarchical_group_items(item_list, groupids_list):
"\n Generalization of group_item. Convert a flast list of ids into a heirarchical dictionary.\n\n TODO: move to util_dict\n\n Reference:\n http://stackoverflow.com/questions/10193235/python-translate-a-table-to-a-hierarchical-dictionary\n\n Args:\n item_list (list):\n groupids_list (list):\n\n CommandLine:\n python -m utool.util_dict --exec-hierarchical_group_items\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4]\n >>> groupids_list = [[1, 1, 2, 2]]\n >>> tree = hierarchical_group_items(item_list, groupids_list)\n >>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))\n >>> print(result)\n tree = {1: [1, 2], 2: [3, 4]}\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4, 5, 6, 7, 8]\n >>> groupids_list = [[1, 2, 1, 2, 1, 2, 1, 2], [3, 2, 2, 2, 3, 1, 1, 1]]\n >>> tree = hierarchical_group_items(item_list, groupids_list)\n >>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))\n >>> print(result)\n tree = {\n 1: {1: [7], 2: [3], 3: [1, 5]},\n 2: {1: [6, 8], 2: [2, 4]},\n }\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4]\n >>> groupids_list = [[1, 1, 1, 2], [1, 2, 2, 2], [1, 3, 1, 1]]\n >>> tree = hierarchical_group_items(item_list, groupids_list)\n >>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))\n >>> print(result)\n tree = {\n 1: {\n 1: {1: [1]},\n 2: {1: [3], 3: [2]},\n },\n 2: {\n 2: {1: [4]},\n },\n }\n\n "
num_groups = len(groupids_list)
leaf_type = partial(defaultdict, list)
if (num_groups > 1):
node_type = leaf_type
for _ in range((len(groupids_list) - 2)):
node_type = partial(defaultdict, node_type)
root_type = node_type
elif (num_groups == 1):
root_type = list
else:
raise ValueError('must suply groupids')
tree = defaultdict(root_type)
groupid_tuple_list = list(zip(*groupids_list))
for (groupid_tuple, item) in zip(groupid_tuple_list, item_list):
node = tree
for groupid in groupid_tuple:
node = node[groupid]
node.append(item)
return tree
|
def hierarchical_group_items(item_list, groupids_list):
"\n Generalization of group_item. Convert a flast list of ids into a heirarchical dictionary.\n\n TODO: move to util_dict\n\n Reference:\n http://stackoverflow.com/questions/10193235/python-translate-a-table-to-a-hierarchical-dictionary\n\n Args:\n item_list (list):\n groupids_list (list):\n\n CommandLine:\n python -m utool.util_dict --exec-hierarchical_group_items\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4]\n >>> groupids_list = [[1, 1, 2, 2]]\n >>> tree = hierarchical_group_items(item_list, groupids_list)\n >>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))\n >>> print(result)\n tree = {1: [1, 2], 2: [3, 4]}\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4, 5, 6, 7, 8]\n >>> groupids_list = [[1, 2, 1, 2, 1, 2, 1, 2], [3, 2, 2, 2, 3, 1, 1, 1]]\n >>> tree = hierarchical_group_items(item_list, groupids_list)\n >>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))\n >>> print(result)\n tree = {\n 1: {1: [7], 2: [3], 3: [1, 5]},\n 2: {1: [6, 8], 2: [2, 4]},\n }\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4]\n >>> groupids_list = [[1, 1, 1, 2], [1, 2, 2, 2], [1, 3, 1, 1]]\n >>> tree = hierarchical_group_items(item_list, groupids_list)\n >>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))\n >>> print(result)\n tree = {\n 1: {\n 1: {1: [1]},\n 2: {1: [3], 3: [2]},\n },\n 2: {\n 2: {1: [4]},\n },\n }\n\n "
num_groups = len(groupids_list)
leaf_type = partial(defaultdict, list)
if (num_groups > 1):
node_type = leaf_type
for _ in range((len(groupids_list) - 2)):
node_type = partial(defaultdict, node_type)
root_type = node_type
elif (num_groups == 1):
root_type = list
else:
raise ValueError('must suply groupids')
tree = defaultdict(root_type)
groupid_tuple_list = list(zip(*groupids_list))
for (groupid_tuple, item) in zip(groupid_tuple_list, item_list):
node = tree
for groupid in groupid_tuple:
node = node[groupid]
node.append(item)
return tree<|docstring|>Generalization of group_item. Convert a flast list of ids into a heirarchical dictionary.
TODO: move to util_dict
Reference:
http://stackoverflow.com/questions/10193235/python-translate-a-table-to-a-hierarchical-dictionary
Args:
item_list (list):
groupids_list (list):
CommandLine:
python -m utool.util_dict --exec-hierarchical_group_items
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 3, 4]
>>> groupids_list = [[1, 1, 2, 2]]
>>> tree = hierarchical_group_items(item_list, groupids_list)
>>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))
>>> print(result)
tree = {1: [1, 2], 2: [3, 4]}
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 3, 4, 5, 6, 7, 8]
>>> groupids_list = [[1, 2, 1, 2, 1, 2, 1, 2], [3, 2, 2, 2, 3, 1, 1, 1]]
>>> tree = hierarchical_group_items(item_list, groupids_list)
>>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))
>>> print(result)
tree = {
1: {1: [7], 2: [3], 3: [1, 5]},
2: {1: [6, 8], 2: [2, 4]},
}
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 3, 4]
>>> groupids_list = [[1, 1, 1, 2], [1, 2, 2, 2], [1, 3, 1, 1]]
>>> tree = hierarchical_group_items(item_list, groupids_list)
>>> result = ('tree = ' + ut.repr4(tree, nl=len(groupids_list) - 1))
>>> print(result)
tree = {
1: {
1: {1: [1]},
2: {1: [3], 3: [2]},
},
2: {
2: {1: [4]},
},
}<|endoftext|>
|
0f016ff507112d50cd62e21e1376e0dacd6815e64534381c8fa29c880382f27a
|
def iflatten_dict_values(node, depth=0):
'\n >>> from utool.util_dict import * # NOQA\n '
if isinstance(node, dict):
_iter = (iflatten_dict_values(value) for value in six.itervalues(node))
return util_iter.iflatten(_iter)
else:
return node
|
>>> from utool.util_dict import * # NOQA
|
utool/util_dict.py
|
iflatten_dict_values
|
Erotemic/utool
| 8
|
python
|
def iflatten_dict_values(node, depth=0):
'\n \n '
if isinstance(node, dict):
_iter = (iflatten_dict_values(value) for value in six.itervalues(node))
return util_iter.iflatten(_iter)
else:
return node
|
def iflatten_dict_values(node, depth=0):
'\n \n '
if isinstance(node, dict):
_iter = (iflatten_dict_values(value) for value in six.itervalues(node))
return util_iter.iflatten(_iter)
else:
return node<|docstring|>>>> from utool.util_dict import * # NOQA<|endoftext|>
|
298f434fe46aac2afab2eac96e07e37505640ca9ae0e59d50526780f1138b26e
|
def hierarchical_map_vals(func, node, max_depth=None, depth=0):
    """Apply ``func`` to every leaf of a nested dict-of-dicts tree.

    Args:
        func (callable): mapped over each leaf value
        node: a nested mapping (anything without ``items`` is a leaf)
        max_depth (int, optional): levels at or beyond this depth are
            treated as flat dicts of leaves
        depth (int): current recursion depth (internal)

    Returns:
        a tree of the same shape (and mapping type) with mapped leaves

    CommandLine:
        python -m utool.util_dict --exec-hierarchical_map_vals

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> tree = {1: {1: [7], 2: [3], 3: [1, 5]}, 2: {1: [6, 8], 2: [2, 4]}}
        >>> print(hierarchical_map_vals(len, tree))
        {1: {1: 1, 2: 1, 3: 2}, 2: {1: 2, 2: 2}}
    """
    # Leaf: anything that does not look like a mapping is mapped directly.
    if not hasattr(node, 'items'):
        return func(node)
    # Depth cutoff: treat this level's values as leaves.
    if max_depth is not None and depth >= max_depth:
        return map_dict_vals(func, node)
    # Recurse one level down, preserving ordered vs plain mapping type.
    mapped = [(k, hierarchical_map_vals(func, v, max_depth, depth + 1))
              for k, v in node.items()]
    return OrderedDict(mapped) if isinstance(node, OrderedDict) else dict(mapped)
|
node is a dict tree like structure with leaves of type list
TODO: move to util_dict
CommandLine:
python -m utool.util_dict --exec-hierarchical_map_vals
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 3, 4, 5, 6, 7, 8]
>>> groupids_list = [[1, 2, 1, 2, 1, 2, 1, 2], [3, 2, 2, 2, 3, 1, 1, 1]]
>>> tree = ut.hierarchical_group_items(item_list, groupids_list)
>>> len_tree = ut.hierarchical_map_vals(len, tree)
>>> result = ('len_tree = ' + ut.repr4(len_tree, nl=1))
>>> print(result)
len_tree = {
1: {1: 1, 2: 1, 3: 2},
2: {1: 2, 2: 2},
}
Example1:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> depth = 4
>>> item_list = list(range(2 ** (depth + 1)))
>>> num = len(item_list) // 2
>>> groupids_list = []
>>> total = 0
>>> for level in range(depth):
... num2 = len(item_list) // int((num * 2))
... #nonflat_levelids = [([total + 2 * x + 1] * num + [total + 2 * x + 2] * num) for x in range(num2)]
... nonflat_levelids = [([1] * num + [2] * num) for x in range(num2)]
... levelids = ut.flatten(nonflat_levelids)
... groupids_list.append(levelids)
... total += num2 * 2
... num //= 2
>>> print('groupids_list = %s' % (ut.repr4(groupids_list, nl=1),))
>>> print('depth = %r' % (len(groupids_list),))
>>> tree = ut.hierarchical_group_items(item_list, groupids_list)
>>> print('tree = ' + ut.repr4(tree, nl=None))
>>> flat_tree_values = list(ut.iflatten_dict_values(tree))
>>> assert sorted(flat_tree_values) == sorted(item_list)
>>> print('flat_tree_values = ' + str(flat_tree_values))
>>> #print('flat_tree_keys = ' + str(list(ut.iflatten_dict_keys(tree))))
>>> #print('iflatten_dict_items = ' + str(list(ut.iflatten_dict_items(tree))))
>>> len_tree = ut.hierarchical_map_vals(len, tree, max_depth=4)
>>> result = ('len_tree = ' + ut.repr4(len_tree, nl=None))
>>> print(result)
|
utool/util_dict.py
|
hierarchical_map_vals
|
Erotemic/utool
| 8
|
python
|
def hierarchical_map_vals(func, node, max_depth=None, depth=0):
"\n node is a dict tree like structure with leaves of type list\n\n TODO: move to util_dict\n\n CommandLine:\n python -m utool.util_dict --exec-hierarchical_map_vals\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4, 5, 6, 7, 8]\n >>> groupids_list = [[1, 2, 1, 2, 1, 2, 1, 2], [3, 2, 2, 2, 3, 1, 1, 1]]\n >>> tree = ut.hierarchical_group_items(item_list, groupids_list)\n >>> len_tree = ut.hierarchical_map_vals(len, tree)\n >>> result = ('len_tree = ' + ut.repr4(len_tree, nl=1))\n >>> print(result)\n len_tree = {\n 1: {1: 1, 2: 1, 3: 2},\n 2: {1: 2, 2: 2},\n }\n\n Example1:\n >>> # DISABLE_DOCTEST\n >>> # UNSTABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> depth = 4\n >>> item_list = list(range(2 ** (depth + 1)))\n >>> num = len(item_list) // 2\n >>> groupids_list = []\n >>> total = 0\n >>> for level in range(depth):\n ... num2 = len(item_list) // int((num * 2))\n ... #nonflat_levelids = [([total + 2 * x + 1] * num + [total + 2 * x + 2] * num) for x in range(num2)]\n ... nonflat_levelids = [([1] * num + [2] * num) for x in range(num2)]\n ... levelids = ut.flatten(nonflat_levelids)\n ... groupids_list.append(levelids)\n ... total += num2 * 2\n ... 
num //= 2\n >>> print('groupids_list = %s' % (ut.repr4(groupids_list, nl=1),))\n >>> print('depth = %r' % (len(groupids_list),))\n >>> tree = ut.hierarchical_group_items(item_list, groupids_list)\n >>> print('tree = ' + ut.repr4(tree, nl=None))\n >>> flat_tree_values = list(ut.iflatten_dict_values(tree))\n >>> assert sorted(flat_tree_values) == sorted(item_list)\n >>> print('flat_tree_values = ' + str(flat_tree_values))\n >>> #print('flat_tree_keys = ' + str(list(ut.iflatten_dict_keys(tree))))\n >>> #print('iflatten_dict_items = ' + str(list(ut.iflatten_dict_items(tree))))\n >>> len_tree = ut.hierarchical_map_vals(len, tree, max_depth=4)\n >>> result = ('len_tree = ' + ut.repr4(len_tree, nl=None))\n >>> print(result)\n\n "
if (not hasattr(node, 'items')):
return func(node)
elif ((max_depth is not None) and (depth >= max_depth)):
return map_dict_vals(func, node)
else:
keyval_list = [(key, hierarchical_map_vals(func, val, max_depth, (depth + 1))) for (key, val) in node.items()]
if isinstance(node, OrderedDict):
return OrderedDict(keyval_list)
else:
return dict(keyval_list)
|
def hierarchical_map_vals(func, node, max_depth=None, depth=0):
"\n node is a dict tree like structure with leaves of type list\n\n TODO: move to util_dict\n\n CommandLine:\n python -m utool.util_dict --exec-hierarchical_map_vals\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4, 5, 6, 7, 8]\n >>> groupids_list = [[1, 2, 1, 2, 1, 2, 1, 2], [3, 2, 2, 2, 3, 1, 1, 1]]\n >>> tree = ut.hierarchical_group_items(item_list, groupids_list)\n >>> len_tree = ut.hierarchical_map_vals(len, tree)\n >>> result = ('len_tree = ' + ut.repr4(len_tree, nl=1))\n >>> print(result)\n len_tree = {\n 1: {1: 1, 2: 1, 3: 2},\n 2: {1: 2, 2: 2},\n }\n\n Example1:\n >>> # DISABLE_DOCTEST\n >>> # UNSTABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> depth = 4\n >>> item_list = list(range(2 ** (depth + 1)))\n >>> num = len(item_list) // 2\n >>> groupids_list = []\n >>> total = 0\n >>> for level in range(depth):\n ... num2 = len(item_list) // int((num * 2))\n ... #nonflat_levelids = [([total + 2 * x + 1] * num + [total + 2 * x + 2] * num) for x in range(num2)]\n ... nonflat_levelids = [([1] * num + [2] * num) for x in range(num2)]\n ... levelids = ut.flatten(nonflat_levelids)\n ... groupids_list.append(levelids)\n ... total += num2 * 2\n ... 
num //= 2\n >>> print('groupids_list = %s' % (ut.repr4(groupids_list, nl=1),))\n >>> print('depth = %r' % (len(groupids_list),))\n >>> tree = ut.hierarchical_group_items(item_list, groupids_list)\n >>> print('tree = ' + ut.repr4(tree, nl=None))\n >>> flat_tree_values = list(ut.iflatten_dict_values(tree))\n >>> assert sorted(flat_tree_values) == sorted(item_list)\n >>> print('flat_tree_values = ' + str(flat_tree_values))\n >>> #print('flat_tree_keys = ' + str(list(ut.iflatten_dict_keys(tree))))\n >>> #print('iflatten_dict_items = ' + str(list(ut.iflatten_dict_items(tree))))\n >>> len_tree = ut.hierarchical_map_vals(len, tree, max_depth=4)\n >>> result = ('len_tree = ' + ut.repr4(len_tree, nl=None))\n >>> print(result)\n\n "
if (not hasattr(node, 'items')):
return func(node)
elif ((max_depth is not None) and (depth >= max_depth)):
return map_dict_vals(func, node)
else:
keyval_list = [(key, hierarchical_map_vals(func, val, max_depth, (depth + 1))) for (key, val) in node.items()]
if isinstance(node, OrderedDict):
return OrderedDict(keyval_list)
else:
return dict(keyval_list)<|docstring|>node is a dict tree like structure with leaves of type list
TODO: move to util_dict
CommandLine:
python -m utool.util_dict --exec-hierarchical_map_vals
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 3, 4, 5, 6, 7, 8]
>>> groupids_list = [[1, 2, 1, 2, 1, 2, 1, 2], [3, 2, 2, 2, 3, 1, 1, 1]]
>>> tree = ut.hierarchical_group_items(item_list, groupids_list)
>>> len_tree = ut.hierarchical_map_vals(len, tree)
>>> result = ('len_tree = ' + ut.repr4(len_tree, nl=1))
>>> print(result)
len_tree = {
1: {1: 1, 2: 1, 3: 2},
2: {1: 2, 2: 2},
}
Example1:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> depth = 4
>>> item_list = list(range(2 ** (depth + 1)))
>>> num = len(item_list) // 2
>>> groupids_list = []
>>> total = 0
>>> for level in range(depth):
... num2 = len(item_list) // int((num * 2))
... #nonflat_levelids = [([total + 2 * x + 1] * num + [total + 2 * x + 2] * num) for x in range(num2)]
... nonflat_levelids = [([1] * num + [2] * num) for x in range(num2)]
... levelids = ut.flatten(nonflat_levelids)
... groupids_list.append(levelids)
... total += num2 * 2
... num //= 2
>>> print('groupids_list = %s' % (ut.repr4(groupids_list, nl=1),))
>>> print('depth = %r' % (len(groupids_list),))
>>> tree = ut.hierarchical_group_items(item_list, groupids_list)
>>> print('tree = ' + ut.repr4(tree, nl=None))
>>> flat_tree_values = list(ut.iflatten_dict_values(tree))
>>> assert sorted(flat_tree_values) == sorted(item_list)
>>> print('flat_tree_values = ' + str(flat_tree_values))
>>> #print('flat_tree_keys = ' + str(list(ut.iflatten_dict_keys(tree))))
>>> #print('iflatten_dict_items = ' + str(list(ut.iflatten_dict_items(tree))))
>>> len_tree = ut.hierarchical_map_vals(len, tree, max_depth=4)
>>> result = ('len_tree = ' + ut.repr4(len_tree, nl=None))
>>> print(result)<|endoftext|>
|
3a79f9f647e84cf0c7114012017bd443a2121e67dccd75409a8abc5bd695fd47
|
def move_odict_item(odict, key, newpos):
    """Move ``key`` to index ``newpos`` of an OrderedDict, in place.

    Works by rotating entries to the back: the target key is sent to the
    end first, then every other entry at index >= ``newpos`` follows it,
    which leaves ``key`` sitting at the requested position.

    References:
        http://stackoverflow.com/questions/22663966/changing-order-of-ordered-dictionary-in-python

    CommandLine:
        python -m utool.util_dict --exec-move_odict_item

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> odict = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('e', 5)])
        >>> move_odict_item(odict, 'c', 1)
        >>> move_odict_item(odict, 'a', 3)
        >>> move_odict_item(odict, 'a', 0)
        >>> move_odict_item(odict, 'b', 2)
        >>> print(list(odict.items()))
        [('a', 1), ('c', 3), ('b', 2), ('e', 5)]
    """
    # Send the target to the back, then rotate the tail behind it.
    odict[key] = odict.pop(key)
    snapshot = list(odict.keys())
    for idx, other in enumerate(snapshot):
        if idx < newpos or other == key:
            continue
        odict[other] = odict.pop(other)
    return odict
|
References:
http://stackoverflow.com/questions/22663966/changing-order-of-ordered-dictionary-in-python
CommandLine:
python -m utool.util_dict --exec-move_odict_item
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> odict = OrderedDict()
>>> odict['a'] = 1
>>> odict['b'] = 2
>>> odict['c'] = 3
>>> odict['e'] = 5
>>> print(ut.repr4(odict, nl=False))
>>> move_odict_item(odict, 'c', 1)
>>> print(ut.repr4(odict, nl=False))
>>> move_odict_item(odict, 'a', 3)
>>> print(ut.repr4(odict, nl=False))
>>> move_odict_item(odict, 'a', 0)
>>> print(ut.repr4(odict, nl=False))
>>> move_odict_item(odict, 'b', 2)
>>> result = ut.repr4(odict, nl=False)
>>> print(result)
{'a': 1, 'c': 3, 'b': 2, 'e': 5}
|
utool/util_dict.py
|
move_odict_item
|
Erotemic/utool
| 8
|
python
|
def move_odict_item(odict, key, newpos):
"\n References:\n http://stackoverflow.com/questions/22663966/changing-order-of-ordered-dictionary-in-python\n\n CommandLine:\n python -m utool.util_dict --exec-move_odict_item\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> odict = OrderedDict()\n >>> odict['a'] = 1\n >>> odict['b'] = 2\n >>> odict['c'] = 3\n >>> odict['e'] = 5\n >>> print(ut.repr4(odict, nl=False))\n >>> move_odict_item(odict, 'c', 1)\n >>> print(ut.repr4(odict, nl=False))\n >>> move_odict_item(odict, 'a', 3)\n >>> print(ut.repr4(odict, nl=False))\n >>> move_odict_item(odict, 'a', 0)\n >>> print(ut.repr4(odict, nl=False))\n >>> move_odict_item(odict, 'b', 2)\n >>> result = ut.repr4(odict, nl=False)\n >>> print(result)\n {'a': 1, 'c': 3, 'b': 2, 'e': 5}\n "
odict[key] = odict.pop(key)
for (i, otherkey) in enumerate(list(odict.keys())):
if ((otherkey != key) and (i >= newpos)):
odict[otherkey] = odict.pop(otherkey)
return odict
|
def move_odict_item(odict, key, newpos):
"\n References:\n http://stackoverflow.com/questions/22663966/changing-order-of-ordered-dictionary-in-python\n\n CommandLine:\n python -m utool.util_dict --exec-move_odict_item\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> odict = OrderedDict()\n >>> odict['a'] = 1\n >>> odict['b'] = 2\n >>> odict['c'] = 3\n >>> odict['e'] = 5\n >>> print(ut.repr4(odict, nl=False))\n >>> move_odict_item(odict, 'c', 1)\n >>> print(ut.repr4(odict, nl=False))\n >>> move_odict_item(odict, 'a', 3)\n >>> print(ut.repr4(odict, nl=False))\n >>> move_odict_item(odict, 'a', 0)\n >>> print(ut.repr4(odict, nl=False))\n >>> move_odict_item(odict, 'b', 2)\n >>> result = ut.repr4(odict, nl=False)\n >>> print(result)\n {'a': 1, 'c': 3, 'b': 2, 'e': 5}\n "
odict[key] = odict.pop(key)
for (i, otherkey) in enumerate(list(odict.keys())):
if ((otherkey != key) and (i >= newpos)):
odict[otherkey] = odict.pop(otherkey)
return odict<|docstring|>References:
http://stackoverflow.com/questions/22663966/changing-order-of-ordered-dictionary-in-python
CommandLine:
python -m utool.util_dict --exec-move_odict_item
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> odict = OrderedDict()
>>> odict['a'] = 1
>>> odict['b'] = 2
>>> odict['c'] = 3
>>> odict['e'] = 5
>>> print(ut.repr4(odict, nl=False))
>>> move_odict_item(odict, 'c', 1)
>>> print(ut.repr4(odict, nl=False))
>>> move_odict_item(odict, 'a', 3)
>>> print(ut.repr4(odict, nl=False))
>>> move_odict_item(odict, 'a', 0)
>>> print(ut.repr4(odict, nl=False))
>>> move_odict_item(odict, 'b', 2)
>>> result = ut.repr4(odict, nl=False)
>>> print(result)
{'a': 1, 'c': 3, 'b': 2, 'e': 5}<|endoftext|>
|
b4313b286875ff6ef662b5ef06138d524c5a4edd221dd122cbc07870e99de4c9
|
def sort_dict(dict_, part='keys', key=None, reverse=False):
    """Return an OrderedDict whose items are sorted by keys or by values.

    Args:
        dict_ (dict): the dictionary to sort
        part (str): 'keys' to sort on keys, 'vals'/'values' to sort on values
        key (callable, optional): maps the selected part to a sortable value
        reverse (bool): True for descending order (default False, ascending)

    Returns:
        OrderedDict: a new, sorted dictionary

    Raises:
        ValueError: if ``part`` is not one of the recognized names

    CommandLine:
        python -m utool.util_dict sort_dict

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> dict_ = {'a': 3, 'c': 2, 'b': 1}
        >>> print(sort_dict(dict_, 'keys'))
        {'a': 3, 'b': 1, 'c': 2}
        >>> print(sort_dict(dict_, 'vals'))
        {'b': 1, 'c': 2, 'a': 3}
    """
    # Select which element of each (key, value) pair drives the sort.
    if part == 'keys':
        index = 0
    elif part in {'vals', 'values'}:
        index = 1
    else:
        raise ValueError('Unknown method part=%r' % (part,))
    if key is None:
        sortkey = op.itemgetter(index)
    else:
        # Wrap the user key so it only sees the chosen pair component.
        def sortkey(item):
            return key(item[index])
    pairs = sorted(six.iteritems(dict_), key=sortkey, reverse=reverse)
    return OrderedDict(pairs)
|
sorts a dictionary by its values or its keys
Args:
dict_ (dict_): a dictionary
part (str): specifies to sort by keys or values
key (Optional[func]): a function that takes specified part
and returns a sortable value
reverse (bool): (Defaults to False) - True for descinding order. False
for ascending order.
Returns:
OrderedDict: sorted dictionary
CommandLine:
python -m utool.util_dict sort_dict
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': 3, 'c': 2, 'b': 1}
>>> results = []
>>> results.append(sort_dict(dict_, 'keys'))
>>> results.append(sort_dict(dict_, 'vals'))
>>> results.append(sort_dict(dict_, 'vals', lambda x: -x))
>>> result = ut.repr4(results)
>>> print(result)
[
{'a': 3, 'b': 1, 'c': 2},
{'b': 1, 'c': 2, 'a': 3},
{'a': 3, 'c': 2, 'b': 1},
]
|
utool/util_dict.py
|
sort_dict
|
Erotemic/utool
| 8
|
python
|
def sort_dict(dict_, part='keys', key=None, reverse=False):
"\n sorts a dictionary by its values or its keys\n\n Args:\n dict_ (dict_): a dictionary\n part (str): specifies to sort by keys or values\n key (Optional[func]): a function that takes specified part\n and returns a sortable value\n reverse (bool): (Defaults to False) - True for descinding order. False\n for ascending order.\n\n Returns:\n OrderedDict: sorted dictionary\n\n CommandLine:\n python -m utool.util_dict sort_dict\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': 3, 'c': 2, 'b': 1}\n >>> results = []\n >>> results.append(sort_dict(dict_, 'keys'))\n >>> results.append(sort_dict(dict_, 'vals'))\n >>> results.append(sort_dict(dict_, 'vals', lambda x: -x))\n >>> result = ut.repr4(results)\n >>> print(result)\n [\n {'a': 3, 'b': 1, 'c': 2},\n {'b': 1, 'c': 2, 'a': 3},\n {'a': 3, 'c': 2, 'b': 1},\n ]\n "
if (part == 'keys'):
index = 0
elif (part in {'vals', 'values'}):
index = 1
else:
raise ValueError(('Unknown method part=%r' % (part,)))
if (key is None):
_key = op.itemgetter(index)
else:
def _key(item):
return key(item[index])
sorted_items = sorted(six.iteritems(dict_), key=_key, reverse=reverse)
sorted_dict = OrderedDict(sorted_items)
return sorted_dict
|
def sort_dict(dict_, part='keys', key=None, reverse=False):
"\n sorts a dictionary by its values or its keys\n\n Args:\n dict_ (dict_): a dictionary\n part (str): specifies to sort by keys or values\n key (Optional[func]): a function that takes specified part\n and returns a sortable value\n reverse (bool): (Defaults to False) - True for descinding order. False\n for ascending order.\n\n Returns:\n OrderedDict: sorted dictionary\n\n CommandLine:\n python -m utool.util_dict sort_dict\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': 3, 'c': 2, 'b': 1}\n >>> results = []\n >>> results.append(sort_dict(dict_, 'keys'))\n >>> results.append(sort_dict(dict_, 'vals'))\n >>> results.append(sort_dict(dict_, 'vals', lambda x: -x))\n >>> result = ut.repr4(results)\n >>> print(result)\n [\n {'a': 3, 'b': 1, 'c': 2},\n {'b': 1, 'c': 2, 'a': 3},\n {'a': 3, 'c': 2, 'b': 1},\n ]\n "
if (part == 'keys'):
index = 0
elif (part in {'vals', 'values'}):
index = 1
else:
raise ValueError(('Unknown method part=%r' % (part,)))
if (key is None):
_key = op.itemgetter(index)
else:
def _key(item):
return key(item[index])
sorted_items = sorted(six.iteritems(dict_), key=_key, reverse=reverse)
sorted_dict = OrderedDict(sorted_items)
return sorted_dict<|docstring|>sorts a dictionary by its values or its keys
Args:
dict_ (dict_): a dictionary
part (str): specifies to sort by keys or values
key (Optional[func]): a function that takes specified part
and returns a sortable value
reverse (bool): (Defaults to False) - True for descinding order. False
for ascending order.
Returns:
OrderedDict: sorted dictionary
CommandLine:
python -m utool.util_dict sort_dict
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': 3, 'c': 2, 'b': 1}
>>> results = []
>>> results.append(sort_dict(dict_, 'keys'))
>>> results.append(sort_dict(dict_, 'vals'))
>>> results.append(sort_dict(dict_, 'vals', lambda x: -x))
>>> result = ut.repr4(results)
>>> print(result)
[
{'a': 3, 'b': 1, 'c': 2},
{'b': 1, 'c': 2, 'a': 3},
{'a': 3, 'c': 2, 'b': 1},
]<|endoftext|>
|
2a4905064cd97a708e5e81d71ede695a4854a806102e8a2ccbaa555e2f7b0be4
|
def order_dict_by(dict_, key_order):
    """Return an OrderedDict whose items follow a custom key order.

    Keys listed in ``key_order`` come first (in that order); keys present
    in ``dict_`` but missing from ``key_order`` are appended afterwards.
    Entries of ``key_order`` that are not in ``dict_`` are ignored.

    Args:
        dict_ (dict): a dictionary
        key_order (list): custom key order

    Returns:
        OrderedDict: reordered dictionary

    CommandLine:
        python -m utool.util_dict --exec-order_dict_by

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> sorted_dict = order_dict_by({1: 1, 2: 2, 3: 3, 4: 4}, [4, 2, 3, 1])
        >>> print(list(sorted_dict.items()))
        [(4, 4), (2, 2), (3, 3), (1, 1)]
    """
    present = set(dict_.keys())
    trailing = present - set(key_order)
    ordered = OrderedDict()
    for k in it.chain(key_order, trailing):
        if k in present:
            ordered[k] = dict_[k]
    return ordered
|
Reorders items in a dictionary according to a custom key order
Args:
dict_ (dict_): a dictionary
key_order (list): custom key order
Returns:
OrderedDict: sorted_dict
CommandLine:
python -m utool.util_dict --exec-order_dict_by
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 1, 2: 2, 3: 3, 4: 4}
>>> key_order = [4, 2, 3, 1]
>>> sorted_dict = order_dict_by(dict_, key_order)
>>> result = ('sorted_dict = %s' % (ut.repr4(sorted_dict, nl=False),))
>>> print(result)
>>> assert result == 'sorted_dict = {4: 4, 2: 2, 3: 3, 1: 1}'
|
utool/util_dict.py
|
order_dict_by
|
Erotemic/utool
| 8
|
python
|
def order_dict_by(dict_, key_order):
"\n Reorders items in a dictionary according to a custom key order\n\n Args:\n dict_ (dict_): a dictionary\n key_order (list): custom key order\n\n Returns:\n OrderedDict: sorted_dict\n\n CommandLine:\n python -m utool.util_dict --exec-order_dict_by\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: 1, 2: 2, 3: 3, 4: 4}\n >>> key_order = [4, 2, 3, 1]\n >>> sorted_dict = order_dict_by(dict_, key_order)\n >>> result = ('sorted_dict = %s' % (ut.repr4(sorted_dict, nl=False),))\n >>> print(result)\n >>> assert result == 'sorted_dict = {4: 4, 2: 2, 3: 3, 1: 1}'\n\n "
dict_keys = set(dict_.keys())
other_keys = (dict_keys - set(key_order))
key_order = it.chain(key_order, other_keys)
sorted_dict = OrderedDict(((key, dict_[key]) for key in key_order if (key in dict_keys)))
return sorted_dict
|
def order_dict_by(dict_, key_order):
"\n Reorders items in a dictionary according to a custom key order\n\n Args:\n dict_ (dict_): a dictionary\n key_order (list): custom key order\n\n Returns:\n OrderedDict: sorted_dict\n\n CommandLine:\n python -m utool.util_dict --exec-order_dict_by\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: 1, 2: 2, 3: 3, 4: 4}\n >>> key_order = [4, 2, 3, 1]\n >>> sorted_dict = order_dict_by(dict_, key_order)\n >>> result = ('sorted_dict = %s' % (ut.repr4(sorted_dict, nl=False),))\n >>> print(result)\n >>> assert result == 'sorted_dict = {4: 4, 2: 2, 3: 3, 1: 1}'\n\n "
dict_keys = set(dict_.keys())
other_keys = (dict_keys - set(key_order))
key_order = it.chain(key_order, other_keys)
sorted_dict = OrderedDict(((key, dict_[key]) for key in key_order if (key in dict_keys)))
return sorted_dict<|docstring|>Reorders items in a dictionary according to a custom key order
Args:
dict_ (dict_): a dictionary
key_order (list): custom key order
Returns:
OrderedDict: sorted_dict
CommandLine:
python -m utool.util_dict --exec-order_dict_by
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 1, 2: 2, 3: 3, 4: 4}
>>> key_order = [4, 2, 3, 1]
>>> sorted_dict = order_dict_by(dict_, key_order)
>>> result = ('sorted_dict = %s' % (ut.repr4(sorted_dict, nl=False),))
>>> print(result)
>>> assert result == 'sorted_dict = {4: 4, 2: 2, 3: 3, 1: 1}'<|endoftext|>
|
def7f4137e5c27350af6c5f2548341a7fc732960f88fcc57359c26a9402cdeb2
|
def iteritems_sorted(dict_):
    """Iterate ``(key, value)`` pairs sorted by key, unless the dict is
    an OrderedDict, in which case its own order is respected."""
    already_ordered = isinstance(dict_, OrderedDict)
    items = six.iteritems(dict_)
    return items if already_ordered else iter(sorted(items))
|
change to iteritems ordered
|
utool/util_dict.py
|
iteritems_sorted
|
Erotemic/utool
| 8
|
python
|
def iteritems_sorted(dict_):
' '
if isinstance(dict_, OrderedDict):
return six.iteritems(dict_)
else:
return iter(sorted(six.iteritems(dict_)))
|
def iteritems_sorted(dict_):
' '
if isinstance(dict_, OrderedDict):
return six.iteritems(dict_)
else:
return iter(sorted(six.iteritems(dict_)))<|docstring|>change to iteritems ordered<|endoftext|>
|
804e23cfc65a67818a0a8e79a04a4d2651bd4db57819e4e8618349e680cdb64e
|
def flatten_dict_vals(dict_):
    """Flatten the values of a hierarchical dictionary into one level.

    Keys of the result are nested 2-tuples ``(key, subkey)``; a leaf value
    is wrapped as ``{None: value}``, so every flattened key chain ends in
    a ``None`` sentinel.
    """
    if not isinstance(dict_, dict):
        # Leaf: wrap with the None sentinel key.
        return {None: dict_}
    flat = {}
    for key, val in dict_.items():
        for subkey, leaf in flatten_dict_vals(val).items():
            flat[(key, subkey)] = leaf
    return flat
|
Flattens only values in a heirarchical dictionary, keys are nested.
|
utool/util_dict.py
|
flatten_dict_vals
|
Erotemic/utool
| 8
|
python
|
def flatten_dict_vals(dict_):
'\n \n '
if isinstance(dict_, dict):
return dict([((key, augkey), augval) for (key, val) in dict_.items() for (augkey, augval) in flatten_dict_vals(val).items()])
else:
return {None: dict_}
|
def flatten_dict_vals(dict_):
'\n \n '
if isinstance(dict_, dict):
return dict([((key, augkey), augval) for (key, val) in dict_.items() for (augkey, augval) in flatten_dict_vals(val).items()])
else:
return {None: dict_}<|docstring|>Flattens only values in a heirarchical dictionary, keys are nested.<|endoftext|>
|
ca7765a65929a2d860975977f9ac8789878197bc97be1a977ae58421e000104c
|
def flatten_dict_items(dict_):
    """Flatten keys and values of a hierarchical dictionary.

    Nested keys become flat tuples of the path components.

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> dict_ = {1: {1: {1: [1]}, 2: {1: [3], 3: [2]}}, 2: {2: {1: [4]}}}
        >>> flatter_dict = flatten_dict_items(dict_)
        >>> print(sorted(flatter_dict.items()))
        [((1, 1, 1), [1]), ((1, 2, 1), [3]), ((1, 2, 3), [2]), ((2, 2, 1), [4])]
    """
    import utool as ut
    nested = ut.flatten_dict_vals(dict_)
    # Each flattened key ends with a trailing None sentinel from
    # flatten_dict_vals; unpack the nested tuple and drop that sentinel.
    return {tuple(ut.unpack_iterables(nkey)[:(- 1)]): leaf
            for nkey, leaf in nested.items()}
|
Flattens keys / values in a heirarchical dictionary
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 3, 4]
>>> groupids_list = [[1, 1, 1, 2], [1, 2, 2, 2], [1, 3, 1, 1]]
>>> dict_ = hierarchical_group_items(item_list, groupids_list)
>>> flatter_dict = flatten_dict_items(dict_)
>>> result = ('flatter_dict = ' + ut.repr4(flatter_dict, nl=1))
>>> print(result)
flatter_dict = {
(1, 1, 1): [1],
(1, 2, 1): [3],
(1, 2, 3): [2],
(2, 2, 1): [4],
}
|
utool/util_dict.py
|
flatten_dict_items
|
Erotemic/utool
| 8
|
python
|
def flatten_dict_items(dict_):
"\n Flattens keys / values in a heirarchical dictionary\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4]\n >>> groupids_list = [[1, 1, 1, 2], [1, 2, 2, 2], [1, 3, 1, 1]]\n >>> dict_ = hierarchical_group_items(item_list, groupids_list)\n >>> flatter_dict = flatten_dict_items(dict_)\n >>> result = ('flatter_dict = ' + ut.repr4(flatter_dict, nl=1))\n >>> print(result)\n flatter_dict = {\n (1, 1, 1): [1],\n (1, 2, 1): [3],\n (1, 2, 3): [2],\n (2, 2, 1): [4],\n }\n "
import utool as ut
flat_dict = ut.flatten_dict_vals(dict_)
flatter_dict = dict([(tuple(ut.unpack_iterables(key)[:(- 1)]), val) for (key, val) in flat_dict.items()])
return flatter_dict
|
def flatten_dict_items(dict_):
"\n Flattens keys / values in a heirarchical dictionary\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 3, 4]\n >>> groupids_list = [[1, 1, 1, 2], [1, 2, 2, 2], [1, 3, 1, 1]]\n >>> dict_ = hierarchical_group_items(item_list, groupids_list)\n >>> flatter_dict = flatten_dict_items(dict_)\n >>> result = ('flatter_dict = ' + ut.repr4(flatter_dict, nl=1))\n >>> print(result)\n flatter_dict = {\n (1, 1, 1): [1],\n (1, 2, 1): [3],\n (1, 2, 3): [2],\n (2, 2, 1): [4],\n }\n "
import utool as ut
flat_dict = ut.flatten_dict_vals(dict_)
flatter_dict = dict([(tuple(ut.unpack_iterables(key)[:(- 1)]), val) for (key, val) in flat_dict.items()])
return flatter_dict<|docstring|>Flattens keys / values in a heirarchical dictionary
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 3, 4]
>>> groupids_list = [[1, 1, 1, 2], [1, 2, 2, 2], [1, 3, 1, 1]]
>>> dict_ = hierarchical_group_items(item_list, groupids_list)
>>> flatter_dict = flatten_dict_items(dict_)
>>> result = ('flatter_dict = ' + ut.repr4(flatter_dict, nl=1))
>>> print(result)
flatter_dict = {
(1, 1, 1): [1],
(1, 2, 1): [3],
(1, 2, 3): [2],
(2, 2, 1): [4],
}<|endoftext|>
|
a5f59acb46b369d7aa9624d40a1c4bbce2773f6cd67a5cccced2d12d86db7cda
|
def depth_atleast(list_, depth):
'\n Returns if depth of list is at least ``depth``\n\n Args:\n list_ (list):\n depth (int):\n\n Returns:\n bool: True\n\n CommandLine:\n python -m utool.util_dict --exec-depth_atleast --show\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> list_ = [[[[0]]], [[0]]]\n >>> depth = 0\n >>> result = [depth_atleast(list_, depth) for depth in range(0, 7)]\n >>> print(result)\n '
if (depth == 0):
return True
else:
try:
return all([depth_atleast(item, (depth - 1)) for item in list_])
except TypeError:
return False
|
Returns if depth of list is at least ``depth``
Args:
list_ (list):
depth (int):
Returns:
bool: True
CommandLine:
python -m utool.util_dict --exec-depth_atleast --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> list_ = [[[[0]]], [[0]]]
>>> depth = 0
>>> result = [depth_atleast(list_, depth) for depth in range(0, 7)]
>>> print(result)
|
utool/util_dict.py
|
depth_atleast
|
Erotemic/utool
| 8
|
python
|
def depth_atleast(list_, depth):
'\n Returns if depth of list is at least ``depth``\n\n Args:\n list_ (list):\n depth (int):\n\n Returns:\n bool: True\n\n CommandLine:\n python -m utool.util_dict --exec-depth_atleast --show\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> list_ = [[[[0]]], [[0]]]\n >>> depth = 0\n >>> result = [depth_atleast(list_, depth) for depth in range(0, 7)]\n >>> print(result)\n '
if (depth == 0):
return True
else:
try:
return all([depth_atleast(item, (depth - 1)) for item in list_])
except TypeError:
return False
|
def depth_atleast(list_, depth):
'\n Returns if depth of list is at least ``depth``\n\n Args:\n list_ (list):\n depth (int):\n\n Returns:\n bool: True\n\n CommandLine:\n python -m utool.util_dict --exec-depth_atleast --show\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> list_ = [[[[0]]], [[0]]]\n >>> depth = 0\n >>> result = [depth_atleast(list_, depth) for depth in range(0, 7)]\n >>> print(result)\n '
if (depth == 0):
return True
else:
try:
return all([depth_atleast(item, (depth - 1)) for item in list_])
except TypeError:
return False<|docstring|>Returns if depth of list is at least ``depth``
Args:
list_ (list):
depth (int):
Returns:
bool: True
CommandLine:
python -m utool.util_dict --exec-depth_atleast --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> list_ = [[[[0]]], [[0]]]
>>> depth = 0
>>> result = [depth_atleast(list_, depth) for depth in range(0, 7)]
>>> print(result)<|endoftext|>
|
c4371a3e7ee3792d990190fea7534d3db88d4ab7a390118692756aefe108937c
|
def HNR_RJT(signal, sr, n_fft):
'\n HNR extraction -> https://www.scitepress.org/Papers/2009/15529/15529.pdf\n A NEW ACCURATE METHOD OF HARMONIC-TO-NOISERATIO EXTRACTION\n Ricardo J. T. de Sousa - School of Engineering , University of Porto, Rua Roberto Frias, Porto, Portugal\n Robert Komar implementation 2021\n '
h_range = (n_fft // 2)
s = np.abs(librosa.stft(signal, n_fft=n_fft))
fft_freqs = librosa.fft_frequencies(sr=sr)
s_harm = librosa.interp_harmonics(s, fft_freqs, range(h_range), axis=0)
noise_spec = (s[h_range:] - s_harm)
return (10 * np.log((np.sum((s_harm ** 2)) / np.sum((noise_spec ** 2)))))
|
HNR extraction -> https://www.scitepress.org/Papers/2009/15529/15529.pdf
A NEW ACCURATE METHOD OF HARMONIC-TO-NOISERATIO EXTRACTION
Ricardo J. T. de Sousa - School of Engineering , University of Porto, Rua Roberto Frias, Porto, Portugal
Robert Komar implementation 2021
|
src/data/make_dataset.py
|
HNR_RJT
|
RbKomar/intonat.dl
| 0
|
python
|
def HNR_RJT(signal, sr, n_fft):
'\n HNR extraction -> https://www.scitepress.org/Papers/2009/15529/15529.pdf\n A NEW ACCURATE METHOD OF HARMONIC-TO-NOISERATIO EXTRACTION\n Ricardo J. T. de Sousa - School of Engineering , University of Porto, Rua Roberto Frias, Porto, Portugal\n Robert Komar implementation 2021\n '
h_range = (n_fft // 2)
s = np.abs(librosa.stft(signal, n_fft=n_fft))
fft_freqs = librosa.fft_frequencies(sr=sr)
s_harm = librosa.interp_harmonics(s, fft_freqs, range(h_range), axis=0)
noise_spec = (s[h_range:] - s_harm)
return (10 * np.log((np.sum((s_harm ** 2)) / np.sum((noise_spec ** 2)))))
|
def HNR_RJT(signal, sr, n_fft):
'\n HNR extraction -> https://www.scitepress.org/Papers/2009/15529/15529.pdf\n A NEW ACCURATE METHOD OF HARMONIC-TO-NOISERATIO EXTRACTION\n Ricardo J. T. de Sousa - School of Engineering , University of Porto, Rua Roberto Frias, Porto, Portugal\n Robert Komar implementation 2021\n '
h_range = (n_fft // 2)
s = np.abs(librosa.stft(signal, n_fft=n_fft))
fft_freqs = librosa.fft_frequencies(sr=sr)
s_harm = librosa.interp_harmonics(s, fft_freqs, range(h_range), axis=0)
noise_spec = (s[h_range:] - s_harm)
return (10 * np.log((np.sum((s_harm ** 2)) / np.sum((noise_spec ** 2)))))<|docstring|>HNR extraction -> https://www.scitepress.org/Papers/2009/15529/15529.pdf
A NEW ACCURATE METHOD OF HARMONIC-TO-NOISERATIO EXTRACTION
Ricardo J. T. de Sousa - School of Engineering , University of Porto, Rua Roberto Frias, Porto, Portugal
Robert Komar implementation 2021<|endoftext|>
|
801b2443aecc963c5634b2b92ecd914996cc3e92c2cde33c2b79eaab346a965f
|
def get_features_multiprocessing(name, folder_path, file_path):
'\n num_segments: int - in deep learning we do need a lot of data so we will cut the audio into the segments\n the function written with help of https://youtu.be/szyGiObZymo\n '
start_timer = time.perf_counter()
mfcc_segments = []
delta_mfcc_segments = []
delta2_mfcc_segments = []
fundamental_frequency_segments = []
hnr_segments = []
filename = ((folder_path + '\\\\') + file_path)
age = get_person_age(name, file_path)
logger.info('GET FEATURES | %s/%d: starting feature collection', name, age)
(signal, sr) = librosa.load(filename)
hop_length = 512
n_fft = 2048
duration = librosa.get_duration(signal, sr=sr)
num_segments = min(max(10, math.ceil(((1 / 6) * duration))), 100)
num_samples_per_segment = int(((sr * duration) / num_segments))
expected_vectors_per_segment = math.ceil((num_samples_per_segment / hop_length))
for s in range(num_segments):
start_sample = (num_samples_per_segment * s)
finish_sample = (start_sample + num_samples_per_segment)
sampled_signal = signal[start_sample:finish_sample]
mfcc = librosa.feature.mfcc(sampled_signal, n_fft=n_fft, hop_length=hop_length, n_mfcc=15)
delta_mfcc = librosa.feature.delta(mfcc)
delta2_mfcc = librosa.feature.delta(mfcc, order=2)
mfcc = mfcc.T
delta_mfcc = delta_mfcc.T
delta2_mfcc = delta2_mfcc.T
if (len(mfcc) == expected_vectors_per_segment):
mfcc_segments.append(mfcc.tolist())
delta_mfcc_segments.append(delta_mfcc.tolist())
delta2_mfcc_segments.append(delta2_mfcc.tolist())
(fundamental_frequency, _, _) = librosa.pyin(sampled_signal, fmin=librosa.note_to_hz('C2'), fmax=librosa.note_to_hz('C7'), hop_length=hop_length)
fundamental_frequency_segments.append(np.mean(fundamental_frequency))
fundamental_frequency = fundamental_frequency[(~ np.isnan(fundamental_frequency))]
pitch_period = (1.0 / np.mean(fundamental_frequency))
hnr = HNR(sampled_signal, sr, pitch_period)
hnr_segments.append(hnr)
lock.acquire()
with open('D:\\PROJEKTY\\intonat.dl\\data\\interim\\data.json', 'r+') as fp:
try:
data = json.load(fp)
except json.decoder.JSONDecodeError:
data = {'name': [], 'sr': [], 'age': [], 'fundamental_frequency': [], 'mfcc': [], 'delta_mfcc': [], 'delta2_mfcc': [], 'hnr': []}
data['name'].append(name)
data['sr'].append(sr)
data['age'].append(age)
data['mfcc'].append(mfcc_segments)
data['delta_mfcc'].append(delta_mfcc_segments)
data['delta2_mfcc'].append(delta2_mfcc_segments)
data['fundamental_frequency'].append(fundamental_frequency_segments)
data['hnr'].append(hnr_segments)
fp.seek(0)
json.dump(data, fp, indent=4)
fp.truncate()
lock.release()
end_timer = time.perf_counter()
logger.info('GET FEATURES | %s/%d: finishing feature collection with time: %0.2f s', name, age, (end_timer - start_timer))
|
num_segments: int - in deep learning we do need a lot of data so we will cut the audio into the segments
the function written with help of https://youtu.be/szyGiObZymo
|
src/data/make_dataset.py
|
get_features_multiprocessing
|
RbKomar/intonat.dl
| 0
|
python
|
def get_features_multiprocessing(name, folder_path, file_path):
'\n num_segments: int - in deep learning we do need a lot of data so we will cut the audio into the segments\n the function written with help of https://youtu.be/szyGiObZymo\n '
start_timer = time.perf_counter()
mfcc_segments = []
delta_mfcc_segments = []
delta2_mfcc_segments = []
fundamental_frequency_segments = []
hnr_segments = []
filename = ((folder_path + '\\\\') + file_path)
age = get_person_age(name, file_path)
logger.info('GET FEATURES | %s/%d: starting feature collection', name, age)
(signal, sr) = librosa.load(filename)
hop_length = 512
n_fft = 2048
duration = librosa.get_duration(signal, sr=sr)
num_segments = min(max(10, math.ceil(((1 / 6) * duration))), 100)
num_samples_per_segment = int(((sr * duration) / num_segments))
expected_vectors_per_segment = math.ceil((num_samples_per_segment / hop_length))
for s in range(num_segments):
start_sample = (num_samples_per_segment * s)
finish_sample = (start_sample + num_samples_per_segment)
sampled_signal = signal[start_sample:finish_sample]
mfcc = librosa.feature.mfcc(sampled_signal, n_fft=n_fft, hop_length=hop_length, n_mfcc=15)
delta_mfcc = librosa.feature.delta(mfcc)
delta2_mfcc = librosa.feature.delta(mfcc, order=2)
mfcc = mfcc.T
delta_mfcc = delta_mfcc.T
delta2_mfcc = delta2_mfcc.T
if (len(mfcc) == expected_vectors_per_segment):
mfcc_segments.append(mfcc.tolist())
delta_mfcc_segments.append(delta_mfcc.tolist())
delta2_mfcc_segments.append(delta2_mfcc.tolist())
(fundamental_frequency, _, _) = librosa.pyin(sampled_signal, fmin=librosa.note_to_hz('C2'), fmax=librosa.note_to_hz('C7'), hop_length=hop_length)
fundamental_frequency_segments.append(np.mean(fundamental_frequency))
fundamental_frequency = fundamental_frequency[(~ np.isnan(fundamental_frequency))]
pitch_period = (1.0 / np.mean(fundamental_frequency))
hnr = HNR(sampled_signal, sr, pitch_period)
hnr_segments.append(hnr)
lock.acquire()
with open('D:\\PROJEKTY\\intonat.dl\\data\\interim\\data.json', 'r+') as fp:
try:
data = json.load(fp)
except json.decoder.JSONDecodeError:
data = {'name': [], 'sr': [], 'age': [], 'fundamental_frequency': [], 'mfcc': [], 'delta_mfcc': [], 'delta2_mfcc': [], 'hnr': []}
data['name'].append(name)
data['sr'].append(sr)
data['age'].append(age)
data['mfcc'].append(mfcc_segments)
data['delta_mfcc'].append(delta_mfcc_segments)
data['delta2_mfcc'].append(delta2_mfcc_segments)
data['fundamental_frequency'].append(fundamental_frequency_segments)
data['hnr'].append(hnr_segments)
fp.seek(0)
json.dump(data, fp, indent=4)
fp.truncate()
lock.release()
end_timer = time.perf_counter()
logger.info('GET FEATURES | %s/%d: finishing feature collection with time: %0.2f s', name, age, (end_timer - start_timer))
|
def get_features_multiprocessing(name, folder_path, file_path):
'\n num_segments: int - in deep learning we do need a lot of data so we will cut the audio into the segments\n the function written with help of https://youtu.be/szyGiObZymo\n '
start_timer = time.perf_counter()
mfcc_segments = []
delta_mfcc_segments = []
delta2_mfcc_segments = []
fundamental_frequency_segments = []
hnr_segments = []
filename = ((folder_path + '\\\\') + file_path)
age = get_person_age(name, file_path)
logger.info('GET FEATURES | %s/%d: starting feature collection', name, age)
(signal, sr) = librosa.load(filename)
hop_length = 512
n_fft = 2048
duration = librosa.get_duration(signal, sr=sr)
num_segments = min(max(10, math.ceil(((1 / 6) * duration))), 100)
num_samples_per_segment = int(((sr * duration) / num_segments))
expected_vectors_per_segment = math.ceil((num_samples_per_segment / hop_length))
for s in range(num_segments):
start_sample = (num_samples_per_segment * s)
finish_sample = (start_sample + num_samples_per_segment)
sampled_signal = signal[start_sample:finish_sample]
mfcc = librosa.feature.mfcc(sampled_signal, n_fft=n_fft, hop_length=hop_length, n_mfcc=15)
delta_mfcc = librosa.feature.delta(mfcc)
delta2_mfcc = librosa.feature.delta(mfcc, order=2)
mfcc = mfcc.T
delta_mfcc = delta_mfcc.T
delta2_mfcc = delta2_mfcc.T
if (len(mfcc) == expected_vectors_per_segment):
mfcc_segments.append(mfcc.tolist())
delta_mfcc_segments.append(delta_mfcc.tolist())
delta2_mfcc_segments.append(delta2_mfcc.tolist())
(fundamental_frequency, _, _) = librosa.pyin(sampled_signal, fmin=librosa.note_to_hz('C2'), fmax=librosa.note_to_hz('C7'), hop_length=hop_length)
fundamental_frequency_segments.append(np.mean(fundamental_frequency))
fundamental_frequency = fundamental_frequency[(~ np.isnan(fundamental_frequency))]
pitch_period = (1.0 / np.mean(fundamental_frequency))
hnr = HNR(sampled_signal, sr, pitch_period)
hnr_segments.append(hnr)
lock.acquire()
with open('D:\\PROJEKTY\\intonat.dl\\data\\interim\\data.json', 'r+') as fp:
try:
data = json.load(fp)
except json.decoder.JSONDecodeError:
data = {'name': [], 'sr': [], 'age': [], 'fundamental_frequency': [], 'mfcc': [], 'delta_mfcc': [], 'delta2_mfcc': [], 'hnr': []}
data['name'].append(name)
data['sr'].append(sr)
data['age'].append(age)
data['mfcc'].append(mfcc_segments)
data['delta_mfcc'].append(delta_mfcc_segments)
data['delta2_mfcc'].append(delta2_mfcc_segments)
data['fundamental_frequency'].append(fundamental_frequency_segments)
data['hnr'].append(hnr_segments)
fp.seek(0)
json.dump(data, fp, indent=4)
fp.truncate()
lock.release()
end_timer = time.perf_counter()
logger.info('GET FEATURES | %s/%d: finishing feature collection with time: %0.2f s', name, age, (end_timer - start_timer))<|docstring|>num_segments: int - in deep learning we do need a lot of data so we will cut the audio into the segments
the function written with help of https://youtu.be/szyGiObZymo<|endoftext|>
|
b87ce33f51ac2f360ff3649e8493933d7e7030d218ccb08cf005e91bcd6409c1
|
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True), default='D:\\PROJEKTY\\intonat.dl\\data\\raw\\TCDSA_main')
@click.argument('output_filepath', type=click.Path(), default='D:\\PROJEKTY\\intonat.dl\\data\\interim\\data.json')
def main(input_filepath, output_filepath):
' Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n '
with open(output_filepath, 'r+') as f:
f.truncate(0)
logger.info('Starting data pre-processing')
start_timer = time.perf_counter()
read_data(input_filepath)
end_timer = time.perf_counter()
logger.info('Finishing data pre-processing in %d s', (end_timer - start_timer))
|
Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
|
src/data/make_dataset.py
|
main
|
RbKomar/intonat.dl
| 0
|
python
|
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True), default='D:\\PROJEKTY\\intonat.dl\\data\\raw\\TCDSA_main')
@click.argument('output_filepath', type=click.Path(), default='D:\\PROJEKTY\\intonat.dl\\data\\interim\\data.json')
def main(input_filepath, output_filepath):
' Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n '
with open(output_filepath, 'r+') as f:
f.truncate(0)
logger.info('Starting data pre-processing')
start_timer = time.perf_counter()
read_data(input_filepath)
end_timer = time.perf_counter()
logger.info('Finishing data pre-processing in %d s', (end_timer - start_timer))
|
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True), default='D:\\PROJEKTY\\intonat.dl\\data\\raw\\TCDSA_main')
@click.argument('output_filepath', type=click.Path(), default='D:\\PROJEKTY\\intonat.dl\\data\\interim\\data.json')
def main(input_filepath, output_filepath):
' Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n '
with open(output_filepath, 'r+') as f:
f.truncate(0)
logger.info('Starting data pre-processing')
start_timer = time.perf_counter()
read_data(input_filepath)
end_timer = time.perf_counter()
logger.info('Finishing data pre-processing in %d s', (end_timer - start_timer))<|docstring|>Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).<|endoftext|>
|
48f3af7f56969220a4bb552ba7e04b1eb1a4439cff2f7df83d1d54fc8ab44262
|
def get_language(tokens, stopwords):
'\n Extract the language from an array of tokens, it is based on stop words\n It takes two args:\n tokens -> list of words to check the language\n stopwords -> dict of set: {lang : set(language,specific,stop,words)}\n\n It sets a score to a language by intersection between tokens & stopwords\n by language. The highest score is considered as the main language of\n the list and is returned.\n\n improvement: if 2 language have equal score, can look at subject (too\n small for good detection) or define a default value\n '
languages_ratios = dict()
words = [str(word).lower() for word in tokens]
words_set = set(words)
for language in stopwords.keys():
common_elements = words_set.intersection(stopwords[language])
languages_ratios[language] = len(common_elements)
return max(languages_ratios, key=languages_ratios.get)
|
Extract the language from an array of tokens, it is based on stop words
It takes two args:
tokens -> list of words to check the language
stopwords -> dict of set: {lang : set(language,specific,stop,words)}
It sets a score to a language by intersection between tokens & stopwords
by language. The highest score is considered as the main language of
the list and is returned.
improvement: if 2 language have equal score, can look at subject (too
small for good detection) or define a default value
|
language_detection.py
|
get_language
|
Nedgang/adt_project
| 1
|
python
|
def get_language(tokens, stopwords):
'\n Extract the language from an array of tokens, it is based on stop words\n It takes two args:\n tokens -> list of words to check the language\n stopwords -> dict of set: {lang : set(language,specific,stop,words)}\n\n It sets a score to a language by intersection between tokens & stopwords\n by language. The highest score is considered as the main language of\n the list and is returned.\n\n improvement: if 2 language have equal score, can look at subject (too\n small for good detection) or define a default value\n '
languages_ratios = dict()
words = [str(word).lower() for word in tokens]
words_set = set(words)
for language in stopwords.keys():
common_elements = words_set.intersection(stopwords[language])
languages_ratios[language] = len(common_elements)
return max(languages_ratios, key=languages_ratios.get)
|
def get_language(tokens, stopwords):
'\n Extract the language from an array of tokens, it is based on stop words\n It takes two args:\n tokens -> list of words to check the language\n stopwords -> dict of set: {lang : set(language,specific,stop,words)}\n\n It sets a score to a language by intersection between tokens & stopwords\n by language. The highest score is considered as the main language of\n the list and is returned.\n\n improvement: if 2 language have equal score, can look at subject (too\n small for good detection) or define a default value\n '
languages_ratios = dict()
words = [str(word).lower() for word in tokens]
words_set = set(words)
for language in stopwords.keys():
common_elements = words_set.intersection(stopwords[language])
languages_ratios[language] = len(common_elements)
return max(languages_ratios, key=languages_ratios.get)<|docstring|>Extract the language from an array of tokens, it is based on stop words
It takes two args:
tokens -> list of words to check the language
stopwords -> dict of set: {lang : set(language,specific,stop,words)}
It sets a score to a language by intersection between tokens & stopwords
by language. The highest score is considered as the main language of
the list and is returned.
improvement: if 2 language have equal score, can look at subject (too
small for good detection) or define a default value<|endoftext|>
|
7396f576786b4f9b211ddee8efb1f2218b4bf6033f193caa51947ab9329aa2c8
|
def main():
'[main usually for testing\n '
pass
|
[main usually for testing
|
py4ami/pdfreader.py
|
main
|
petermr/pyami
| 6
|
python
|
def main():
'\n '
pass
|
def main():
'\n '
pass<|docstring|>[main usually for testing<|endoftext|>
|
47b64ee04a497140cdafe4ca7d25ea29023ae17cad7ed344bfba1f2b0534100e
|
@classmethod
def read_and_convert(cls, file):
' converst a PDF path (to text)\n\n Args:\n file ([str]): filename\n '
resource_manager = PDFResourceManager()
fake_file_handle = io.StringIO()
image_dir = Path('/Users/pm286/misc/images')
imageWriter = ImageWriter(image_dir)
layoutParams = LAParams()
converter = TextConverter(resource_manager, fake_file_handle, codec='utf-8', laparams=layoutParams, imagewriter=imageWriter)
page_interpreter = PDFPageInterpreter(resource_manager, converter)
print(f' ====== PDF FILE {file} =====')
with open(file, 'rb') as fh:
for (i, page) in enumerate(PDFPage.get_pages(fh, caching=True, check_extractable=True)):
page_interpreter.process_page(page)
print(f'=================== page {(i + 1)}=================')
text = fake_file_handle.getvalue()
converter.close()
fake_file_handle.close()
'\n > pdf2txt.py [-P password] [-o output] [-t text|html|xml|tag]\n [-O output_dir] [-c encoding] [-s scale] [-R rotation]\n [-Y normal|loose|exact] [-p pagenos] [-m maxpages]\n [-S] [-C] [-n] [-A] [-V]\n [-M char_margin] [-L line_margin] [-W word_margin]\n [-F boxes_flow] [-d]\n input.pdf ...\n-P password : PDF password.\n-o output : Output path name.\n-t text|html|xml|tag : Output type. (default: automatically inferred from the output path name.)\n-O output_dir : Output directory for extracted images.\n-c encoding : Output encoding. (default: utf-8)\n-s scale : Output scale.\n-R rotation : Rotates the page in degree.\n-Y normal|loose|exact : Specifies the layout mode. (only for HTML output.)\n-p pagenos : Processes certain pages only.\n-m maxpages : Limits the number of maximum pages to process.\n-S : Strips control characters.\n-C : Disables resource caching.\n-n : Disables layout analysis.\n-A : Applies layout analysis for all texts including figures.\n-V : Automatically detects vertical writing.\n-M char_margin : Speficies the char margin.\n-W word_margin : Speficies the word margin.\n-L line_margin : Speficies the line margin.\n-F boxes_flow : Speficies the box flow ratio.\n-d : Turns on Debug output.\n\n '
print(f'''
......
{text[:100]}
...
{text[(- 100):]}
......
''')
return text
|
converst a PDF path (to text)
Args:
file ([str]): filename
|
py4ami/pdfreader.py
|
read_and_convert
|
petermr/pyami
| 6
|
python
|
@classmethod
def read_and_convert(cls, file):
' converst a PDF path (to text)\n\n Args:\n file ([str]): filename\n '
resource_manager = PDFResourceManager()
fake_file_handle = io.StringIO()
image_dir = Path('/Users/pm286/misc/images')
imageWriter = ImageWriter(image_dir)
layoutParams = LAParams()
converter = TextConverter(resource_manager, fake_file_handle, codec='utf-8', laparams=layoutParams, imagewriter=imageWriter)
page_interpreter = PDFPageInterpreter(resource_manager, converter)
print(f' ====== PDF FILE {file} =====')
with open(file, 'rb') as fh:
for (i, page) in enumerate(PDFPage.get_pages(fh, caching=True, check_extractable=True)):
page_interpreter.process_page(page)
print(f'=================== page {(i + 1)}=================')
text = fake_file_handle.getvalue()
converter.close()
fake_file_handle.close()
'\n > pdf2txt.py [-P password] [-o output] [-t text|html|xml|tag]\n [-O output_dir] [-c encoding] [-s scale] [-R rotation]\n [-Y normal|loose|exact] [-p pagenos] [-m maxpages]\n [-S] [-C] [-n] [-A] [-V]\n [-M char_margin] [-L line_margin] [-W word_margin]\n [-F boxes_flow] [-d]\n input.pdf ...\n-P password : PDF password.\n-o output : Output path name.\n-t text|html|xml|tag : Output type. (default: automatically inferred from the output path name.)\n-O output_dir : Output directory for extracted images.\n-c encoding : Output encoding. (default: utf-8)\n-s scale : Output scale.\n-R rotation : Rotates the page in degree.\n-Y normal|loose|exact : Specifies the layout mode. (only for HTML output.)\n-p pagenos : Processes certain pages only.\n-m maxpages : Limits the number of maximum pages to process.\n-S : Strips control characters.\n-C : Disables resource caching.\n-n : Disables layout analysis.\n-A : Applies layout analysis for all texts including figures.\n-V : Automatically detects vertical writing.\n-M char_margin : Speficies the char margin.\n-W word_margin : Speficies the word margin.\n-L line_margin : Speficies the line margin.\n-F boxes_flow : Speficies the box flow ratio.\n-d : Turns on Debug output.\n\n '
print(f'
......
{text[:100]}
...
{text[(- 100):]}
......
')
return text
|
@classmethod
def read_and_convert(cls, file):
' converst a PDF path (to text)\n\n Args:\n file ([str]): filename\n '
resource_manager = PDFResourceManager()
fake_file_handle = io.StringIO()
image_dir = Path('/Users/pm286/misc/images')
imageWriter = ImageWriter(image_dir)
layoutParams = LAParams()
converter = TextConverter(resource_manager, fake_file_handle, codec='utf-8', laparams=layoutParams, imagewriter=imageWriter)
page_interpreter = PDFPageInterpreter(resource_manager, converter)
print(f' ====== PDF FILE {file} =====')
with open(file, 'rb') as fh:
for (i, page) in enumerate(PDFPage.get_pages(fh, caching=True, check_extractable=True)):
page_interpreter.process_page(page)
print(f'=================== page {(i + 1)}=================')
text = fake_file_handle.getvalue()
converter.close()
fake_file_handle.close()
'\n > pdf2txt.py [-P password] [-o output] [-t text|html|xml|tag]\n [-O output_dir] [-c encoding] [-s scale] [-R rotation]\n [-Y normal|loose|exact] [-p pagenos] [-m maxpages]\n [-S] [-C] [-n] [-A] [-V]\n [-M char_margin] [-L line_margin] [-W word_margin]\n [-F boxes_flow] [-d]\n input.pdf ...\n-P password : PDF password.\n-o output : Output path name.\n-t text|html|xml|tag : Output type. (default: automatically inferred from the output path name.)\n-O output_dir : Output directory for extracted images.\n-c encoding : Output encoding. (default: utf-8)\n-s scale : Output scale.\n-R rotation : Rotates the page in degree.\n-Y normal|loose|exact : Specifies the layout mode. (only for HTML output.)\n-p pagenos : Processes certain pages only.\n-m maxpages : Limits the number of maximum pages to process.\n-S : Strips control characters.\n-C : Disables resource caching.\n-n : Disables layout analysis.\n-A : Applies layout analysis for all texts including figures.\n-V : Automatically detects vertical writing.\n-M char_margin : Speficies the char margin.\n-W word_margin : Speficies the word margin.\n-L line_margin : Speficies the line margin.\n-F boxes_flow : Speficies the box flow ratio.\n-d : Turns on Debug output.\n\n '
print(f'
......
{text[:100]}
...
{text[(- 100):]}
......
')
return text<|docstring|>converst a PDF path (to text)
Args:
file ([str]): filename<|endoftext|>
|
25bae95540935913117e90d76d5121e716bb68feaf330d9cb0a10676ff0757e7
|
def test_bresenham():
'\n test bresenham path interpolation\n '
assert (bresenham([0, 0], [2, 5]) == [[0, 0], [0, 1], [1, 2], [1, 3], [2, 4], [2, 5]])
assert (bresenham([0, 1], [0, 4]) == [[0, 1], [0, 2], [0, 3], [0, 4]])
|
test bresenham path interpolation
|
test/util_test.py
|
test_bresenham
|
Leibosite/python-pathfinding
| 208
|
python
|
def test_bresenham():
'\n \n '
assert (bresenham([0, 0], [2, 5]) == [[0, 0], [0, 1], [1, 2], [1, 3], [2, 4], [2, 5]])
assert (bresenham([0, 1], [0, 4]) == [[0, 1], [0, 2], [0, 3], [0, 4]])
|
def test_bresenham():
'\n \n '
assert (bresenham([0, 0], [2, 5]) == [[0, 0], [0, 1], [1, 2], [1, 3], [2, 4], [2, 5]])
assert (bresenham([0, 1], [0, 4]) == [[0, 1], [0, 2], [0, 3], [0, 4]])<|docstring|>test bresenham path interpolation<|endoftext|>
|
3b91c77dba769a8bbc4e12b96a4c3eaeca3eb194f3fa25fa66fec99c93cd9761
|
def test_raytrace():
'\n test raytrace path interpolation\n '
assert (raytrace([0, 0], [2, 5]) == [[0, 0], [0, 1], [1, 1], [1, 2], [1, 3], [1, 4], [2, 4], [2, 5]])
assert (raytrace([0, 1], [0, 4]) == [[0, 1], [0, 2], [0, 3], [0, 4]])
|
test raytrace path interpolation
|
test/util_test.py
|
test_raytrace
|
Leibosite/python-pathfinding
| 208
|
python
|
def test_raytrace():
'\n \n '
assert (raytrace([0, 0], [2, 5]) == [[0, 0], [0, 1], [1, 1], [1, 2], [1, 3], [1, 4], [2, 4], [2, 5]])
assert (raytrace([0, 1], [0, 4]) == [[0, 1], [0, 2], [0, 3], [0, 4]])
|
def test_raytrace():
'\n \n '
assert (raytrace([0, 0], [2, 5]) == [[0, 0], [0, 1], [1, 1], [1, 2], [1, 3], [1, 4], [2, 4], [2, 5]])
assert (raytrace([0, 1], [0, 4]) == [[0, 1], [0, 2], [0, 3], [0, 4]])<|docstring|>test raytrace path interpolation<|endoftext|>
|
862ce06e824168f7798019e1e4e8dc918258f0cfc4d5b73af30f8498b075ed84
|
def __init__(self, region_name=None) -> None:
'Initialize autoscaling scheduler.'
if region_name:
self.ec2 = boto3.client('ec2', region_name=region_name)
self.asg = boto3.client('autoscaling', region_name=region_name)
else:
self.ec2 = boto3.client('ec2')
self.asg = boto3.client('autoscaling')
|
Initialize autoscaling scheduler.
|
modules/auto-start/lambda-scheduler-stop-start/package/scheduler/autoscaling_handler.py
|
__init__
|
ministryofjustice/hmpps-terraform-modules
| 2
|
python
|
def __init__(self, region_name=None) -> None:
if region_name:
self.ec2 = boto3.client('ec2', region_name=region_name)
self.asg = boto3.client('autoscaling', region_name=region_name)
else:
self.ec2 = boto3.client('ec2')
self.asg = boto3.client('autoscaling')
|
def __init__(self, region_name=None) -> None:
if region_name:
self.ec2 = boto3.client('ec2', region_name=region_name)
self.asg = boto3.client('autoscaling', region_name=region_name)
else:
self.ec2 = boto3.client('ec2')
self.asg = boto3.client('autoscaling')<|docstring|>Initialize autoscaling scheduler.<|endoftext|>
|
03dfb9a7b9b47461d3391ecad3b47dad3b35b570a083c8dff182ba6e19aecdb3
|
def stop(self, tag_key: str, tag_value: str) -> None:
'Aws autoscaling suspend function.\n\n Suspend autoscaling group and stop its instances\n with defined tag.\n\n :param str tag_key:\n Aws tag key to use for filter resources\n :param str tag_value:\n Aws tag value to use for filter resources\n '
asg_list = self.list_groups(tag_key, tag_value)
instance_list = self.list_instances(asg_list)
for asg_name in asg_list:
try:
self.asg.suspend_processes(AutoScalingGroupName=asg_name)
print('Suspend autoscaling group {0}'.format(asg_name))
except ClientError as exc:
ec2_exception('instance', asg_name, exc)
for ec2_instance in instance_list:
try:
self.ec2.stop_instances(InstanceIds=[ec2_instance])
print('Stop autoscaling instances {0}'.format(ec2_instance))
except ClientError as exc:
ec2_exception('autoscaling group', ec2_instance, exc)
|
Aws autoscaling suspend function.
Suspend autoscaling group and stop its instances
with defined tag.
:param str tag_key:
Aws tag key to use for filter resources
:param str tag_value:
Aws tag value to use for filter resources
|
modules/auto-start/lambda-scheduler-stop-start/package/scheduler/autoscaling_handler.py
|
stop
|
ministryofjustice/hmpps-terraform-modules
| 2
|
python
|
def stop(self, tag_key: str, tag_value: str) -> None:
'Aws autoscaling suspend function.\n\n Suspend autoscaling group and stop its instances\n with defined tag.\n\n :param str tag_key:\n Aws tag key to use for filter resources\n :param str tag_value:\n Aws tag value to use for filter resources\n '
asg_list = self.list_groups(tag_key, tag_value)
instance_list = self.list_instances(asg_list)
for asg_name in asg_list:
try:
self.asg.suspend_processes(AutoScalingGroupName=asg_name)
print('Suspend autoscaling group {0}'.format(asg_name))
except ClientError as exc:
ec2_exception('instance', asg_name, exc)
for ec2_instance in instance_list:
try:
self.ec2.stop_instances(InstanceIds=[ec2_instance])
print('Stop autoscaling instances {0}'.format(ec2_instance))
except ClientError as exc:
ec2_exception('autoscaling group', ec2_instance, exc)
|
def stop(self, tag_key: str, tag_value: str) -> None:
'Aws autoscaling suspend function.\n\n Suspend autoscaling group and stop its instances\n with defined tag.\n\n :param str tag_key:\n Aws tag key to use for filter resources\n :param str tag_value:\n Aws tag value to use for filter resources\n '
asg_list = self.list_groups(tag_key, tag_value)
instance_list = self.list_instances(asg_list)
for asg_name in asg_list:
try:
self.asg.suspend_processes(AutoScalingGroupName=asg_name)
print('Suspend autoscaling group {0}'.format(asg_name))
except ClientError as exc:
ec2_exception('instance', asg_name, exc)
for ec2_instance in instance_list:
try:
self.ec2.stop_instances(InstanceIds=[ec2_instance])
print('Stop autoscaling instances {0}'.format(ec2_instance))
except ClientError as exc:
ec2_exception('autoscaling group', ec2_instance, exc)<|docstring|>Aws autoscaling suspend function.
Suspend autoscaling group and stop its instances
with defined tag.
:param str tag_key:
Aws tag key to use for filter resources
:param str tag_value:
Aws tag value to use for filter resources<|endoftext|>
|
0c83c38527742b8945333004d51a2f743b728529debb7f9718a6132e72aa844b
|
def start(self, tag_key: str, tag_value: str) -> None:
'Aws autoscaling resume function.\n\n Resume autoscaling group and start its instances\n with defined tag.\n\n :param str tag_key:\n Aws tag key to use for filter resources\n :param str tag_value:\n Aws tag value to use for filter resources\n '
asg_list = self.list_groups(tag_key, tag_value)
instance_list = self.list_instances(asg_list)
for asg_name in asg_list:
try:
self.asg.resume_processes(AutoScalingGroupName=asg_name)
print('Resume autoscaling group {0}'.format(asg_name))
except ClientError as exc:
ec2_exception('autoscaling group', asg_name, exc)
for ec2_instance in instance_list:
try:
self.ec2.start_instances(InstanceIds=[ec2_instance])
print('Start autoscaling instances {0}'.format(ec2_instance))
except ClientError as exc:
ec2_exception('instance', ec2_instance, exc)
|
Aws autoscaling resume function.
Resume autoscaling group and start its instances
with defined tag.
:param str tag_key:
Aws tag key to use for filter resources
:param str tag_value:
Aws tag value to use for filter resources
|
modules/auto-start/lambda-scheduler-stop-start/package/scheduler/autoscaling_handler.py
|
start
|
ministryofjustice/hmpps-terraform-modules
| 2
|
python
|
def start(self, tag_key: str, tag_value: str) -> None:
'Aws autoscaling resume function.\n\n Resume autoscaling group and start its instances\n with defined tag.\n\n :param str tag_key:\n Aws tag key to use for filter resources\n :param str tag_value:\n Aws tag value to use for filter resources\n '
asg_list = self.list_groups(tag_key, tag_value)
instance_list = self.list_instances(asg_list)
for asg_name in asg_list:
try:
self.asg.resume_processes(AutoScalingGroupName=asg_name)
print('Resume autoscaling group {0}'.format(asg_name))
except ClientError as exc:
ec2_exception('autoscaling group', asg_name, exc)
for ec2_instance in instance_list:
try:
self.ec2.start_instances(InstanceIds=[ec2_instance])
print('Start autoscaling instances {0}'.format(ec2_instance))
except ClientError as exc:
ec2_exception('instance', ec2_instance, exc)
|
def start(self, tag_key: str, tag_value: str) -> None:
'Aws autoscaling resume function.\n\n Resume autoscaling group and start its instances\n with defined tag.\n\n :param str tag_key:\n Aws tag key to use for filter resources\n :param str tag_value:\n Aws tag value to use for filter resources\n '
asg_list = self.list_groups(tag_key, tag_value)
instance_list = self.list_instances(asg_list)
for asg_name in asg_list:
try:
self.asg.resume_processes(AutoScalingGroupName=asg_name)
print('Resume autoscaling group {0}'.format(asg_name))
except ClientError as exc:
ec2_exception('autoscaling group', asg_name, exc)
for ec2_instance in instance_list:
try:
self.ec2.start_instances(InstanceIds=[ec2_instance])
print('Start autoscaling instances {0}'.format(ec2_instance))
except ClientError as exc:
ec2_exception('instance', ec2_instance, exc)<|docstring|>Aws autoscaling resume function.
Resume autoscaling group and start its instances
with defined tag.
:param str tag_key:
Aws tag key to use for filter resources
:param str tag_value:
Aws tag value to use for filter resources<|endoftext|>
|
abb7673913f160fb1179fd2b24b698715b6c932ce0676158632b8ffbfebb5b62
|
def list_groups(self, tag_key: str, tag_value: str) -> List[str]:
'Aws autoscaling list function.\n\n List name of all autoscaling groups with\n specific tag and return it in list.\n\n :param str tag_key:\n Aws tag key to use for filter resources\n :param str tag_value:\n Aws tag value to use for filter resources\n\n :return list asg_list:\n The names of the Auto Scaling groups\n '
asg_list = []
paginator = self.asg.get_paginator('describe_auto_scaling_groups')
for page in paginator.paginate():
for group in page['AutoScalingGroups']:
for tag in group['Tags']:
if ((tag['Key'] == tag_key) and (tag['Value'] == tag_value)):
asg_list.append(group['AutoScalingGroupName'])
return asg_list
|
Aws autoscaling list function.
List name of all autoscaling groups with
specific tag and return it in list.
:param str tag_key:
Aws tag key to use for filter resources
:param str tag_value:
Aws tag value to use for filter resources
:return list asg_list:
The names of the Auto Scaling groups
|
modules/auto-start/lambda-scheduler-stop-start/package/scheduler/autoscaling_handler.py
|
list_groups
|
ministryofjustice/hmpps-terraform-modules
| 2
|
python
|
def list_groups(self, tag_key: str, tag_value: str) -> List[str]:
'Aws autoscaling list function.\n\n List name of all autoscaling groups with\n specific tag and return it in list.\n\n :param str tag_key:\n Aws tag key to use for filter resources\n :param str tag_value:\n Aws tag value to use for filter resources\n\n :return list asg_list:\n The names of the Auto Scaling groups\n '
asg_list = []
paginator = self.asg.get_paginator('describe_auto_scaling_groups')
for page in paginator.paginate():
for group in page['AutoScalingGroups']:
for tag in group['Tags']:
if ((tag['Key'] == tag_key) and (tag['Value'] == tag_value)):
asg_list.append(group['AutoScalingGroupName'])
return asg_list
|
def list_groups(self, tag_key: str, tag_value: str) -> List[str]:
'Aws autoscaling list function.\n\n List name of all autoscaling groups with\n specific tag and return it in list.\n\n :param str tag_key:\n Aws tag key to use for filter resources\n :param str tag_value:\n Aws tag value to use for filter resources\n\n :return list asg_list:\n The names of the Auto Scaling groups\n '
asg_list = []
paginator = self.asg.get_paginator('describe_auto_scaling_groups')
for page in paginator.paginate():
for group in page['AutoScalingGroups']:
for tag in group['Tags']:
if ((tag['Key'] == tag_key) and (tag['Value'] == tag_value)):
asg_list.append(group['AutoScalingGroupName'])
return asg_list<|docstring|>Aws autoscaling list function.
List name of all autoscaling groups with
specific tag and return it in list.
:param str tag_key:
Aws tag key to use for filter resources
:param str tag_value:
Aws tag value to use for filter resources
:return list asg_list:
The names of the Auto Scaling groups<|endoftext|>
|
6fb1ee4d086089deb22f0639017cab2dbceaccbb00e52a357cacd21ad14fb63b
|
def list_instances(self, asg_list: List[str]) -> Iterator[str]:
'Aws autoscaling instance list function.\n\n List name of all instances in the autoscaling groups\n and return it in list.\n\n :param list asg_list:\n The names of the Auto Scaling groups.\n\n :yield Iterator[str]:\n The names of the instances in Auto Scaling groups.\n '
if (not asg_list):
return iter([])
paginator = self.asg.get_paginator('describe_auto_scaling_groups')
for page in paginator.paginate(AutoScalingGroupNames=asg_list):
for scalinggroup in page['AutoScalingGroups']:
for instance in scalinggroup['Instances']:
(yield instance['InstanceId'])
|
Aws autoscaling instance list function.
List name of all instances in the autoscaling groups
and return it in list.
:param list asg_list:
The names of the Auto Scaling groups.
:yield Iterator[str]:
The names of the instances in Auto Scaling groups.
|
modules/auto-start/lambda-scheduler-stop-start/package/scheduler/autoscaling_handler.py
|
list_instances
|
ministryofjustice/hmpps-terraform-modules
| 2
|
python
|
def list_instances(self, asg_list: List[str]) -> Iterator[str]:
'Aws autoscaling instance list function.\n\n List name of all instances in the autoscaling groups\n and return it in list.\n\n :param list asg_list:\n The names of the Auto Scaling groups.\n\n :yield Iterator[str]:\n The names of the instances in Auto Scaling groups.\n '
if (not asg_list):
return iter([])
paginator = self.asg.get_paginator('describe_auto_scaling_groups')
for page in paginator.paginate(AutoScalingGroupNames=asg_list):
for scalinggroup in page['AutoScalingGroups']:
for instance in scalinggroup['Instances']:
(yield instance['InstanceId'])
|
def list_instances(self, asg_list: List[str]) -> Iterator[str]:
'Aws autoscaling instance list function.\n\n List name of all instances in the autoscaling groups\n and return it in list.\n\n :param list asg_list:\n The names of the Auto Scaling groups.\n\n :yield Iterator[str]:\n The names of the instances in Auto Scaling groups.\n '
if (not asg_list):
return iter([])
paginator = self.asg.get_paginator('describe_auto_scaling_groups')
for page in paginator.paginate(AutoScalingGroupNames=asg_list):
for scalinggroup in page['AutoScalingGroups']:
for instance in scalinggroup['Instances']:
(yield instance['InstanceId'])<|docstring|>Aws autoscaling instance list function.
List name of all instances in the autoscaling groups
and return it in list.
:param list asg_list:
The names of the Auto Scaling groups.
:yield Iterator[str]:
The names of the instances in Auto Scaling groups.<|endoftext|>
|
267832dba2dcfefc7dbd18452eaf310581dacf0d5c04106396dbf960335b941f
|
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, description: Optional[pulumi.Input[str]]=None, network_interface_id: Optional[pulumi.Input[str]]=None, network_load_balancer_arn: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, __props__=None, __name__=None, __opts__=None):
'\n Provides an Traffic mirror target. Read [limits and considerations](https://docs.aws.amazon.com/vpc/latest/mirroring/traffic-mirroring-considerations.html) for traffic mirroring\n\n ## Example Usage\n\n To create a basic traffic mirror session\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n nlb = aws.ec2.TrafficMirrorTarget("nlb",\n description="NLB target",\n network_load_balancer_arn=aws_lb["lb"]["arn"])\n eni = aws.ec2.TrafficMirrorTarget("eni",\n description="ENI target",\n network_interface_id=aws_instance["test"]["primary_network_interface_id"])\n ```\n\n ## Import\n\n Traffic mirror targets can be imported using the `id`, e.g.\n\n ```sh\n $ pulumi import aws:ec2/trafficMirrorTarget:TrafficMirrorTarget target tmt-0c13a005422b86606\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] description: A description of the traffic mirror session.\n :param pulumi.Input[str] network_interface_id: The network interface ID that is associated with the target.\n :param pulumi.Input[str] network_load_balancer_arn: The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.\n '
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = _utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['network_interface_id'] = network_interface_id
__props__['network_load_balancer_arn'] = network_load_balancer_arn
__props__['tags'] = tags
__props__['arn'] = None
__props__['owner_id'] = None
super(TrafficMirrorTarget, __self__).__init__('aws:ec2/trafficMirrorTarget:TrafficMirrorTarget', resource_name, __props__, opts)
|
Provides an Traffic mirror target. Read [limits and considerations](https://docs.aws.amazon.com/vpc/latest/mirroring/traffic-mirroring-considerations.html) for traffic mirroring
## Example Usage
To create a basic traffic mirror session
```python
import pulumi
import pulumi_aws as aws
nlb = aws.ec2.TrafficMirrorTarget("nlb",
description="NLB target",
network_load_balancer_arn=aws_lb["lb"]["arn"])
eni = aws.ec2.TrafficMirrorTarget("eni",
description="ENI target",
network_interface_id=aws_instance["test"]["primary_network_interface_id"])
```
## Import
Traffic mirror targets can be imported using the `id`, e.g.
```sh
$ pulumi import aws:ec2/trafficMirrorTarget:TrafficMirrorTarget target tmt-0c13a005422b86606
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description of the traffic mirror session.
:param pulumi.Input[str] network_interface_id: The network interface ID that is associated with the target.
:param pulumi.Input[str] network_load_balancer_arn: The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.
|
sdk/python/pulumi_aws/ec2/traffic_mirror_target.py
|
__init__
|
elad-snyk/pulumi-aws
| 0
|
python
|
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, description: Optional[pulumi.Input[str]]=None, network_interface_id: Optional[pulumi.Input[str]]=None, network_load_balancer_arn: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, __props__=None, __name__=None, __opts__=None):
'\n Provides an Traffic mirror target. Read [limits and considerations](https://docs.aws.amazon.com/vpc/latest/mirroring/traffic-mirroring-considerations.html) for traffic mirroring\n\n ## Example Usage\n\n To create a basic traffic mirror session\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n nlb = aws.ec2.TrafficMirrorTarget("nlb",\n description="NLB target",\n network_load_balancer_arn=aws_lb["lb"]["arn"])\n eni = aws.ec2.TrafficMirrorTarget("eni",\n description="ENI target",\n network_interface_id=aws_instance["test"]["primary_network_interface_id"])\n ```\n\n ## Import\n\n Traffic mirror targets can be imported using the `id`, e.g.\n\n ```sh\n $ pulumi import aws:ec2/trafficMirrorTarget:TrafficMirrorTarget target tmt-0c13a005422b86606\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] description: A description of the traffic mirror session.\n :param pulumi.Input[str] network_interface_id: The network interface ID that is associated with the target.\n :param pulumi.Input[str] network_load_balancer_arn: The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.\n '
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = _utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['network_interface_id'] = network_interface_id
__props__['network_load_balancer_arn'] = network_load_balancer_arn
__props__['tags'] = tags
__props__['arn'] = None
__props__['owner_id'] = None
super(TrafficMirrorTarget, __self__).__init__('aws:ec2/trafficMirrorTarget:TrafficMirrorTarget', resource_name, __props__, opts)
|
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, description: Optional[pulumi.Input[str]]=None, network_interface_id: Optional[pulumi.Input[str]]=None, network_load_balancer_arn: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, __props__=None, __name__=None, __opts__=None):
'\n Provides an Traffic mirror target. Read [limits and considerations](https://docs.aws.amazon.com/vpc/latest/mirroring/traffic-mirroring-considerations.html) for traffic mirroring\n\n ## Example Usage\n\n To create a basic traffic mirror session\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n nlb = aws.ec2.TrafficMirrorTarget("nlb",\n description="NLB target",\n network_load_balancer_arn=aws_lb["lb"]["arn"])\n eni = aws.ec2.TrafficMirrorTarget("eni",\n description="ENI target",\n network_interface_id=aws_instance["test"]["primary_network_interface_id"])\n ```\n\n ## Import\n\n Traffic mirror targets can be imported using the `id`, e.g.\n\n ```sh\n $ pulumi import aws:ec2/trafficMirrorTarget:TrafficMirrorTarget target tmt-0c13a005422b86606\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] description: A description of the traffic mirror session.\n :param pulumi.Input[str] network_interface_id: The network interface ID that is associated with the target.\n :param pulumi.Input[str] network_load_balancer_arn: The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.\n '
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = _utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['network_interface_id'] = network_interface_id
__props__['network_load_balancer_arn'] = network_load_balancer_arn
__props__['tags'] = tags
__props__['arn'] = None
__props__['owner_id'] = None
super(TrafficMirrorTarget, __self__).__init__('aws:ec2/trafficMirrorTarget:TrafficMirrorTarget', resource_name, __props__, opts)<|docstring|>Provides an Traffic mirror target. Read [limits and considerations](https://docs.aws.amazon.com/vpc/latest/mirroring/traffic-mirroring-considerations.html) for traffic mirroring
## Example Usage
To create a basic traffic mirror session
```python
import pulumi
import pulumi_aws as aws
nlb = aws.ec2.TrafficMirrorTarget("nlb",
description="NLB target",
network_load_balancer_arn=aws_lb["lb"]["arn"])
eni = aws.ec2.TrafficMirrorTarget("eni",
description="ENI target",
network_interface_id=aws_instance["test"]["primary_network_interface_id"])
```
## Import
Traffic mirror targets can be imported using the `id`, e.g.
```sh
$ pulumi import aws:ec2/trafficMirrorTarget:TrafficMirrorTarget target tmt-0c13a005422b86606
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description of the traffic mirror session.
:param pulumi.Input[str] network_interface_id: The network interface ID that is associated with the target.
:param pulumi.Input[str] network_load_balancer_arn: The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.<|endoftext|>
|
40cd86e5b67644fceb77fb51cacf123ad2e57699b735a08abf361e7e5c1e0d81
|
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, arn: Optional[pulumi.Input[str]]=None, description: Optional[pulumi.Input[str]]=None, network_interface_id: Optional[pulumi.Input[str]]=None, network_load_balancer_arn: Optional[pulumi.Input[str]]=None, owner_id: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None) -> 'TrafficMirrorTarget':
"\n Get an existing TrafficMirrorTarget resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] arn: The ARN of the traffic mirror target.\n :param pulumi.Input[str] description: A description of the traffic mirror session.\n :param pulumi.Input[str] network_interface_id: The network interface ID that is associated with the target.\n :param pulumi.Input[str] network_load_balancer_arn: The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.\n :param pulumi.Input[str] owner_id: The ID of the AWS account that owns the traffic mirror target.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__['arn'] = arn
__props__['description'] = description
__props__['network_interface_id'] = network_interface_id
__props__['network_load_balancer_arn'] = network_load_balancer_arn
__props__['owner_id'] = owner_id
__props__['tags'] = tags
return TrafficMirrorTarget(resource_name, opts=opts, __props__=__props__)
|
Get an existing TrafficMirrorTarget resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN of the traffic mirror target.
:param pulumi.Input[str] description: A description of the traffic mirror session.
:param pulumi.Input[str] network_interface_id: The network interface ID that is associated with the target.
:param pulumi.Input[str] network_load_balancer_arn: The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.
:param pulumi.Input[str] owner_id: The ID of the AWS account that owns the traffic mirror target.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.
|
sdk/python/pulumi_aws/ec2/traffic_mirror_target.py
|
get
|
elad-snyk/pulumi-aws
| 0
|
python
|
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, arn: Optional[pulumi.Input[str]]=None, description: Optional[pulumi.Input[str]]=None, network_interface_id: Optional[pulumi.Input[str]]=None, network_load_balancer_arn: Optional[pulumi.Input[str]]=None, owner_id: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None) -> 'TrafficMirrorTarget':
"\n Get an existing TrafficMirrorTarget resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] arn: The ARN of the traffic mirror target.\n :param pulumi.Input[str] description: A description of the traffic mirror session.\n :param pulumi.Input[str] network_interface_id: The network interface ID that is associated with the target.\n :param pulumi.Input[str] network_load_balancer_arn: The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.\n :param pulumi.Input[str] owner_id: The ID of the AWS account that owns the traffic mirror target.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__['arn'] = arn
__props__['description'] = description
__props__['network_interface_id'] = network_interface_id
__props__['network_load_balancer_arn'] = network_load_balancer_arn
__props__['owner_id'] = owner_id
__props__['tags'] = tags
return TrafficMirrorTarget(resource_name, opts=opts, __props__=__props__)
|
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, arn: Optional[pulumi.Input[str]]=None, description: Optional[pulumi.Input[str]]=None, network_interface_id: Optional[pulumi.Input[str]]=None, network_load_balancer_arn: Optional[pulumi.Input[str]]=None, owner_id: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None) -> 'TrafficMirrorTarget':
"\n Get an existing TrafficMirrorTarget resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] arn: The ARN of the traffic mirror target.\n :param pulumi.Input[str] description: A description of the traffic mirror session.\n :param pulumi.Input[str] network_interface_id: The network interface ID that is associated with the target.\n :param pulumi.Input[str] network_load_balancer_arn: The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.\n :param pulumi.Input[str] owner_id: The ID of the AWS account that owns the traffic mirror target.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__['arn'] = arn
__props__['description'] = description
__props__['network_interface_id'] = network_interface_id
__props__['network_load_balancer_arn'] = network_load_balancer_arn
__props__['owner_id'] = owner_id
__props__['tags'] = tags
return TrafficMirrorTarget(resource_name, opts=opts, __props__=__props__)<|docstring|>Get an existing TrafficMirrorTarget resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN of the traffic mirror target.
:param pulumi.Input[str] description: A description of the traffic mirror session.
:param pulumi.Input[str] network_interface_id: The network interface ID that is associated with the target.
:param pulumi.Input[str] network_load_balancer_arn: The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.
:param pulumi.Input[str] owner_id: The ID of the AWS account that owns the traffic mirror target.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.<|endoftext|>
|
d2de90257f3f434e6031ecda7d240ac8fea4fd15ce21b439b7ae511c3edd3c33
|
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
'\n The ARN of the traffic mirror target.\n '
return pulumi.get(self, 'arn')
|
The ARN of the traffic mirror target.
|
sdk/python/pulumi_aws/ec2/traffic_mirror_target.py
|
arn
|
elad-snyk/pulumi-aws
| 0
|
python
|
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'arn')
|
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'arn')<|docstring|>The ARN of the traffic mirror target.<|endoftext|>
|
1a8b303af0113edec32421e9053acb79e290577de0d4a6ec76882828eec3e4e3
|
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
'\n A description of the traffic mirror session.\n '
return pulumi.get(self, 'description')
|
A description of the traffic mirror session.
|
sdk/python/pulumi_aws/ec2/traffic_mirror_target.py
|
description
|
elad-snyk/pulumi-aws
| 0
|
python
|
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'description')
|
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'description')<|docstring|>A description of the traffic mirror session.<|endoftext|>
|
8adead8de02e478b1a87af595fd6eb05ad90afed0620eb69aad4e3c38a636df2
|
@property
@pulumi.getter(name='networkInterfaceId')
def network_interface_id(self) -> pulumi.Output[Optional[str]]:
'\n The network interface ID that is associated with the target.\n '
return pulumi.get(self, 'network_interface_id')
|
The network interface ID that is associated with the target.
|
sdk/python/pulumi_aws/ec2/traffic_mirror_target.py
|
network_interface_id
|
elad-snyk/pulumi-aws
| 0
|
python
|
@property
@pulumi.getter(name='networkInterfaceId')
def network_interface_id(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'network_interface_id')
|
@property
@pulumi.getter(name='networkInterfaceId')
def network_interface_id(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'network_interface_id')<|docstring|>The network interface ID that is associated with the target.<|endoftext|>
|
8fb1ed09151f649a04b331030a131031bcd1653bce058014f223d970a8f64d07
|
@property
@pulumi.getter(name='networkLoadBalancerArn')
def network_load_balancer_arn(self) -> pulumi.Output[Optional[str]]:
'\n The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.\n '
return pulumi.get(self, 'network_load_balancer_arn')
|
The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.
|
sdk/python/pulumi_aws/ec2/traffic_mirror_target.py
|
network_load_balancer_arn
|
elad-snyk/pulumi-aws
| 0
|
python
|
@property
@pulumi.getter(name='networkLoadBalancerArn')
def network_load_balancer_arn(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'network_load_balancer_arn')
|
@property
@pulumi.getter(name='networkLoadBalancerArn')
def network_load_balancer_arn(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'network_load_balancer_arn')<|docstring|>The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target.<|endoftext|>
|
a21a21c93fddf3f76086cb0dd1c077ff3ef3f7b01513b9c0048e6b00e8ef0107
|
@property
@pulumi.getter(name='ownerId')
def owner_id(self) -> pulumi.Output[str]:
'\n The ID of the AWS account that owns the traffic mirror target.\n '
return pulumi.get(self, 'owner_id')
|
The ID of the AWS account that owns the traffic mirror target.
|
sdk/python/pulumi_aws/ec2/traffic_mirror_target.py
|
owner_id
|
elad-snyk/pulumi-aws
| 0
|
python
|
@property
@pulumi.getter(name='ownerId')
def owner_id(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'owner_id')
|
@property
@pulumi.getter(name='ownerId')
def owner_id(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'owner_id')<|docstring|>The ID of the AWS account that owns the traffic mirror target.<|endoftext|>
|
416315f8d18d21c102c575a732114c57a0691e45927ad99d1a7897c5e0a8b43a
|
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]:
'\n Key-value map of resource tags.\n '
return pulumi.get(self, 'tags')
|
Key-value map of resource tags.
|
sdk/python/pulumi_aws/ec2/traffic_mirror_target.py
|
tags
|
elad-snyk/pulumi-aws
| 0
|
python
|
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]:
'\n \n '
return pulumi.get(self, 'tags')
|
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]:
'\n \n '
return pulumi.get(self, 'tags')<|docstring|>Key-value map of resource tags.<|endoftext|>
|
a0f7af2298b8786d35b084a21bdfbd67c4014589b8d0273ce7decf22666f92f0
|
def tcp_netcat(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n Run TCP traffic with netcat (nc)\n '
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('SERVER%s' % node))
hint_fn(('traffic=tcp type=netcat node=%s%s server=%d tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd1 = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('cat /dev/zero | nc -l %d >/dev/null' % server_port))]
cmd2 = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('sleep 0.2; nc -d %s %d >/dev/null' % (os.environ[('IP_SERVER%s' % node)], server_port)))]
if dry_run:
logger.debug(get_log_cmd(cmd1))
logger.debug(get_log_cmd(cmd2))
def stop_test():
pass
else:
pid1 = run_fn(cmd1)
pid2 = run_fn(cmd2)
processes.add_known_pid(pid1)
processes.add_known_pid(pid2)
def stop_test():
processes.kill_pid(pid1)
processes.kill_pid(pid2)
return stop_test
|
Run TCP traffic with netcat (nc)
|
aqmt/traffic.py
|
tcp_netcat
|
L4STeam/aqmt
| 7
|
python
|
def tcp_netcat(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n \n '
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('SERVER%s' % node))
hint_fn(('traffic=tcp type=netcat node=%s%s server=%d tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd1 = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('cat /dev/zero | nc -l %d >/dev/null' % server_port))]
cmd2 = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('sleep 0.2; nc -d %s %d >/dev/null' % (os.environ[('IP_SERVER%s' % node)], server_port)))]
if dry_run:
logger.debug(get_log_cmd(cmd1))
logger.debug(get_log_cmd(cmd2))
def stop_test():
pass
else:
pid1 = run_fn(cmd1)
pid2 = run_fn(cmd2)
processes.add_known_pid(pid1)
processes.add_known_pid(pid2)
def stop_test():
processes.kill_pid(pid1)
processes.kill_pid(pid2)
return stop_test
|
def tcp_netcat(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n \n '
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('SERVER%s' % node))
hint_fn(('traffic=tcp type=netcat node=%s%s server=%d tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd1 = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('cat /dev/zero | nc -l %d >/dev/null' % server_port))]
cmd2 = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('sleep 0.2; nc -d %s %d >/dev/null' % (os.environ[('IP_SERVER%s' % node)], server_port)))]
if dry_run:
logger.debug(get_log_cmd(cmd1))
logger.debug(get_log_cmd(cmd2))
def stop_test():
pass
else:
pid1 = run_fn(cmd1)
pid2 = run_fn(cmd2)
processes.add_known_pid(pid1)
processes.add_known_pid(pid2)
def stop_test():
processes.kill_pid(pid1)
processes.kill_pid(pid2)
return stop_test<|docstring|>Run TCP traffic with netcat (nc)<|endoftext|>
|
bf98bdbe5d108f815b3dc2806054e3edfc926b195cae3bdad7270bf97373b199
|
def tcp_iperf(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n Run TCP traffic with iperf2\n '
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('CLIENT%s' % node))
hint_fn(('traffic=tcp type=iperf2 node=%s%s client=%d tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd1 = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('iperf -s -p %d' % server_port))]
cmd2 = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('sleep 0.2; iperf -c %s -p %d -t 86400' % (os.environ[('IP_CLIENT%s' % node)], server_port)))]
logger.debug(get_log_cmd(cmd1))
logger.debug(get_log_cmd(cmd2))
if dry_run:
def stop_test():
pass
else:
pid1 = run_fn(cmd1)
pid2 = run_fn(cmd2)
processes.add_known_pid(pid1)
processes.add_known_pid(pid2)
def stop_test():
processes.kill_pid(pid1)
processes.kill_pid(pid2)
return stop_test
|
Run TCP traffic with iperf2
|
aqmt/traffic.py
|
tcp_iperf
|
L4STeam/aqmt
| 7
|
python
|
def tcp_iperf(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n \n '
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('CLIENT%s' % node))
hint_fn(('traffic=tcp type=iperf2 node=%s%s client=%d tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd1 = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('iperf -s -p %d' % server_port))]
cmd2 = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('sleep 0.2; iperf -c %s -p %d -t 86400' % (os.environ[('IP_CLIENT%s' % node)], server_port)))]
logger.debug(get_log_cmd(cmd1))
logger.debug(get_log_cmd(cmd2))
if dry_run:
def stop_test():
pass
else:
pid1 = run_fn(cmd1)
pid2 = run_fn(cmd2)
processes.add_known_pid(pid1)
processes.add_known_pid(pid2)
def stop_test():
processes.kill_pid(pid1)
processes.kill_pid(pid2)
return stop_test
|
def tcp_iperf(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n \n '
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('CLIENT%s' % node))
hint_fn(('traffic=tcp type=iperf2 node=%s%s client=%d tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd1 = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('iperf -s -p %d' % server_port))]
cmd2 = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('sleep 0.2; iperf -c %s -p %d -t 86400' % (os.environ[('IP_CLIENT%s' % node)], server_port)))]
logger.debug(get_log_cmd(cmd1))
logger.debug(get_log_cmd(cmd2))
if dry_run:
def stop_test():
pass
else:
pid1 = run_fn(cmd1)
pid2 = run_fn(cmd2)
processes.add_known_pid(pid1)
processes.add_known_pid(pid2)
def stop_test():
processes.kill_pid(pid1)
processes.kill_pid(pid2)
return stop_test<|docstring|>Run TCP traffic with iperf2<|endoftext|>
|
9d3448e6036a886aec18d50fd653df07f388933aadd7b1b374c796b7d179d859
|
def scp(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n Run TCP traffic with SCP (SFTP)\n\n Note there are some issues with the window size inside\n SSH as it uses its own sliding window. This test is therefore\n not reliable with a high BDP\n\n See:\n - http://www.slideshare.net/datacenters/enabling-high-performance-bulk-data-transfers-with-ssh\n - http://stackoverflow.com/questions/8849240/why-when-i-transfer-a-file-through-sftp-it-takes-longer-than-ftp\n\n All traffic goes over port 22 as of now. Tagging is\n not really possible because of this.\n '
server_port = (- 1)
node = ('A' if (node == 'a') else 'B')
hint_fn(('traffic=tcp type=scp node=%s%s server=%s tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('scp /opt/testbed/bigfile %s:/tmp/' % os.environ[('IP_CLIENT%s' % node)]))]
logger.debug(get_log_cmd(cmd))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd)
processes.add_known_pid(pid_server)
def stop_test():
processes.kill_pid(pid_server)
return stop_test
|
Run TCP traffic with SCP (SFTP)
Note there are some issues with the window size inside
SSH as it uses its own sliding window. This test is therefore
not reliable with a high BDP
See:
- http://www.slideshare.net/datacenters/enabling-high-performance-bulk-data-transfers-with-ssh
- http://stackoverflow.com/questions/8849240/why-when-i-transfer-a-file-through-sftp-it-takes-longer-than-ftp
All traffic goes over port 22 as of now. Tagging is
not really possible because of this.
|
aqmt/traffic.py
|
scp
|
L4STeam/aqmt
| 7
|
python
|
def scp(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n Run TCP traffic with SCP (SFTP)\n\n Note there are some issues with the window size inside\n SSH as it uses its own sliding window. This test is therefore\n not reliable with a high BDP\n\n See:\n - http://www.slideshare.net/datacenters/enabling-high-performance-bulk-data-transfers-with-ssh\n - http://stackoverflow.com/questions/8849240/why-when-i-transfer-a-file-through-sftp-it-takes-longer-than-ftp\n\n All traffic goes over port 22 as of now. Tagging is\n not really possible because of this.\n '
server_port = (- 1)
node = ('A' if (node == 'a') else 'B')
hint_fn(('traffic=tcp type=scp node=%s%s server=%s tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('scp /opt/testbed/bigfile %s:/tmp/' % os.environ[('IP_CLIENT%s' % node)]))]
logger.debug(get_log_cmd(cmd))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd)
processes.add_known_pid(pid_server)
def stop_test():
processes.kill_pid(pid_server)
return stop_test
|
def scp(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n Run TCP traffic with SCP (SFTP)\n\n Note there are some issues with the window size inside\n SSH as it uses its own sliding window. This test is therefore\n not reliable with a high BDP\n\n See:\n - http://www.slideshare.net/datacenters/enabling-high-performance-bulk-data-transfers-with-ssh\n - http://stackoverflow.com/questions/8849240/why-when-i-transfer-a-file-through-sftp-it-takes-longer-than-ftp\n\n All traffic goes over port 22 as of now. Tagging is\n not really possible because of this.\n '
server_port = (- 1)
node = ('A' if (node == 'a') else 'B')
hint_fn(('traffic=tcp type=scp node=%s%s server=%s tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('scp /opt/testbed/bigfile %s:/tmp/' % os.environ[('IP_CLIENT%s' % node)]))]
logger.debug(get_log_cmd(cmd))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd)
processes.add_known_pid(pid_server)
def stop_test():
processes.kill_pid(pid_server)
return stop_test<|docstring|>Run TCP traffic with SCP (SFTP)
Note there are some issues with the window size inside
SSH as it uses its own sliding window. This test is therefore
not reliable with a high BDP
See:
- http://www.slideshare.net/datacenters/enabling-high-performance-bulk-data-transfers-with-ssh
- http://stackoverflow.com/questions/8849240/why-when-i-transfer-a-file-through-sftp-it-takes-longer-than-ftp
All traffic goes over port 22 as of now. Tagging is
not really possible because of this.<|endoftext|>
|
33bbf7c4cb045477b3e13d0f670fcea31a2777fa6fdc7bd3992d25c0f5356f94
|
def sshstream(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n Run TCP traffic with SSH (streaming directly without scp)\n\n See scp method for concerns about high BDP and that\n this might not be precise.\n\n All traffic goes over port 22 as of now. Tagging is\n not really possible because of this.\n '
server_port = (- 1)
node = ('A' if (node == 'a') else 'B')
hint_fn(('traffic=tcp type=ssh node=%s%s server=%s tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ("\n dd if=/dev/zero | ssh %s 'cat - >/dev/null'\n " % os.environ[('IP_CLIENT%s' % node)]))]
logger.debug(get_log_cmd(cmd))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd)
processes.add_known_pid(pid_server)
def stop_test():
processes.kill_pid(pid_server)
return stop_test
|
Run TCP traffic with SSH (streaming directly without scp)
See scp method for concerns about high BDP and that
this might not be precise.
All traffic goes over port 22 as of now. Tagging is
not really possible because of this.
|
aqmt/traffic.py
|
sshstream
|
L4STeam/aqmt
| 7
|
python
|
def sshstream(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n Run TCP traffic with SSH (streaming directly without scp)\n\n See scp method for concerns about high BDP and that\n this might not be precise.\n\n All traffic goes over port 22 as of now. Tagging is\n not really possible because of this.\n '
server_port = (- 1)
node = ('A' if (node == 'a') else 'B')
hint_fn(('traffic=tcp type=ssh node=%s%s server=%s tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ("\n dd if=/dev/zero | ssh %s 'cat - >/dev/null'\n " % os.environ[('IP_CLIENT%s' % node)]))]
logger.debug(get_log_cmd(cmd))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd)
processes.add_known_pid(pid_server)
def stop_test():
processes.kill_pid(pid_server)
return stop_test
|
def sshstream(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n Run TCP traffic with SSH (streaming directly without scp)\n\n See scp method for concerns about high BDP and that\n this might not be precise.\n\n All traffic goes over port 22 as of now. Tagging is\n not really possible because of this.\n '
server_port = (- 1)
node = ('A' if (node == 'a') else 'B')
hint_fn(('traffic=tcp type=ssh node=%s%s server=%s tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ("\n dd if=/dev/zero | ssh %s 'cat - >/dev/null'\n " % os.environ[('IP_CLIENT%s' % node)]))]
logger.debug(get_log_cmd(cmd))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd)
processes.add_known_pid(pid_server)
def stop_test():
processes.kill_pid(pid_server)
return stop_test<|docstring|>Run TCP traffic with SSH (streaming directly without scp)
See scp method for concerns about high BDP and that
this might not be precise.
All traffic goes over port 22 as of now. Tagging is
not really possible because of this.<|endoftext|>
|
16a09e82806899a66c7f7a030f4690e98092ad1dedfb4be62113df46498304a9
|
def greedy(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n Run greedy TCP traffic\n\n Requires https://github.com/henrist/greedy on the machines\n (Available in the Docker version by default)\n\n Greedy = always data to send, full frames\n\n node: a or b (a is normally classic traffic, b is normally l4s)\n\n Tagging makes it possible to map similar traffic from multiple tests,\n despite being different ports and setup\n\n Returns a lambda to stop the traffic\n '
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('SERVER%s' % node))
hint_fn(('traffic=tcp type=greedy node=%s%s server=%s tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd1 = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('greedy -vv -s %d' % server_port))]
cmd2 = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('sleep 0.2; greedy -vv %s %d' % (os.environ[('IP_SERVER%s' % node)], server_port)))]
logger.debug(get_log_cmd(cmd1))
logger.debug(get_log_cmd(cmd2))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd1)
pid_client = run_fn(cmd2)
processes.add_known_pid(pid_server)
processes.add_known_pid(pid_client)
def stop_test():
processes.kill_pid(pid_server)
processes.kill_pid(pid_client)
return stop_test
|
Run greedy TCP traffic
Requires https://github.com/henrist/greedy on the machines
(Available in the Docker version by default)
Greedy = always data to send, full frames
node: a or b (a is normally classic traffic, b is normally l4s)
Tagging makes it possible to map similar traffic from multiple tests,
despite being different ports and setup
Returns a lambda to stop the traffic
|
aqmt/traffic.py
|
greedy
|
L4STeam/aqmt
| 7
|
python
|
def greedy(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n Run greedy TCP traffic\n\n Requires https://github.com/henrist/greedy on the machines\n (Available in the Docker version by default)\n\n Greedy = always data to send, full frames\n\n node: a or b (a is normally classic traffic, b is normally l4s)\n\n Tagging makes it possible to map similar traffic from multiple tests,\n despite being different ports and setup\n\n Returns a lambda to stop the traffic\n '
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('SERVER%s' % node))
hint_fn(('traffic=tcp type=greedy node=%s%s server=%s tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd1 = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('greedy -vv -s %d' % server_port))]
cmd2 = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('sleep 0.2; greedy -vv %s %d' % (os.environ[('IP_SERVER%s' % node)], server_port)))]
logger.debug(get_log_cmd(cmd1))
logger.debug(get_log_cmd(cmd2))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd1)
pid_client = run_fn(cmd2)
processes.add_known_pid(pid_server)
processes.add_known_pid(pid_client)
def stop_test():
processes.kill_pid(pid_server)
processes.kill_pid(pid_client)
return stop_test
|
def greedy(dry_run, testbed, hint_fn, run_fn, node='a', tag=None):
'\n Run greedy TCP traffic\n\n Requires https://github.com/henrist/greedy on the machines\n (Available in the Docker version by default)\n\n Greedy = always data to send, full frames\n\n node: a or b (a is normally classic traffic, b is normally l4s)\n\n Tagging makes it possible to map similar traffic from multiple tests,\n despite being different ports and setup\n\n Returns a lambda to stop the traffic\n '
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('SERVER%s' % node))
hint_fn(('traffic=tcp type=greedy node=%s%s server=%s tag=%s' % (node, node, server_port, ('No-tag' if (tag is None) else tag))))
cmd1 = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('greedy -vv -s %d' % server_port))]
cmd2 = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('sleep 0.2; greedy -vv %s %d' % (os.environ[('IP_SERVER%s' % node)], server_port)))]
logger.debug(get_log_cmd(cmd1))
logger.debug(get_log_cmd(cmd2))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd1)
pid_client = run_fn(cmd2)
processes.add_known_pid(pid_server)
processes.add_known_pid(pid_client)
def stop_test():
processes.kill_pid(pid_server)
processes.kill_pid(pid_client)
return stop_test<|docstring|>Run greedy TCP traffic
Requires https://github.com/henrist/greedy on the machines
(Available in the Docker version by default)
Greedy = always data to send, full frames
node: a or b (a is normally classic traffic, b is normally l4s)
Tagging makes it possible to map similar traffic from multiple tests,
despite being different ports and setup
Returns a lambda to stop the traffic<|endoftext|>
|
dedceeb7f0b2be3a13a24502025841bdcfb1965391df1f964913a34bb8e0df83
|
def udp(dry_run, testbed, hint_fn, run_fn, bitrate, node='a', ect='nonect', tag=None):
'\n Run UDP traffic at a constant bitrate\n\n ect: ect0 = ECT(0), ect1 = ECT(1), all other is Non-ECT\n\n Tagging makes it possible to map similar traffic from multiple tests,\n despite being different ports and setup\n\n Returns a lambda to stop the traffic\n '
tos = ''
if (ect == 'ect1'):
tos = '--tos 0x01'
elif (ect == 'ect0'):
tos = '--tos 0x02'
else:
ect = 'nonect'
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('CLIENT%s' % node))
hint_fn(('traffic=udp node=%s%s client=%s rate=%d ect=%s tag=%s' % (node, node, server_port, bitrate, ect, ('No-tag' if (tag is None) else tag))))
cmd_server = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('iperf -s -p %d' % server_port))]
framesize = 1514
headers = 42
length = (framesize - headers)
bitrate = ((bitrate * length) / framesize)
cmd_client = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('sleep 0.5; iperf -c %s -p %d %s -u -l %d -R -b %d -i 1 -t 99999' % (os.environ[('IP_CLIENT%s' % node)], server_port, tos, length, bitrate)))]
logger.debug(get_log_cmd(cmd_server))
logger.debug(get_log_cmd(cmd_client))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd_server)
pid_client = run_fn(cmd_client)
processes.add_known_pid(pid_server)
processes.add_known_pid(pid_client)
def stop_test():
processes.kill_pid(pid_client)
processes.kill_pid(pid_server)
return stop_test
|
Run UDP traffic at a constant bitrate
ect: ect0 = ECT(0), ect1 = ECT(1), all other is Non-ECT
Tagging makes it possible to map similar traffic from multiple tests,
despite being different ports and setup
Returns a lambda to stop the traffic
|
aqmt/traffic.py
|
udp
|
L4STeam/aqmt
| 7
|
python
|
def udp(dry_run, testbed, hint_fn, run_fn, bitrate, node='a', ect='nonect', tag=None):
'\n Run UDP traffic at a constant bitrate\n\n ect: ect0 = ECT(0), ect1 = ECT(1), all other is Non-ECT\n\n Tagging makes it possible to map similar traffic from multiple tests,\n despite being different ports and setup\n\n Returns a lambda to stop the traffic\n '
tos =
if (ect == 'ect1'):
tos = '--tos 0x01'
elif (ect == 'ect0'):
tos = '--tos 0x02'
else:
ect = 'nonect'
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('CLIENT%s' % node))
hint_fn(('traffic=udp node=%s%s client=%s rate=%d ect=%s tag=%s' % (node, node, server_port, bitrate, ect, ('No-tag' if (tag is None) else tag))))
cmd_server = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('iperf -s -p %d' % server_port))]
framesize = 1514
headers = 42
length = (framesize - headers)
bitrate = ((bitrate * length) / framesize)
cmd_client = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('sleep 0.5; iperf -c %s -p %d %s -u -l %d -R -b %d -i 1 -t 99999' % (os.environ[('IP_CLIENT%s' % node)], server_port, tos, length, bitrate)))]
logger.debug(get_log_cmd(cmd_server))
logger.debug(get_log_cmd(cmd_client))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd_server)
pid_client = run_fn(cmd_client)
processes.add_known_pid(pid_server)
processes.add_known_pid(pid_client)
def stop_test():
processes.kill_pid(pid_client)
processes.kill_pid(pid_server)
return stop_test
|
def udp(dry_run, testbed, hint_fn, run_fn, bitrate, node='a', ect='nonect', tag=None):
'\n Run UDP traffic at a constant bitrate\n\n ect: ect0 = ECT(0), ect1 = ECT(1), all other is Non-ECT\n\n Tagging makes it possible to map similar traffic from multiple tests,\n despite being different ports and setup\n\n Returns a lambda to stop the traffic\n '
tos =
if (ect == 'ect1'):
tos = '--tos 0x01'
elif (ect == 'ect0'):
tos = '--tos 0x02'
else:
ect = 'nonect'
node = ('A' if (node == 'a') else 'B')
server_port = testbed.get_next_traffic_port(('CLIENT%s' % node))
hint_fn(('traffic=udp node=%s%s client=%s rate=%d ect=%s tag=%s' % (node, node, server_port, bitrate, ect, ('No-tag' if (tag is None) else tag))))
cmd_server = ssh[('-tt', os.environ[('IP_CLIENT%s_MGMT' % node)], ('iperf -s -p %d' % server_port))]
framesize = 1514
headers = 42
length = (framesize - headers)
bitrate = ((bitrate * length) / framesize)
cmd_client = ssh[('-tt', os.environ[('IP_SERVER%s_MGMT' % node)], ('sleep 0.5; iperf -c %s -p %d %s -u -l %d -R -b %d -i 1 -t 99999' % (os.environ[('IP_CLIENT%s' % node)], server_port, tos, length, bitrate)))]
logger.debug(get_log_cmd(cmd_server))
logger.debug(get_log_cmd(cmd_client))
if dry_run:
def stop_test():
pass
else:
pid_server = run_fn(cmd_server)
pid_client = run_fn(cmd_client)
processes.add_known_pid(pid_server)
processes.add_known_pid(pid_client)
def stop_test():
processes.kill_pid(pid_client)
processes.kill_pid(pid_server)
return stop_test<|docstring|>Run UDP traffic at a constant bitrate
ect: ect0 = ECT(0), ect1 = ECT(1), all other is Non-ECT
Tagging makes it possible to map similar traffic from multiple tests,
despite being different ports and setup
Returns a lambda to stop the traffic<|endoftext|>
|
0c618d80dcd08529ddb72918a18592e08305b0026d0bf4d1cfa118e3cc0f9833
|
def testEmpty(self):
'\n If passed an empty readsAlignments, titleCounts must return an\n empty dictionary.\n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
self.assertEqual({}, titleCounts(readsAlignments))
|
If passed an empty readsAlignments, titleCounts must return an
empty dictionary.
|
test/diamond/test_titles.py
|
testEmpty
|
TaliVeith/dark-matter
| 10
|
python
|
def testEmpty(self):
'\n If passed an empty readsAlignments, titleCounts must return an\n empty dictionary.\n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
self.assertEqual({}, titleCounts(readsAlignments))
|
def testEmpty(self):
'\n If passed an empty readsAlignments, titleCounts must return an\n empty dictionary.\n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
self.assertEqual({}, titleCounts(readsAlignments))<|docstring|>If passed an empty readsAlignments, titleCounts must return an
empty dictionary.<|endoftext|>
|
abecd9627ea6d4f8e901e2c75638e281d19cda11ea2c01d99b07cc4555813a50
|
def testThreeRecords(self):
'\n If alignments for three reads are passed to titleCounts, it must\n return the correct title counts.\n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
self.assertEqual({'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99': 1, 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.': 1, 'gi|887699|gb|DQ37780 Cowpox virus 15': 1, 'gi|887699|gb|DQ37780 Monkeypox virus 456': 1, 'gi|887699|gb|DQ37780 Squirrelpox virus 55': 1}, titleCounts(readsAlignments))
|
If alignments for three reads are passed to titleCounts, it must
return the correct title counts.
|
test/diamond/test_titles.py
|
testThreeRecords
|
TaliVeith/dark-matter
| 10
|
python
|
def testThreeRecords(self):
'\n If alignments for three reads are passed to titleCounts, it must\n return the correct title counts.\n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
self.assertEqual({'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99': 1, 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.': 1, 'gi|887699|gb|DQ37780 Cowpox virus 15': 1, 'gi|887699|gb|DQ37780 Monkeypox virus 456': 1, 'gi|887699|gb|DQ37780 Squirrelpox virus 55': 1}, titleCounts(readsAlignments))
|
def testThreeRecords(self):
'\n If alignments for three reads are passed to titleCounts, it must\n return the correct title counts.\n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
self.assertEqual({'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99': 1, 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.': 1, 'gi|887699|gb|DQ37780 Cowpox virus 15': 1, 'gi|887699|gb|DQ37780 Monkeypox virus 456': 1, 'gi|887699|gb|DQ37780 Squirrelpox virus 55': 1}, titleCounts(readsAlignments))<|docstring|>If alignments for three reads are passed to titleCounts, it must
return the correct title counts.<|endoftext|>
|
9501ff8e3719319ad9a81a8246d47d63ee6dfe0dfa92e4ee7287e20cac585e17
|
def testDuplicatedTitle(self):
'\n If alignments for reads have a common title, the count on that title\n must be correct.\n '
mockOpener = mock_open(read_data=(((((dumps(PARAMS) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
self.assertEqual({'gi|887699|gb|DQ37780 Cowpox virus 15': 2}, titleCounts(readsAlignments))
|
If alignments for reads have a common title, the count on that title
must be correct.
|
test/diamond/test_titles.py
|
testDuplicatedTitle
|
TaliVeith/dark-matter
| 10
|
python
|
def testDuplicatedTitle(self):
'\n If alignments for reads have a common title, the count on that title\n must be correct.\n '
mockOpener = mock_open(read_data=(((((dumps(PARAMS) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
self.assertEqual({'gi|887699|gb|DQ37780 Cowpox virus 15': 2}, titleCounts(readsAlignments))
|
def testDuplicatedTitle(self):
'\n If alignments for reads have a common title, the count on that title\n must be correct.\n '
mockOpener = mock_open(read_data=(((((dumps(PARAMS) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
self.assertEqual({'gi|887699|gb|DQ37780 Cowpox virus 15': 2}, titleCounts(readsAlignments))<|docstring|>If alignments for reads have a common title, the count on that title
must be correct.<|endoftext|>
|
1d33cb07a763f8be931eafa13c14783ecfbbd3634f5f0cc38cefca688d3f93cf
|
def testEmpty(self):
'\n An instance of TitlesAlignments must have no titles if passed an\n empty readsAlignments instance.\n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual([], list(titlesAlignments))
|
An instance of TitlesAlignments must have no titles if passed an
empty readsAlignments instance.
|
test/diamond/test_titles.py
|
testEmpty
|
TaliVeith/dark-matter
| 10
|
python
|
def testEmpty(self):
'\n An instance of TitlesAlignments must have no titles if passed an\n empty readsAlignments instance.\n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual([], list(titlesAlignments))
|
def testEmpty(self):
'\n An instance of TitlesAlignments must have no titles if passed an\n empty readsAlignments instance.\n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual([], list(titlesAlignments))<|docstring|>An instance of TitlesAlignments must have no titles if passed an
empty readsAlignments instance.<|endoftext|>
|
fd38a178922ff65fec4e8041706edd835fee9bc47808db727508c115521422e1
|
def testExpectedTitles(self):
'\n An instance of TitlesAlignments must have the expected titles.\n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(titlesAlignments))
|
An instance of TitlesAlignments must have the expected titles.
|
test/diamond/test_titles.py
|
testExpectedTitles
|
TaliVeith/dark-matter
| 10
|
python
|
def testExpectedTitles(self):
'\n \n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(titlesAlignments))
|
def testExpectedTitles(self):
'\n \n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(titlesAlignments))<|docstring|>An instance of TitlesAlignments must have the expected titles.<|endoftext|>
|
95f4aa405986809266b13a6a4e0c2fe71ef84502406605c2069d65ac760a9d75
|
def testExpectedTitleDetails(self):
'\n An instance of TitleAlignments in a TitlesAlignments instance must\n have the expected attributes.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
read = Read('id0', ('A' * 70))
reads.add(read)
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(37000, titleAlignments.subjectLength)
self.assertEqual(1, len(titleAlignments))
self.assertEqual(read, titleAlignments[0].read)
self.assertEqual(HSP(20), titleAlignments[0].hsps[0])
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 55'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(38000, titleAlignments.subjectLength)
self.assertEqual(1, len(titleAlignments))
self.assertEqual(read, titleAlignments[0].read)
self.assertEqual(HSP(25), titleAlignments[0].hsps[0])
|
An instance of TitleAlignments in a TitlesAlignments instance must
have the expected attributes.
|
test/diamond/test_titles.py
|
testExpectedTitleDetails
|
TaliVeith/dark-matter
| 10
|
python
|
def testExpectedTitleDetails(self):
'\n An instance of TitleAlignments in a TitlesAlignments instance must\n have the expected attributes.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
read = Read('id0', ('A' * 70))
reads.add(read)
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(37000, titleAlignments.subjectLength)
self.assertEqual(1, len(titleAlignments))
self.assertEqual(read, titleAlignments[0].read)
self.assertEqual(HSP(20), titleAlignments[0].hsps[0])
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 55'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(38000, titleAlignments.subjectLength)
self.assertEqual(1, len(titleAlignments))
self.assertEqual(read, titleAlignments[0].read)
self.assertEqual(HSP(25), titleAlignments[0].hsps[0])
|
def testExpectedTitleDetails(self):
'\n An instance of TitleAlignments in a TitlesAlignments instance must\n have the expected attributes.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
read = Read('id0', ('A' * 70))
reads.add(read)
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(37000, titleAlignments.subjectLength)
self.assertEqual(1, len(titleAlignments))
self.assertEqual(read, titleAlignments[0].read)
self.assertEqual(HSP(20), titleAlignments[0].hsps[0])
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 55'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(38000, titleAlignments.subjectLength)
self.assertEqual(1, len(titleAlignments))
self.assertEqual(read, titleAlignments[0].read)
self.assertEqual(HSP(25), titleAlignments[0].hsps[0])<|docstring|>An instance of TitleAlignments in a TitlesAlignments instance must
have the expected attributes.<|endoftext|>
|
4143e05510c0172e00bd743d0c3e7e5799387a0298870bd345f3905f17fb8228
|
def testTitleCollection(self):
'\n A title that occurs in the alignments of multiple reads must have\n the data from both reads collected properly.\n '
mockOpener = mock_open(read_data=(((((dumps(PARAMS) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
read2 = Read('id2', ('A' * 70))
read3 = Read('id3', ('A' * 70))
reads.add(read2)
reads.add(read3)
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Cowpox virus 15'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(30000, titleAlignments.subjectLength)
self.assertEqual(2, len(titleAlignments))
self.assertEqual(read2, titleAlignments[0].read)
self.assertEqual(HSP(20), titleAlignments[0].hsps[0])
self.assertEqual(read3, titleAlignments[1].read)
self.assertEqual(HSP(20), titleAlignments[1].hsps[0])
|
A title that occurs in the alignments of multiple reads must have
the data from both reads collected properly.
|
test/diamond/test_titles.py
|
testTitleCollection
|
TaliVeith/dark-matter
| 10
|
python
|
def testTitleCollection(self):
'\n A title that occurs in the alignments of multiple reads must have\n the data from both reads collected properly.\n '
mockOpener = mock_open(read_data=(((((dumps(PARAMS) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
read2 = Read('id2', ('A' * 70))
read3 = Read('id3', ('A' * 70))
reads.add(read2)
reads.add(read3)
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Cowpox virus 15'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(30000, titleAlignments.subjectLength)
self.assertEqual(2, len(titleAlignments))
self.assertEqual(read2, titleAlignments[0].read)
self.assertEqual(HSP(20), titleAlignments[0].hsps[0])
self.assertEqual(read3, titleAlignments[1].read)
self.assertEqual(HSP(20), titleAlignments[1].hsps[0])
|
def testTitleCollection(self):
'\n A title that occurs in the alignments of multiple reads must have\n the data from both reads collected properly.\n '
mockOpener = mock_open(read_data=(((((dumps(PARAMS) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
read2 = Read('id2', ('A' * 70))
read3 = Read('id3', ('A' * 70))
reads.add(read2)
reads.add(read3)
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Cowpox virus 15'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(30000, titleAlignments.subjectLength)
self.assertEqual(2, len(titleAlignments))
self.assertEqual(read2, titleAlignments[0].read)
self.assertEqual(HSP(20), titleAlignments[0].hsps[0])
self.assertEqual(read3, titleAlignments[1].read)
self.assertEqual(HSP(20), titleAlignments[1].hsps[0])<|docstring|>A title that occurs in the alignments of multiple reads must have
the data from both reads collected properly.<|endoftext|>
|
2c2bf3f148f1a98495ff725f3c62384371fc697b82271e08a6a5bab13e06246f
|
def testAddTitleRepeat(self):
'\n The addTitle function must raise a C{KeyError} if an attempt is made\n to add a pre-existing title to a TitlesAlignments instance.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'
titleAlignments = TitleAlignments(title, 55)
error = "Title 'gi\\|887699\\|gb\\|DQ37780 Squirrelpox virus 1296/99' already present in TitlesAlignments instance\\."
six.assertRaisesRegex(self, KeyError, error, titlesAlignments.addTitle, title, titleAlignments)
|
The addTitle function must raise a C{KeyError} if an attempt is made
to add a pre-existing title to a TitlesAlignments instance.
|
test/diamond/test_titles.py
|
testAddTitleRepeat
|
TaliVeith/dark-matter
| 10
|
python
|
def testAddTitleRepeat(self):
'\n The addTitle function must raise a C{KeyError} if an attempt is made\n to add a pre-existing title to a TitlesAlignments instance.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'
titleAlignments = TitleAlignments(title, 55)
error = "Title 'gi\\|887699\\|gb\\|DQ37780 Squirrelpox virus 1296/99' already present in TitlesAlignments instance\\."
six.assertRaisesRegex(self, KeyError, error, titlesAlignments.addTitle, title, titleAlignments)
|
def testAddTitleRepeat(self):
'\n The addTitle function must raise a C{KeyError} if an attempt is made\n to add a pre-existing title to a TitlesAlignments instance.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'
titleAlignments = TitleAlignments(title, 55)
error = "Title 'gi\\|887699\\|gb\\|DQ37780 Squirrelpox virus 1296/99' already present in TitlesAlignments instance\\."
six.assertRaisesRegex(self, KeyError, error, titlesAlignments.addTitle, title, titleAlignments)<|docstring|>The addTitle function must raise a C{KeyError} if an attempt is made
to add a pre-existing title to a TitlesAlignments instance.<|endoftext|>
|
5873442f9c18398e24cd56e4c436cc652fa8a397df904f17ae922b03e9ac4345
|
def testAddTitle(self):
'\n The addTitle function must add a title to the TitlesAlignments\n instance.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 23'
titleAlignments = TitleAlignments(title, 55)
self.assertTrue((title not in titlesAlignments))
titlesAlignments.addTitle(title, titleAlignments)
self.assertTrue((title in titlesAlignments))
|
The addTitle function must add a title to the TitlesAlignments
instance.
|
test/diamond/test_titles.py
|
testAddTitle
|
TaliVeith/dark-matter
| 10
|
python
|
def testAddTitle(self):
'\n The addTitle function must add a title to the TitlesAlignments\n instance.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 23'
titleAlignments = TitleAlignments(title, 55)
self.assertTrue((title not in titlesAlignments))
titlesAlignments.addTitle(title, titleAlignments)
self.assertTrue((title in titlesAlignments))
|
def testAddTitle(self):
'\n The addTitle function must add a title to the TitlesAlignments\n instance.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 23'
titleAlignments = TitleAlignments(title, 55)
self.assertTrue((title not in titlesAlignments))
titlesAlignments.addTitle(title, titleAlignments)
self.assertTrue((title in titlesAlignments))<|docstring|>The addTitle function must add a title to the TitlesAlignments
instance.<|endoftext|>
|
c8d74c7a48ec2a1a037510de7721f3c5487cb2e83cd1143ce01bc8e6eb2053e6
|
def testHsps(self):
'\n The hsps function must yield all the hsps for all titles in a\n TitlesAlignments instance.\n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = list(titlesAlignments.hsps())
self.assertEqual(sorted([HSP(20), HSP(25), HSP(20), HSP(20), HSP(20)]), sorted(result))
|
The hsps function must yield all the hsps for all titles in a
TitlesAlignments instance.
|
test/diamond/test_titles.py
|
testHsps
|
TaliVeith/dark-matter
| 10
|
python
|
def testHsps(self):
'\n The hsps function must yield all the hsps for all titles in a\n TitlesAlignments instance.\n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = list(titlesAlignments.hsps())
self.assertEqual(sorted([HSP(20), HSP(25), HSP(20), HSP(20), HSP(20)]), sorted(result))
|
def testHsps(self):
'\n The hsps function must yield all the hsps for all titles in a\n TitlesAlignments instance.\n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = list(titlesAlignments.hsps())
self.assertEqual(sorted([HSP(20), HSP(25), HSP(20), HSP(20), HSP(20)]), sorted(result))<|docstring|>The hsps function must yield all the hsps for all titles in a
TitlesAlignments instance.<|endoftext|>
|
c58e765bbe946aaf5f4e7aa0cdd8c3c27a5b91263a3f6c2031daeeabbec983e1
|
def testSummary(self):
'\n The summary function must return the correct result.\n '
mockOpener = mock_open(read_data=(((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual([{'bestScore': 20.0, 'coverage': 0.00031428571428571427, 'hspCount': 1, 'medianScore': 20.0, 'readCount': 1, 'subjectLength': 35000, 'subjectTitle': 'gi|887699|gb|DQ37780 Monkeypox virus 456'}, {'bestScore': 20.0, 'coverage': 0.00031428571428571427, 'hspCount': 1, 'medianScore': 20.0, 'readCount': 1, 'subjectLength': 35000, 'subjectTitle': 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'}, {'bestScore': 20.0, 'coverage': 0.0002972972972972973, 'hspCount': 1, 'medianScore': 20.0, 'readCount': 1, 'subjectLength': 37000, 'subjectTitle': 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'}, {'bestScore': 25.0, 'coverage': 0.00028947368421052634, 'hspCount': 1, 'medianScore': 25.0, 'readCount': 1, 'subjectLength': 38000, 'subjectTitle': 'gi|887699|gb|DQ37780 Squirrelpox virus 55'}], list(titlesAlignments.summary(sortOn='title')))
|
The summary function must return the correct result.
|
test/diamond/test_titles.py
|
testSummary
|
TaliVeith/dark-matter
| 10
|
python
|
def testSummary(self):
'\n \n '
mockOpener = mock_open(read_data=(((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual([{'bestScore': 20.0, 'coverage': 0.00031428571428571427, 'hspCount': 1, 'medianScore': 20.0, 'readCount': 1, 'subjectLength': 35000, 'subjectTitle': 'gi|887699|gb|DQ37780 Monkeypox virus 456'}, {'bestScore': 20.0, 'coverage': 0.00031428571428571427, 'hspCount': 1, 'medianScore': 20.0, 'readCount': 1, 'subjectLength': 35000, 'subjectTitle': 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'}, {'bestScore': 20.0, 'coverage': 0.0002972972972972973, 'hspCount': 1, 'medianScore': 20.0, 'readCount': 1, 'subjectLength': 37000, 'subjectTitle': 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'}, {'bestScore': 25.0, 'coverage': 0.00028947368421052634, 'hspCount': 1, 'medianScore': 25.0, 'readCount': 1, 'subjectLength': 38000, 'subjectTitle': 'gi|887699|gb|DQ37780 Squirrelpox virus 55'}], list(titlesAlignments.summary(sortOn='title')))
|
def testSummary(self):
'\n \n '
mockOpener = mock_open(read_data=(((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual([{'bestScore': 20.0, 'coverage': 0.00031428571428571427, 'hspCount': 1, 'medianScore': 20.0, 'readCount': 1, 'subjectLength': 35000, 'subjectTitle': 'gi|887699|gb|DQ37780 Monkeypox virus 456'}, {'bestScore': 20.0, 'coverage': 0.00031428571428571427, 'hspCount': 1, 'medianScore': 20.0, 'readCount': 1, 'subjectLength': 35000, 'subjectTitle': 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'}, {'bestScore': 20.0, 'coverage': 0.0002972972972972973, 'hspCount': 1, 'medianScore': 20.0, 'readCount': 1, 'subjectLength': 37000, 'subjectTitle': 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'}, {'bestScore': 25.0, 'coverage': 0.00028947368421052634, 'hspCount': 1, 'medianScore': 25.0, 'readCount': 1, 'subjectLength': 38000, 'subjectTitle': 'gi|887699|gb|DQ37780 Squirrelpox virus 55'}], list(titlesAlignments.summary(sortOn='title')))<|docstring|>The summary function must return the correct result.<|endoftext|>
|
4780c023bb75aabd85d343461c8a124c83afa51f3db7a2bf8c6a7a385fde8459
|
def testTabSeparatedSummary(self):
'\n The summary function must return the correct result.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'f.json')
titlesAlignments = TitlesAlignments(readsAlignments)
summary = titlesAlignments.tabSeparatedSummary(sortOn='title')
expected = '0.000297\t20.000000\t20.000000\t1\t1\t37000\tgi|887699|gb|DQ37780 Squirrelpox virus 1296/99\n0.000289\t25.000000\t25.000000\t1\t1\t38000\tgi|887699|gb|DQ37780 Squirrelpox virus 55'
self.assertEqual(expected, summary)
|
The summary function must return the correct result.
|
test/diamond/test_titles.py
|
testTabSeparatedSummary
|
TaliVeith/dark-matter
| 10
|
python
|
def testTabSeparatedSummary(self):
'\n \n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'f.json')
titlesAlignments = TitlesAlignments(readsAlignments)
summary = titlesAlignments.tabSeparatedSummary(sortOn='title')
expected = '0.000297\t20.000000\t20.000000\t1\t1\t37000\tgi|887699|gb|DQ37780 Squirrelpox virus 1296/99\n0.000289\t25.000000\t25.000000\t1\t1\t38000\tgi|887699|gb|DQ37780 Squirrelpox virus 55'
self.assertEqual(expected, summary)
|
def testTabSeparatedSummary(self):
'\n \n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'f.json')
titlesAlignments = TitlesAlignments(readsAlignments)
summary = titlesAlignments.tabSeparatedSummary(sortOn='title')
expected = '0.000297\t20.000000\t20.000000\t1\t1\t37000\tgi|887699|gb|DQ37780 Squirrelpox virus 1296/99\n0.000289\t25.000000\t25.000000\t1\t1\t38000\tgi|887699|gb|DQ37780 Squirrelpox virus 55'
self.assertEqual(expected, summary)<|docstring|>The summary function must return the correct result.<|endoftext|>
|
81ebb94d5ca1ece1f8d1fbe6be5a011b582572f0ba8cc763a90ef927ae1576ed
|
def testFilterWithNoArguments(self):
'\n The filter function must return a TitlesAlignments instance with all\n the titles of the original when called with no arguments.\n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter()
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
The filter function must return a TitlesAlignments instance with all
the titles of the original when called with no arguments.
|
test/diamond/test_titles.py
|
testFilterWithNoArguments
|
TaliVeith/dark-matter
| 10
|
python
|
def testFilterWithNoArguments(self):
'\n The filter function must return a TitlesAlignments instance with all\n the titles of the original when called with no arguments.\n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter()
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
def testFilterWithNoArguments(self):
'\n The filter function must return a TitlesAlignments instance with all\n the titles of the original when called with no arguments.\n '
mockOpener = mock_open(read_data=(((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter()
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))<|docstring|>The filter function must return a TitlesAlignments instance with all
the titles of the original when called with no arguments.<|endoftext|>
|
2cff71aaae5ed9365b0c7da2efed985d1bade06c7fe8f1c0a86738b78d9051a2
|
def testMinMatchingReads(self):
'\n The filter function must work correctly when passed a value for\n minMatchingReads.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMatchingReads=2)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15'], list(result))
|
The filter function must work correctly when passed a value for
minMatchingReads.
|
test/diamond/test_titles.py
|
testMinMatchingReads
|
TaliVeith/dark-matter
| 10
|
python
|
def testMinMatchingReads(self):
'\n The filter function must work correctly when passed a value for\n minMatchingReads.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMatchingReads=2)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15'], list(result))
|
def testMinMatchingReads(self):
'\n The filter function must work correctly when passed a value for\n minMatchingReads.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMatchingReads=2)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15'], list(result))<|docstring|>The filter function must work correctly when passed a value for
minMatchingReads.<|endoftext|>
|
ce41abc2dc628bf12b761e1395fbd46c97dc773dade86c2f492b0a34e56533c1
|
def testMaxMatchingReads(self):
'\n The filter function must work correctly when passed a value for\n maxMatchingReads.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxMatchingReads=1)
self.assertEqual(sorted(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.']), sorted(result))
|
The filter function must work correctly when passed a value for
maxMatchingReads.
|
test/diamond/test_titles.py
|
testMaxMatchingReads
|
TaliVeith/dark-matter
| 10
|
python
|
def testMaxMatchingReads(self):
'\n The filter function must work correctly when passed a value for\n maxMatchingReads.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxMatchingReads=1)
self.assertEqual(sorted(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.']), sorted(result))
|
def testMaxMatchingReads(self):
'\n The filter function must work correctly when passed a value for\n maxMatchingReads.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxMatchingReads=1)
self.assertEqual(sorted(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.']), sorted(result))<|docstring|>The filter function must work correctly when passed a value for
maxMatchingReads.<|endoftext|>
|
666572375ba66fbb3a20ec6c414720d84c47f7f336c3a9f3bb910df4d5ad154a
|
def testMinMedianScore_Bits(self):
'\n The filter function must work correctly when passed a value for\n minMedianScore when using bit scores.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMedianScore=22)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55'], list(result))
|
The filter function must work correctly when passed a value for
minMedianScore when using bit scores.
|
test/diamond/test_titles.py
|
testMinMedianScore_Bits
|
TaliVeith/dark-matter
| 10
|
python
|
def testMinMedianScore_Bits(self):
'\n The filter function must work correctly when passed a value for\n minMedianScore when using bit scores.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMedianScore=22)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55'], list(result))
|
def testMinMedianScore_Bits(self):
'\n The filter function must work correctly when passed a value for\n minMedianScore when using bit scores.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMedianScore=22)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55'], list(result))<|docstring|>The filter function must work correctly when passed a value for
minMedianScore when using bit scores.<|endoftext|>
|
a344bb022240713eaa46fbde83b63ea8a562ab8b42d2013093b6f456f090325f
|
def testMinMedianScore_EValue(self):
'\n The filter function must work correctly when passed a value for\n minMedianScore when using e values.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMedianScore=1e-09)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
The filter function must work correctly when passed a value for
minMedianScore when using e values.
|
test/diamond/test_titles.py
|
testMinMedianScore_EValue
|
TaliVeith/dark-matter
| 10
|
python
|
def testMinMedianScore_EValue(self):
'\n The filter function must work correctly when passed a value for\n minMedianScore when using e values.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMedianScore=1e-09)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
def testMinMedianScore_EValue(self):
'\n The filter function must work correctly when passed a value for\n minMedianScore when using e values.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMedianScore=1e-09)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))<|docstring|>The filter function must work correctly when passed a value for
minMedianScore when using e values.<|endoftext|>
|
6a2e6cecf01201e16702e9931c1b0d47b631b83cea737b12a17b919874740b6a
|
def testWithScoreBetterThan_Bits(self):
'\n The filter function must work correctly when passed a value for\n withScoreBetterThan when using bit scores.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(withScoreBetterThan=24)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55'], list(result))
|
The filter function must work correctly when passed a value for
withScoreBetterThan when using bit scores.
|
test/diamond/test_titles.py
|
testWithScoreBetterThan_Bits
|
TaliVeith/dark-matter
| 10
|
python
|
def testWithScoreBetterThan_Bits(self):
'\n The filter function must work correctly when passed a value for\n withScoreBetterThan when using bit scores.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(withScoreBetterThan=24)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55'], list(result))
|
def testWithScoreBetterThan_Bits(self):
'\n The filter function must work correctly when passed a value for\n withScoreBetterThan when using bit scores.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(withScoreBetterThan=24)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55'], list(result))<|docstring|>The filter function must work correctly when passed a value for
withScoreBetterThan when using bit scores.<|endoftext|>
|
9b12fb76996882e4070e001bac7bdae32cda64caa10cc775cfdf2b5b51efdf54
|
def testWithScoreBetterThan_EValue(self):
'\n The filter function must work correctly when passed a value for\n withScoreBetterThan when using e values.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(withScoreBetterThan=1e-10)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'], list(result))
|
The filter function must work correctly when passed a value for
withScoreBetterThan when using e values.
|
test/diamond/test_titles.py
|
testWithScoreBetterThan_EValue
|
TaliVeith/dark-matter
| 10
|
python
|
def testWithScoreBetterThan_EValue(self):
'\n The filter function must work correctly when passed a value for\n withScoreBetterThan when using e values.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(withScoreBetterThan=1e-10)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'], list(result))
|
def testWithScoreBetterThan_EValue(self):
'\n The filter function must work correctly when passed a value for\n withScoreBetterThan when using e values.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(withScoreBetterThan=1e-10)
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'], list(result))<|docstring|>The filter function must work correctly when passed a value for
withScoreBetterThan when using e values.<|endoftext|>
|
f9f40a1bb50bb5fb276abe733bdf83c8550327193d2a5418d8b1cb1ac5118764
|
def testReadSetFilterAllowAnything(self):
'\n The filter function must work correctly when passed a 0.0 value for\n minNewReads, i.e. that considers any read set sufficiently novel.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minNewReads=0.0)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
The filter function must work correctly when passed a 0.0 value for
minNewReads, i.e. that considers any read set sufficiently novel.
|
test/diamond/test_titles.py
|
testReadSetFilterAllowAnything
|
TaliVeith/dark-matter
| 10
|
python
|
def testReadSetFilterAllowAnything(self):
'\n The filter function must work correctly when passed a 0.0 value for\n minNewReads, i.e. that considers any read set sufficiently novel.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minNewReads=0.0)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
def testReadSetFilterAllowAnything(self):
'\n The filter function must work correctly when passed a 0.0 value for\n minNewReads, i.e. that considers any read set sufficiently novel.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minNewReads=0.0)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))<|docstring|>The filter function must work correctly when passed a 0.0 value for
minNewReads, i.e. that considers any read set sufficiently novel.<|endoftext|>
|
69823f38ae6fcddfe047d913c9dd3c78e4028c95f51702740eb480082e64ceba
|
def testReadSetFilterStrict(self):
'\n The filter function must work correctly when passed a 1.0 value for\n minNewReads.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minNewReads=1.0)
mummypox = 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'
monkeypox = 'gi|887699|gb|DQ37780 Monkeypox virus 456'
assertionCount = 0
if (mummypox in result):
self.assertTrue((monkeypox in result.readSetFilter.invalidates(mummypox)))
assertionCount += 1
if (monkeypox in result):
self.assertTrue((mummypox in result.readSetFilter.invalidates(monkeypox)))
assertionCount += 1
self.assertEqual(1, assertionCount)
|
The filter function must work correctly when passed a 1.0 value for
minNewReads.
|
test/diamond/test_titles.py
|
testReadSetFilterStrict
|
TaliVeith/dark-matter
| 10
|
python
|
def testReadSetFilterStrict(self):
'\n The filter function must work correctly when passed a 1.0 value for\n minNewReads.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minNewReads=1.0)
mummypox = 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'
monkeypox = 'gi|887699|gb|DQ37780 Monkeypox virus 456'
assertionCount = 0
if (mummypox in result):
self.assertTrue((monkeypox in result.readSetFilter.invalidates(mummypox)))
assertionCount += 1
if (monkeypox in result):
self.assertTrue((mummypox in result.readSetFilter.invalidates(monkeypox)))
assertionCount += 1
self.assertEqual(1, assertionCount)
|
def testReadSetFilterStrict(self):
'\n The filter function must work correctly when passed a 1.0 value for\n minNewReads.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minNewReads=1.0)
mummypox = 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'
monkeypox = 'gi|887699|gb|DQ37780 Monkeypox virus 456'
assertionCount = 0
if (mummypox in result):
self.assertTrue((monkeypox in result.readSetFilter.invalidates(mummypox)))
assertionCount += 1
if (monkeypox in result):
self.assertTrue((mummypox in result.readSetFilter.invalidates(monkeypox)))
assertionCount += 1
self.assertEqual(1, assertionCount)<|docstring|>The filter function must work correctly when passed a 1.0 value for
minNewReads.<|endoftext|>
|
d6868aa477818c9fc3ce178d95bc8545960f0f610f28c880fbfdc5728a3eeb73
|
def testCoverageExcludesAll(self):
'\n The coverage function must return an titlesAlignments instance with\n no titles if none of its titles has sufficient coverage.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minCoverage=0.1)
self.assertEqual(0, len(result))
|
The coverage function must return an titlesAlignments instance with
no titles if none of its titles has sufficient coverage.
|
test/diamond/test_titles.py
|
testCoverageExcludesAll
|
TaliVeith/dark-matter
| 10
|
python
|
def testCoverageExcludesAll(self):
'\n The coverage function must return an titlesAlignments instance with\n no titles if none of its titles has sufficient coverage.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minCoverage=0.1)
self.assertEqual(0, len(result))
|
def testCoverageExcludesAll(self):
'\n The coverage function must return an titlesAlignments instance with\n no titles if none of its titles has sufficient coverage.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minCoverage=0.1)
self.assertEqual(0, len(result))<|docstring|>The coverage function must return an titlesAlignments instance with
no titles if none of its titles has sufficient coverage.<|endoftext|>
|
ac57b6c775a62e69e9958ff153f87e5dd8cb42f4ab327fa44660544faed1ab12
|
def testCoverageIncludesAll(self):
'\n The coverage function must return an titlesAlignments instance with\n all titles if all its titles has sufficient coverage.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minCoverage=0.0)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
The coverage function must return an titlesAlignments instance with
all titles if all its titles has sufficient coverage.
|
test/diamond/test_titles.py
|
testCoverageIncludesAll
|
TaliVeith/dark-matter
| 10
|
python
|
def testCoverageIncludesAll(self):
'\n The coverage function must return an titlesAlignments instance with\n all titles if all its titles has sufficient coverage.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minCoverage=0.0)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
def testCoverageIncludesAll(self):
'\n The coverage function must return an titlesAlignments instance with\n all titles if all its titles has sufficient coverage.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minCoverage=0.0)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))<|docstring|>The coverage function must return an titlesAlignments instance with
all titles if all its titles has sufficient coverage.<|endoftext|>
|
6c767def65db30a630f5960a33817ab1e05fc6d146487af02360a16b0c150d25
|
def testCoverageIncludesSome(self):
'\n The coverage function must return an titlesAlignments instance with\n only the expected titles if only some of its titles have sufficient\n coverage.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minCoverage=0.0003)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'], sorted(result))
|
The coverage function must return an titlesAlignments instance with
only the expected titles if only some of its titles have sufficient
coverage.
|
test/diamond/test_titles.py
|
testCoverageIncludesSome
|
TaliVeith/dark-matter
| 10
|
python
|
def testCoverageIncludesSome(self):
'\n The coverage function must return an titlesAlignments instance with\n only the expected titles if only some of its titles have sufficient\n coverage.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minCoverage=0.0003)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'], sorted(result))
|
def testCoverageIncludesSome(self):
'\n The coverage function must return an titlesAlignments instance with\n only the expected titles if only some of its titles have sufficient\n coverage.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minCoverage=0.0003)
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'], sorted(result))<|docstring|>The coverage function must return an titlesAlignments instance with
only the expected titles if only some of its titles have sufficient
coverage.<|endoftext|>
|
d9c782814262807b5b6e52598656372989b263ae538d03b2298e69e2746cfb51
|
def testMaxTitlesNegative(self):
'\n The filter function must raise a ValueError if maxTitles is less than\n zero.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
error = '^maxTitles \\(-1\\) cannot be negative\\.$'
six.assertRaisesRegex(self, ValueError, error, titlesAlignments.filter, maxTitles=(- 1))
|
The filter function must raise a ValueError if maxTitles is less than
zero.
|
test/diamond/test_titles.py
|
testMaxTitlesNegative
|
TaliVeith/dark-matter
| 10
|
python
|
def testMaxTitlesNegative(self):
'\n The filter function must raise a ValueError if maxTitles is less than\n zero.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
error = '^maxTitles \\(-1\\) cannot be negative\\.$'
six.assertRaisesRegex(self, ValueError, error, titlesAlignments.filter, maxTitles=(- 1))
|
def testMaxTitlesNegative(self):
'\n The filter function must raise a ValueError if maxTitles is less than\n zero.\n '
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
error = '^maxTitles \\(-1\\) cannot be negative\\.$'
six.assertRaisesRegex(self, ValueError, error, titlesAlignments.filter, maxTitles=(- 1))<|docstring|>The filter function must raise a ValueError if maxTitles is less than
zero.<|endoftext|>
|
1e3075f36e74f3ded2a9c45bd45d163d6076021e203626c998dc72927ab6ce8e
|
def testUnknownSortOn(self):
"\n The filter function must raise a ValueError if the passed sortOn\n value isn't recognized.\n "
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
error = '^Sort attribute must be one of "length", "maxScore", "medianScore", "readCount", "title"\\.$'
six.assertRaisesRegex(self, ValueError, error, titlesAlignments.filter, maxTitles=0, sortOn='unknown')
|
The filter function must raise a ValueError if the passed sortOn
value isn't recognized.
|
test/diamond/test_titles.py
|
testUnknownSortOn
|
TaliVeith/dark-matter
| 10
|
python
|
def testUnknownSortOn(self):
"\n The filter function must raise a ValueError if the passed sortOn\n value isn't recognized.\n "
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
error = '^Sort attribute must be one of "length", "maxScore", "medianScore", "readCount", "title"\\.$'
six.assertRaisesRegex(self, ValueError, error, titlesAlignments.filter, maxTitles=0, sortOn='unknown')
|
def testUnknownSortOn(self):
"\n The filter function must raise a ValueError if the passed sortOn\n value isn't recognized.\n "
mockOpener = mock_open(read_data=(((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
error = '^Sort attribute must be one of "length", "maxScore", "medianScore", "readCount", "title"\\.$'
six.assertRaisesRegex(self, ValueError, error, titlesAlignments.filter, maxTitles=0, sortOn='unknown')<|docstring|>The filter function must raise a ValueError if the passed sortOn
value isn't recognized.<|endoftext|>
|
9741ebc9c8476f1467e9d891908981fdc21ec540fcb9267eea1ceaf63212a318
|
def testMaxTitlesZero(self):
'\n The filter function must return an empty result when maxTitles is zero.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=0, sortOn='maxScore')
self.assertEqual(0, len(result))
|
The filter function must return an empty result when maxTitles is zero.
|
test/diamond/test_titles.py
|
testMaxTitlesZero
|
TaliVeith/dark-matter
| 10
|
python
|
def testMaxTitlesZero(self):
'\n \n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=0, sortOn='maxScore')
self.assertEqual(0, len(result))
|
def testMaxTitlesZero(self):
'\n \n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=0, sortOn='maxScore')
self.assertEqual(0, len(result))<|docstring|>The filter function must return an empty result when maxTitles is zero.<|endoftext|>
|
e22be525a35c84f0b93785a822bca06304cad6c70a62057c959ab2c9cfbf9466
|
def testMaxTitlesOne(self):
'\n The filter function must return just the best title when maxTitles\n is one.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=1, sortOn='maxScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
The filter function must return just the best title when maxTitles
is one.
|
test/diamond/test_titles.py
|
testMaxTitlesOne
|
TaliVeith/dark-matter
| 10
|
python
|
def testMaxTitlesOne(self):
'\n The filter function must return just the best title when maxTitles\n is one.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=1, sortOn='maxScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
def testMaxTitlesOne(self):
'\n The filter function must return just the best title when maxTitles\n is one.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=1, sortOn='maxScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))<|docstring|>The filter function must return just the best title when maxTitles
is one.<|endoftext|>
|
8a3cca3ad9daa9b8bd9d469933b604c474b37f0723b554b334088c188332d121
|
def testMaxTitlesTwoSortOnLength(self):
"\n The filter function must return the two titles whose sequences are the\n longest when maxTitles is 2 and sortOn is 'length'.\n "
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=2, sortOn='length')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
The filter function must return the two titles whose sequences are the
longest when maxTitles is 2 and sortOn is 'length'.
|
test/diamond/test_titles.py
|
testMaxTitlesTwoSortOnLength
|
TaliVeith/dark-matter
| 10
|
python
|
def testMaxTitlesTwoSortOnLength(self):
"\n The filter function must return the two titles whose sequences are the\n longest when maxTitles is 2 and sortOn is 'length'.\n "
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=2, sortOn='length')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))
|
def testMaxTitlesTwoSortOnLength(self):
"\n The filter function must return the two titles whose sequences are the\n longest when maxTitles is 2 and sortOn is 'length'.\n "
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=2, sortOn='length')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], sorted(result))<|docstring|>The filter function must return the two titles whose sequences are the
longest when maxTitles is 2 and sortOn is 'length'.<|endoftext|>
|
79132cce2d4c8a8e33c6e4bcbaf7a28b5c1cbdb1647d819cb8c675284e730980
|
def testUnknown(self):
'\n Sorting on an unknown attribute must raise C{ValueError}.\n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertRaises(ValueError, titlesAlignments.sortTitles, 'xxx')
|
Sorting on an unknown attribute must raise C{ValueError}.
|
test/diamond/test_titles.py
|
testUnknown
|
TaliVeith/dark-matter
| 10
|
python
|
def testUnknown(self):
'\n \n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertRaises(ValueError, titlesAlignments.sortTitles, 'xxx')
|
def testUnknown(self):
'\n \n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertRaises(ValueError, titlesAlignments.sortTitles, 'xxx')<|docstring|>Sorting on an unknown attribute must raise C{ValueError}.<|endoftext|>
|
630e253312b9810cf8c164f76f4a6e5d722abc8fe1113d4aaa5083c8e37a8e45
|
def testEmpty(self):
'\n Sorting when there are no titles must return the empty list.\n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('title')
self.assertEqual([], result)
|
Sorting when there are no titles must return the empty list.
|
test/diamond/test_titles.py
|
testEmpty
|
TaliVeith/dark-matter
| 10
|
python
|
def testEmpty(self):
'\n \n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('title')
self.assertEqual([], result)
|
def testEmpty(self):
'\n \n '
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('title')
self.assertEqual([], result)<|docstring|>Sorting when there are no titles must return the empty list.<|endoftext|>
|
03c83c8cc990641e4f682d179254936874b976112744381594b944f67d49a38f
|
def testMedianScore_Bits(self):
'\n Sorting on median score must work when scores are bit scores,\n including a secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n') + dumps(RECORD4)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
reads.add(Read('id4', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('medianScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)
|
Sorting on median score must work when scores are bit scores,
including a secondary sort on title.
|
test/diamond/test_titles.py
|
testMedianScore_Bits
|
TaliVeith/dark-matter
| 10
|
python
|
def testMedianScore_Bits(self):
'\n Sorting on median score must work when scores are bit scores,\n including a secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n') + dumps(RECORD4)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
reads.add(Read('id4', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('medianScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)
|
def testMedianScore_Bits(self):
'\n Sorting on median score must work when scores are bit scores,\n including a secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n') + dumps(RECORD4)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
reads.add(Read('id4', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('medianScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)<|docstring|>Sorting on median score must work when scores are bit scores,
including a secondary sort on title.<|endoftext|>
|
3091b7d2840c47abf055856213731ac68549b73dfc94c485212d90d505bc0480
|
def testMedianScore_EValue(self):
'\n Sorting on median score must work when scores are bit scores,\n including a secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n') + dumps(RECORD4)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
reads.add(Read('id4', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('medianScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)
|
Sorting on median score must work when scores are bit scores,
including a secondary sort on title.
|
test/diamond/test_titles.py
|
testMedianScore_EValue
|
TaliVeith/dark-matter
| 10
|
python
|
def testMedianScore_EValue(self):
'\n Sorting on median score must work when scores are bit scores,\n including a secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n') + dumps(RECORD4)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
reads.add(Read('id4', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('medianScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)
|
def testMedianScore_EValue(self):
'\n Sorting on median score must work when scores are bit scores,\n including a secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n') + dumps(RECORD4)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
reads.add(Read('id4', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('medianScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)<|docstring|>Sorting on median score must work when scores are bit scores,
including a secondary sort on title.<|endoftext|>
|
5dba2c4e57201e09b01f5e26c0d80eba7285d61271c0e5b3854fb696d1f0b4e7
|
def testMaxScore_Bits(self):
'\n Sorting on max score must work when scores are bit scores, including a\n secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('maxScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'], result)
|
Sorting on max score must work when scores are bit scores, including a
secondary sort on title.
|
test/diamond/test_titles.py
|
testMaxScore_Bits
|
TaliVeith/dark-matter
| 10
|
python
|
def testMaxScore_Bits(self):
'\n Sorting on max score must work when scores are bit scores, including a\n secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('maxScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'], result)
|
def testMaxScore_Bits(self):
'\n Sorting on max score must work when scores are bit scores, including a\n secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('maxScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'], result)<|docstring|>Sorting on max score must work when scores are bit scores, including a
secondary sort on title.<|endoftext|>
|
26166291769342def26b2f4b171d31ceb22567118b2229e47eab3a6014ad461a
|
def testMaxScore_EValue(self):
'\n Sorting on max score must work when scores are e values, including a\n secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('maxScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)
|
Sorting on max score must work when scores are e values, including a
secondary sort on title.
|
test/diamond/test_titles.py
|
testMaxScore_EValue
|
TaliVeith/dark-matter
| 10
|
python
|
def testMaxScore_EValue(self):
'\n Sorting on max score must work when scores are e values, including a\n secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('maxScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)
|
def testMaxScore_EValue(self):
'\n Sorting on max score must work when scores are e values, including a\n secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('maxScore')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)<|docstring|>Sorting on max score must work when scores are e values, including a
secondary sort on title.<|endoftext|>
|
e6af64cf244177e0972fa1f0f6cc3d1c04da5a3217e0bff77757d6203c3101a1
|
def testReadCount(self):
'\n Sorting on read count must work, including a secondary sort on title.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('readCount')
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], result)
|
Sorting on read count must work, including a secondary sort on title.
|
test/diamond/test_titles.py
|
testReadCount
|
TaliVeith/dark-matter
| 10
|
python
|
def testReadCount(self):
'\n \n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('readCount')
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], result)
|
def testReadCount(self):
'\n \n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('readCount')
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], result)<|docstring|>Sorting on read count must work, including a secondary sort on title.<|endoftext|>
|
b1d2533bc5417c791145954b3d85b88da22816f2be7b522d2b7279b0c58789aa
|
def testLength(self):
'\n Sorting on sequence length must work, including a secondary sort on\n title.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('length')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)
|
Sorting on sequence length must work, including a secondary sort on
title.
|
test/diamond/test_titles.py
|
testLength
|
TaliVeith/dark-matter
| 10
|
python
|
def testLength(self):
'\n Sorting on sequence length must work, including a secondary sort on\n title.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('length')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)
|
def testLength(self):
'\n Sorting on sequence length must work, including a secondary sort on\n title.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('length')
self.assertEqual(['gi|887699|gb|DQ37780 Squirrelpox virus 55', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Cowpox virus 15'], result)<|docstring|>Sorting on sequence length must work, including a secondary sort on
title.<|endoftext|>
|
0a00d998bade752b3685cee91db23fbaf4fe09c6f1f3131e9b579c8ff25e4952
|
def testTitle(self):
'\n Sorting on title must work.\n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('title')
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], result)
|
Sorting on title must work.
|
test/diamond/test_titles.py
|
testTitle
|
TaliVeith/dark-matter
| 10
|
python
|
def testTitle(self):
'\n \n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('title')
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], result)
|
def testTitle(self):
'\n \n '
mockOpener = mock_open(read_data=(((((((((dumps(PARAMS) + '\n') + dumps(RECORD0)) + '\n') + dumps(RECORD1)) + '\n') + dumps(RECORD2)) + '\n') + dumps(RECORD3)) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', ('A' * 70)))
reads.add(Read('id1', ('A' * 70)))
reads.add(Read('id2', ('A' * 70)))
reads.add(Read('id3', ('A' * 70)))
readsAlignments = DiamondReadsAlignments(reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('title')
self.assertEqual(['gi|887699|gb|DQ37780 Cowpox virus 15', 'gi|887699|gb|DQ37780 Monkeypox virus 456', 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', 'gi|887699|gb|DQ37780 Squirrelpox virus 55'], result)<|docstring|>Sorting on title must work.<|endoftext|>
|
23a8acd83b4bd71a6a3a2f8f645cfef5e498d1e5fce489483aeb6ebcc8dbff2c
|
def time_function(func):
'\n A method decorator that logs the time taken to execute a given method\n '
if inspect.iscoroutinefunction(func):
@wraps(func)
async def invoke_method_with_timer(*args, **kwargs):
stopwatch = _begin_stopwatch()
try:
return (await func(*args, **kwargs))
finally:
_log_time(stopwatch.stop_timer(), func.__name__)
else:
@wraps(func)
def invoke_method_with_timer(*args, **kwargs):
stopwatch = _begin_stopwatch()
try:
return func(*args, **kwargs)
finally:
_log_time(stopwatch.stop_timer(), func.__name__)
return invoke_method_with_timer
|
A method decorator that logs the time taken to execute a given method
|
common/utilities/timing.py
|
time_function
|
uk-gov-mirror/nhsconnect.integration-adaptor-mhs
| 15
|
python
|
def time_function(func):
'\n \n '
if inspect.iscoroutinefunction(func):
@wraps(func)
async def invoke_method_with_timer(*args, **kwargs):
stopwatch = _begin_stopwatch()
try:
return (await func(*args, **kwargs))
finally:
_log_time(stopwatch.stop_timer(), func.__name__)
else:
@wraps(func)
def invoke_method_with_timer(*args, **kwargs):
stopwatch = _begin_stopwatch()
try:
return func(*args, **kwargs)
finally:
_log_time(stopwatch.stop_timer(), func.__name__)
return invoke_method_with_timer
|
def time_function(func):
'\n \n '
if inspect.iscoroutinefunction(func):
@wraps(func)
async def invoke_method_with_timer(*args, **kwargs):
stopwatch = _begin_stopwatch()
try:
return (await func(*args, **kwargs))
finally:
_log_time(stopwatch.stop_timer(), func.__name__)
else:
@wraps(func)
def invoke_method_with_timer(*args, **kwargs):
stopwatch = _begin_stopwatch()
try:
return func(*args, **kwargs)
finally:
_log_time(stopwatch.stop_timer(), func.__name__)
return invoke_method_with_timer<|docstring|>A method decorator that logs the time taken to execute a given method<|endoftext|>
|
caefe515592a870f673797b42d0467e8e90387500e243cb40a04e98baa89b08e
|
def time_request(func):
'\n A method to be used with tornado end points to extract their calling details and time their execution, this\n mainly holds as a placeholder if any extra data is required from the call\n '
if inspect.iscoroutinefunction(func):
@wraps(func)
async def method_wrapper(*args, **kwargs):
handler = args[0]
stopwatch = _begin_stopwatch()
try:
return (await func(*args, **kwargs))
finally:
_log_tornado_time(stopwatch.stop_timer(), handler.__class__.__name__, handler.request.method.lower())
return method_wrapper
else:
@wraps(func)
def method_wrapper(*args, **kwargs):
handler = args[0]
stopwatch = _begin_stopwatch()
try:
return func(*args, **kwargs)
finally:
_log_tornado_time(stopwatch.stop_timer(), handler.__class__.__name__, handler.request.method.lower())
return method_wrapper
|
A method to be used with tornado end points to extract their calling details and time their execution, this
mainly holds as a placeholder if any extra data is required from the call
|
common/utilities/timing.py
|
time_request
|
uk-gov-mirror/nhsconnect.integration-adaptor-mhs
| 15
|
python
|
def time_request(func):
'\n A method to be used with tornado end points to extract their calling details and time their execution, this\n mainly holds as a placeholder if any extra data is required from the call\n '
if inspect.iscoroutinefunction(func):
@wraps(func)
async def method_wrapper(*args, **kwargs):
handler = args[0]
stopwatch = _begin_stopwatch()
try:
return (await func(*args, **kwargs))
finally:
_log_tornado_time(stopwatch.stop_timer(), handler.__class__.__name__, handler.request.method.lower())
return method_wrapper
else:
@wraps(func)
def method_wrapper(*args, **kwargs):
handler = args[0]
stopwatch = _begin_stopwatch()
try:
return func(*args, **kwargs)
finally:
_log_tornado_time(stopwatch.stop_timer(), handler.__class__.__name__, handler.request.method.lower())
return method_wrapper
|
def time_request(func):
'\n A method to be used with tornado end points to extract their calling details and time their execution, this\n mainly holds as a placeholder if any extra data is required from the call\n '
if inspect.iscoroutinefunction(func):
@wraps(func)
async def method_wrapper(*args, **kwargs):
handler = args[0]
stopwatch = _begin_stopwatch()
try:
return (await func(*args, **kwargs))
finally:
_log_tornado_time(stopwatch.stop_timer(), handler.__class__.__name__, handler.request.method.lower())
return method_wrapper
else:
@wraps(func)
def method_wrapper(*args, **kwargs):
handler = args[0]
stopwatch = _begin_stopwatch()
try:
return func(*args, **kwargs)
finally:
_log_tornado_time(stopwatch.stop_timer(), handler.__class__.__name__, handler.request.method.lower())
return method_wrapper<|docstring|>A method to be used with tornado end points to extract their calling details and time their execution, this
mainly holds as a placeholder if any extra data is required from the call<|endoftext|>
|
28099d73bb406482d363cac4b77f12443f7209bc51ea0357e3ad4f7c943ee79e
|
def get_time() -> str:
'Returns UTC time in the appropriate format '
return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
|
Returns UTC time in the appropriate format
|
common/utilities/timing.py
|
get_time
|
uk-gov-mirror/nhsconnect.integration-adaptor-mhs
| 15
|
python
|
def get_time() -> str:
' '
return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
|
def get_time() -> str:
' '
return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')<|docstring|>Returns UTC time in the appropriate format<|endoftext|>
|
683862d8e1e62c4fec2efbda04fa0e22ab38a54fbe1fc7c80952bf3f7ff18c1f
|
def init(self, board=None):
'使用棋盘矩阵初始化'
if (board is None):
self.logic = ReversiLogic(self.n)
else:
self.logic.set_pieces(board)
return self.logic.pieces
|
使用棋盘矩阵初始化
|
src/games/reversi/reversi_game.py
|
init
|
im0qianqian/Reversi-based-RL
| 61
|
python
|
def init(self, board=None):
if (board is None):
self.logic = ReversiLogic(self.n)
else:
self.logic.set_pieces(board)
return self.logic.pieces
|
def init(self, board=None):
if (board is None):
self.logic = ReversiLogic(self.n)
else:
self.logic.set_pieces(board)
return self.logic.pieces<|docstring|>使用棋盘矩阵初始化<|endoftext|>
|
c77d7989ce22b6c2578302ebcfa705d6314968467fb623ab77b7418b0f91aa65
|
def display(self, board):
'打印当前棋盘状态'
self.init(board=board)
self.logic.display()
|
打印当前棋盘状态
|
src/games/reversi/reversi_game.py
|
display
|
im0qianqian/Reversi-based-RL
| 61
|
python
|
def display(self, board):
self.init(board=board)
self.logic.display()
|
def display(self, board):
self.init(board=board)
self.logic.display()<|docstring|>打印当前棋盘状态<|endoftext|>
|
a99217c6c849137d6f598e05720d887ddc083d7b7b51e08d82be22a7a4579019
|
def get_action_size(self):
'获取动作总数,其中 self.n ** 2 为走棋,剩下一个为无路可走'
return ((self.n ** 2) + 1)
|
获取动作总数,其中 self.n ** 2 为走棋,剩下一个为无路可走
|
src/games/reversi/reversi_game.py
|
get_action_size
|
im0qianqian/Reversi-based-RL
| 61
|
python
|
def get_action_size(self):
return ((self.n ** 2) + 1)
|
def get_action_size(self):
return ((self.n ** 2) + 1)<|docstring|>获取动作总数,其中 self.n ** 2 为走棋,剩下一个为无路可走<|endoftext|>
|
492a3660141abd18ab11f3294474e65248f2908a0d14361d4edebd0607bd31f2
|
def get_winner(self, board):
'获取游戏是否结束等'
self.init(board=board)
if len(self.logic.get_legal_moves(1)):
return self.WinnerState.GAME_RUNNING
if len(self.logic.get_legal_moves((- 1))):
return self.WinnerState.GAME_RUNNING
player1_count = self.logic.count(1)
player2_count = self.logic.count((- 1))
if (player1_count == player2_count):
return self.WinnerState.DRAW
elif (player1_count > player2_count):
return self.WinnerState.PLAYER1_WIN
else:
return self.WinnerState.PLAYER2_WIN
|
获取游戏是否结束等
|
src/games/reversi/reversi_game.py
|
get_winner
|
im0qianqian/Reversi-based-RL
| 61
|
python
|
def get_winner(self, board):
self.init(board=board)
if len(self.logic.get_legal_moves(1)):
return self.WinnerState.GAME_RUNNING
if len(self.logic.get_legal_moves((- 1))):
return self.WinnerState.GAME_RUNNING
player1_count = self.logic.count(1)
player2_count = self.logic.count((- 1))
if (player1_count == player2_count):
return self.WinnerState.DRAW
elif (player1_count > player2_count):
return self.WinnerState.PLAYER1_WIN
else:
return self.WinnerState.PLAYER2_WIN
|
def get_winner(self, board):
self.init(board=board)
if len(self.logic.get_legal_moves(1)):
return self.WinnerState.GAME_RUNNING
if len(self.logic.get_legal_moves((- 1))):
return self.WinnerState.GAME_RUNNING
player1_count = self.logic.count(1)
player2_count = self.logic.count((- 1))
if (player1_count == player2_count):
return self.WinnerState.DRAW
elif (player1_count > player2_count):
return self.WinnerState.PLAYER1_WIN
else:
return self.WinnerState.PLAYER2_WIN<|docstring|>获取游戏是否结束等<|endoftext|>
|
1b7cecaf32d8738bb3cb229586290979c2ca7aec1364c2709232c290d802a761
|
def get_legal_moves(self, player, board):
'获取行动力矩阵'
self.init(board=board)
legal_moves = self.logic.get_legal_moves(player)
res = np.zeros(self.get_action_size(), dtype=np.int)
if (len(legal_moves) == 0):
res[(- 1)] = 1
for (x, y) in legal_moves:
res[((x * self.n) + y)] = 1
return res
|
获取行动力矩阵
|
src/games/reversi/reversi_game.py
|
get_legal_moves
|
im0qianqian/Reversi-based-RL
| 61
|
python
|
def get_legal_moves(self, player, board):
self.init(board=board)
legal_moves = self.logic.get_legal_moves(player)
res = np.zeros(self.get_action_size(), dtype=np.int)
if (len(legal_moves) == 0):
res[(- 1)] = 1
for (x, y) in legal_moves:
res[((x * self.n) + y)] = 1
return res
|
def get_legal_moves(self, player, board):
self.init(board=board)
legal_moves = self.logic.get_legal_moves(player)
res = np.zeros(self.get_action_size(), dtype=np.int)
if (len(legal_moves) == 0):
res[(- 1)] = 1
for (x, y) in legal_moves:
res[((x * self.n) + y)] = 1
return res<|docstring|>获取行动力矩阵<|endoftext|>
|
c22bd7692100d263623c0827a879ea1aca0d0c3b6d2987435143e55ce9ad73e6
|
def get_current_state(self):
'获取棋盘当前状态'
return self.logic.pieces
|
获取棋盘当前状态
|
src/games/reversi/reversi_game.py
|
get_current_state
|
im0qianqian/Reversi-based-RL
| 61
|
python
|
def get_current_state(self):
return self.logic.pieces
|
def get_current_state(self):
return self.logic.pieces<|docstring|>获取棋盘当前状态<|endoftext|>
|
4e4617341fd9b32cfe5eeed322c80ae075f66ad1f6148aadd04bd03a0ee616b9
|
def get_relative_state(self, player, board):
'获取相对矩阵'
return (player * board)
|
获取相对矩阵
|
src/games/reversi/reversi_game.py
|
get_relative_state
|
im0qianqian/Reversi-based-RL
| 61
|
python
|
def get_relative_state(self, player, board):
return (player * board)
|
def get_relative_state(self, player, board):
return (player * board)<|docstring|>获取相对矩阵<|endoftext|>
|
e58fadc97a416c3109350a849209ba30193bf7c00176b4c6a5c1e4a234974622
|
def get_next_state(self, player, action, board):
'玩家 player 执行 action 后的棋盘状态'
self.init(board=board)
if (0 <= action < (self.n ** 2)):
self.logic.execute_move(((action // self.n), (action % self.n)), player)
return (self.logic.pieces, (- player))
|
玩家 player 执行 action 后的棋盘状态
|
src/games/reversi/reversi_game.py
|
get_next_state
|
im0qianqian/Reversi-based-RL
| 61
|
python
|
def get_next_state(self, player, action, board):
self.init(board=board)
if (0 <= action < (self.n ** 2)):
self.logic.execute_move(((action // self.n), (action % self.n)), player)
return (self.logic.pieces, (- player))
|
def get_next_state(self, player, action, board):
self.init(board=board)
if (0 <= action < (self.n ** 2)):
self.logic.execute_move(((action // self.n), (action % self.n)), player)
return (self.logic.pieces, (- player))<|docstring|>玩家 player 执行 action 后的棋盘状态<|endoftext|>
|
b4022f9b9f9d36ff1c51efb1b89fb418d5e043849635948f36e92e1b4874cdad
|
@PROPERTY.route('/property', methods=['POST'])
@login_required
def property_registration():
'property registration'
try:
data = request.get_json()
property_name = data['property_name']
if (not property_name.strip()):
return (jsonify({'error': 'property name cannot be empty'}), 400)
if (not re.match('^[A-Za-z][a-zA-Z]', property_name)):
return (jsonify({'error': 'input valid property name'}), 400)
cur = INIT_DB.cursor()
cur.execute(("SELECT property_name FROM property WHERE property_name = '%s' " % property_name))
data = cur.fetchone()
print(data)
if (data != None):
return (jsonify({'message': 'property already exists'}), 400)
try:
return PROPERTY_RECORDS.register_property(property_name)
except psycopg2.Error as error:
return jsonify({'error': str(error)})
except KeyError:
return (jsonify({'error': 'a key is missing'}), 400)
except Exception as e:
return (jsonify({'error': str(e)}), 400)
|
property registration
|
app/api/v1/views/property_views.py
|
property_registration
|
MurungaKibaara/Kukodi
| 3
|
python
|
@PROPERTY.route('/property', methods=['POST'])
@login_required
def property_registration():
try:
data = request.get_json()
property_name = data['property_name']
if (not property_name.strip()):
return (jsonify({'error': 'property name cannot be empty'}), 400)
if (not re.match('^[A-Za-z][a-zA-Z]', property_name)):
return (jsonify({'error': 'input valid property name'}), 400)
cur = INIT_DB.cursor()
cur.execute(("SELECT property_name FROM property WHERE property_name = '%s' " % property_name))
data = cur.fetchone()
print(data)
if (data != None):
return (jsonify({'message': 'property already exists'}), 400)
try:
return PROPERTY_RECORDS.register_property(property_name)
except psycopg2.Error as error:
return jsonify({'error': str(error)})
except KeyError:
return (jsonify({'error': 'a key is missing'}), 400)
except Exception as e:
return (jsonify({'error': str(e)}), 400)
|
@PROPERTY.route('/property', methods=['POST'])
@login_required
def property_registration():
try:
data = request.get_json()
property_name = data['property_name']
if (not property_name.strip()):
return (jsonify({'error': 'property name cannot be empty'}), 400)
if (not re.match('^[A-Za-z][a-zA-Z]', property_name)):
return (jsonify({'error': 'input valid property name'}), 400)
cur = INIT_DB.cursor()
cur.execute(("SELECT property_name FROM property WHERE property_name = '%s' " % property_name))
data = cur.fetchone()
print(data)
if (data != None):
return (jsonify({'message': 'property already exists'}), 400)
try:
return PROPERTY_RECORDS.register_property(property_name)
except psycopg2.Error as error:
return jsonify({'error': str(error)})
except KeyError:
return (jsonify({'error': 'a key is missing'}), 400)
except Exception as e:
return (jsonify({'error': str(e)}), 400)<|docstring|>property registration<|endoftext|>
|
43398048924cc4b482f688a676022af1855b78fdd6e7d3375feb654d13d41fb2
|
@PROPERTY.route('/property', methods=['GET'])
def view_all():
'view all properties'
return PROPERTY_RECORDS.view_properties()
|
view all properties
|
app/api/v1/views/property_views.py
|
view_all
|
MurungaKibaara/Kukodi
| 3
|
python
|
@PROPERTY.route('/property', methods=['GET'])
def view_all():
return PROPERTY_RECORDS.view_properties()
|
@PROPERTY.route('/property', methods=['GET'])
def view_all():
return PROPERTY_RECORDS.view_properties()<|docstring|>view all properties<|endoftext|>
|
83259fbefc286be29519d69d1b53a06d681e5f7b0505da350581cd34e0153f39
|
@PROPERTY.route('/property/<int:property_id>', methods=['GET'])
def view_one(property_id):
'view property by property id'
return PROPERTY_RECORDS.view_property(property_id)
|
view property by property id
|
app/api/v1/views/property_views.py
|
view_one
|
MurungaKibaara/Kukodi
| 3
|
python
|
@PROPERTY.route('/property/<int:property_id>', methods=['GET'])
def view_one(property_id):
return PROPERTY_RECORDS.view_property(property_id)
|
@PROPERTY.route('/property/<int:property_id>', methods=['GET'])
def view_one(property_id):
return PROPERTY_RECORDS.view_property(property_id)<|docstring|>view property by property id<|endoftext|>
|
073af212e132237da404cc7bd294a083a062065858d65a63a41976c5044b3382
|
@PROPERTY.route('/property/<string:property_name>', methods=['GET'])
def view_one_by_name(property_name):
'view property by property name'
return PROPERTY_RECORDS.view_property_by_name(property_name)
|
view property by property name
|
app/api/v1/views/property_views.py
|
view_one_by_name
|
MurungaKibaara/Kukodi
| 3
|
python
|
@PROPERTY.route('/property/<string:property_name>', methods=['GET'])
def view_one_by_name(property_name):
return PROPERTY_RECORDS.view_property_by_name(property_name)
|
@PROPERTY.route('/property/<string:property_name>', methods=['GET'])
def view_one_by_name(property_name):
return PROPERTY_RECORDS.view_property_by_name(property_name)<|docstring|>view property by property name<|endoftext|>
|
004584e03e906a26419d690e3d68f63f10e28749eff9d9b19d78ddac61aefda3
|
def parse_else(self, line_num):
" Parses 'else' statement - pretty much validates formating. "
log.debug(self._id, 'IfElseNode: parsing else')
lines = self._context.get_script()
if_line = self._if_script.get_script_line()
else_line = lines[line_num]
if_indent = syntax.get_indent(if_line)
else_indent = syntax.get_indent(else_line)
if (if_indent != else_indent):
self._else_script = None
return
idx = (else_line.string.index(syntax.OP_ELSE) + len(syntax.OP_ELSE))
validate(self._id, self._sline, (else_line[idx:].strip() == syntax.COLON), ((('IfElseNode: expected ' + syntax.COLON) + ' after ') + syntax.OP_ELSE))
validate(self._id, self._sline, ((line_num + 1) < len(lines)), ('IfElseNode: missing script block after ' + syntax.OP_ELSE))
validate(self._id, self._sline, syntax.is_indented_block(lines[line_num:(line_num + 2)]), ('IfElseNode: expected an indented block after ' + syntax.OP_ELSE))
|
Parses 'else' statement - pretty much validates formating.
|
spidy/language/ifelse_node.py
|
parse_else
|
AlexPereverzyev/spidy
| 1
|
python
|
def parse_else(self, line_num):
" "
log.debug(self._id, 'IfElseNode: parsing else')
lines = self._context.get_script()
if_line = self._if_script.get_script_line()
else_line = lines[line_num]
if_indent = syntax.get_indent(if_line)
else_indent = syntax.get_indent(else_line)
if (if_indent != else_indent):
self._else_script = None
return
idx = (else_line.string.index(syntax.OP_ELSE) + len(syntax.OP_ELSE))
validate(self._id, self._sline, (else_line[idx:].strip() == syntax.COLON), ((('IfElseNode: expected ' + syntax.COLON) + ' after ') + syntax.OP_ELSE))
validate(self._id, self._sline, ((line_num + 1) < len(lines)), ('IfElseNode: missing script block after ' + syntax.OP_ELSE))
validate(self._id, self._sline, syntax.is_indented_block(lines[line_num:(line_num + 2)]), ('IfElseNode: expected an indented block after ' + syntax.OP_ELSE))
|
def parse_else(self, line_num):
" "
log.debug(self._id, 'IfElseNode: parsing else')
lines = self._context.get_script()
if_line = self._if_script.get_script_line()
else_line = lines[line_num]
if_indent = syntax.get_indent(if_line)
else_indent = syntax.get_indent(else_line)
if (if_indent != else_indent):
self._else_script = None
return
idx = (else_line.string.index(syntax.OP_ELSE) + len(syntax.OP_ELSE))
validate(self._id, self._sline, (else_line[idx:].strip() == syntax.COLON), ((('IfElseNode: expected ' + syntax.COLON) + ' after ') + syntax.OP_ELSE))
validate(self._id, self._sline, ((line_num + 1) < len(lines)), ('IfElseNode: missing script block after ' + syntax.OP_ELSE))
validate(self._id, self._sline, syntax.is_indented_block(lines[line_num:(line_num + 2)]), ('IfElseNode: expected an indented block after ' + syntax.OP_ELSE))<|docstring|>Parses 'else' statement - pretty much validates formating.<|endoftext|>
|
a271968fd8ac60ba36a2380c19d41caf536e67baa8f1d03a729088fe41867095
|
def test_create_user_with_email_successful(self):
'Test creating a new user with an email is successful'
email = 'example@example.com'
password = 'Testpass123'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
|
Test creating a new user with an email is successful
|
app/core/tests/test_models.py
|
test_create_user_with_email_successful
|
hl2999/recipe-app-api
| 1
|
python
|
def test_create_user_with_email_successful(self):
email = 'example@example.com'
password = 'Testpass123'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
|
def test_create_user_with_email_successful(self):
email = 'example@example.com'
password = 'Testpass123'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))<|docstring|>Test creating a new user with an email is successful<|endoftext|>
|
a1ce4b3a515def5a8503d0150b4e56c7a587c32d791e637c303297ad5f29870c
|
def test_new_user_email_normalized(self):
'Test the email for a new user is normalized'
email = 'example@example.com'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
|
Test the email for a new user is normalized
|
app/core/tests/test_models.py
|
test_new_user_email_normalized
|
hl2999/recipe-app-api
| 1
|
python
|
def test_new_user_email_normalized(self):
email = 'example@example.com'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
|
def test_new_user_email_normalized(self):
email = 'example@example.com'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())<|docstring|>Test the email for a new user is normalized<|endoftext|>
|
7da94b3b00879ffd63c0f7770685fb90aa46e562e2c04cb515b4a962e39b4850
|
def test_new_user_invalid_email(self):
'Test creating user with no email raises error'
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
|
Test creating user with no email raises error
|
app/core/tests/test_models.py
|
test_new_user_invalid_email
|
hl2999/recipe-app-api
| 1
|
python
|
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
|
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')<|docstring|>Test creating user with no email raises error<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.