repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
gpoulter/python-ngram | ngram.py | NGram.add | python | def add(self, item):
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1 | Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam'] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L212-L234 | [
"def key(self, item):\n \"\"\"Get the key string for the item.\n\n >>> from ngram import NGram\n >>> n = NGram(key=lambda x:x[1])\n >>> n.key((3,\"ham\"))\n 'ham'\n \"\"\"\n return self._key(item) if self._key else item\n",
"def pad(self, string):\n \"\"\"Pad a string in preparation for splitting into ngrams.\n\n >>> from ngram import NGram\n >>> n = NGram()\n >>> n.pad('ham')\n '$$ham$$'\n \"\"\"\n return self._padding + string + self._padding\n",
"def _split(self, string):\n \"\"\"Iterates over the ngrams of a string (no padding).\n\n >>> from ngram import NGram\n >>> n = NGram()\n >>> list(n._split(\"hamegg\"))\n ['ham', 'ame', 'meg', 'egg']\n \"\"\"\n for i in range(len(string) - self.N + 1):\n yield string[i:i + self.N]\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.items_sharing_ngrams | python | def items_sharing_ngrams(self, query):
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared | Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L267-L294 | [
"def split(self, string):\n \"\"\"Pads a string and iterates over its ngrams.\n\n >>> from ngram import NGram\n >>> n = NGram()\n >>> list(n.split(\"ham\"))\n ['$$h', '$ha', 'ham', 'am$', 'm$$']\n \"\"\"\n return self._split(self.pad(string))\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.searchitem | python | def searchitem(self, item, threshold=None):
return self.search(self.key(item), threshold) | Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L296-L308 | [
"def key(self, item):\n \"\"\"Get the key string for the item.\n\n >>> from ngram import NGram\n >>> n = NGram(key=lambda x:x[1])\n >>> n.key((3,\"ham\"))\n 'ham'\n \"\"\"\n return self._key(item) if self._key else item\n",
"def search(self, query, threshold=None):\n \"\"\"Search the index for items whose key exceeds threshold\n similarity to the query string.\n\n :param query: returned items will have at least `threshold` \\\n similarity to the query string.\n\n :return: list of pairs of (item, similarity) by decreasing similarity.\n\n >>> from ngram import NGram\n >>> n = NGram([(0, \"SPAM\"), (1, \"SPAN\"), (2, \"EG\")], key=lambda x:x[1])\n >>> sorted(n.search(\"SPA\"))\n [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]\n >>> n.search(\"M\")\n [((0, 'SPAM'), 0.125)]\n >>> n.search(\"EG\")\n [((2, 'EG'), 1.0)]\n \"\"\"\n threshold = threshold if threshold is not None else self.threshold\n results = []\n # Identify possible results\n for match, samegrams in self.items_sharing_ngrams(query).items():\n allgrams = (len(self.pad(query))\n + self.length[match] - (2 * self.N) - samegrams + 2)\n similarity = self.ngram_similarity(samegrams, allgrams, self.warp)\n if similarity >= threshold:\n results.append((match, similarity))\n # Sort results by decreasing similarity\n results.sort(key=lambda x: x[1], reverse=True)\n return results\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.search | python | def search(self, query, threshold=None):
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results | Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L310-L339 | [
"def pad(self, string):\n \"\"\"Pad a string in preparation for splitting into ngrams.\n\n >>> from ngram import NGram\n >>> n = NGram()\n >>> n.pad('ham')\n '$$ham$$'\n \"\"\"\n return self._padding + string + self._padding\n",
"def items_sharing_ngrams(self, query):\n \"\"\"Retrieve the subset of items that share n-grams the query string.\n\n :param query: look up items that share N-grams with this string.\n :return: mapping from matched string to the number of shared N-grams.\n\n >>> from ngram import NGram\n >>> n = NGram([\"ham\",\"spam\",\"eggs\"])\n >>> sorted(n.items_sharing_ngrams(\"mam\").items())\n [('ham', 2), ('spam', 2)]\n \"\"\"\n # From matched string to number of N-grams shared with query string\n shared = {}\n # Dictionary mapping n-gram to string to number of occurrences of that\n # ngram in the string that remain to be matched.\n remaining = {}\n for ngram in self.split(query):\n try:\n for match, count in self._grams[ngram].items():\n remaining.setdefault(ngram, {}).setdefault(match, count)\n # match as many occurrences as exist in matched string\n if remaining[ngram][match] > 0:\n remaining[ngram][match] -= 1\n shared.setdefault(match, 0)\n shared[match] += 1\n except KeyError:\n pass\n return shared\n",
"def ngram_similarity(samegrams, allgrams, warp=1.0):\n \"\"\"Similarity for two sets of n-grams.\n\n :note: ``similarity = (a**e - d**e)/a**e`` where `a` is \\\n \"all n-grams\", `d` is \"different n-grams\" and `e` is the warp.\n\n :param samegrams: number of n-grams shared by the two strings.\n\n :param allgrams: total of the distinct n-grams across the two strings.\n :return: similarity in the range 0.0 to 1.0.\n\n >>> from ngram import NGram\n >>> NGram.ngram_similarity(5, 10)\n 0.5\n >>> NGram.ngram_similarity(5, 10, warp=2)\n 0.75\n >>> NGram.ngram_similarity(5, 10, warp=3)\n 0.875\n >>> NGram.ngram_similarity(2, 4, warp=2)\n 0.75\n >>> NGram.ngram_similarity(3, 4)\n 0.75\n \"\"\"\n if abs(warp - 1.0) < 1e-9:\n similarity = float(samegrams) / allgrams\n else:\n diffgrams = float(allgrams - samegrams)\n similarity = ((allgrams ** warp - diffgrams ** warp)\n / (allgrams ** warp))\n return similarity\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.finditem | python | def finditem(self, item, threshold=None):
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None | Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8) | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L341-L358 | [
"def searchitem(self, item, threshold=None):\n \"\"\"Search the index for items whose key exceeds the threshold\n similarity to the key of the given item.\n\n :return: list of pairs of (item, similarity) by decreasing similarity.\n\n >>> from ngram import NGram\n >>> n = NGram([(0, \"SPAM\"), (1, \"SPAN\"), (2, \"EG\"),\n ... (3, \"SPANN\")], key=lambda x:x[1])\n >>> sorted(n.searchitem((2, \"SPA\"), 0.35))\n [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]\n \"\"\"\n return self.search(self.key(item), threshold)\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.find | python | def find(self, query, threshold=None):
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None | Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8) | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L360-L375 | [
"def search(self, query, threshold=None):\n \"\"\"Search the index for items whose key exceeds threshold\n similarity to the query string.\n\n :param query: returned items will have at least `threshold` \\\n similarity to the query string.\n\n :return: list of pairs of (item, similarity) by decreasing similarity.\n\n >>> from ngram import NGram\n >>> n = NGram([(0, \"SPAM\"), (1, \"SPAN\"), (2, \"EG\")], key=lambda x:x[1])\n >>> sorted(n.search(\"SPA\"))\n [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]\n >>> n.search(\"M\")\n [((0, 'SPAM'), 0.125)]\n >>> n.search(\"EG\")\n [((2, 'EG'), 1.0)]\n \"\"\"\n threshold = threshold if threshold is not None else self.threshold\n results = []\n # Identify possible results\n for match, samegrams in self.items_sharing_ngrams(query).items():\n allgrams = (len(self.pad(query))\n + self.length[match] - (2 * self.N) - samegrams + 2)\n similarity = self.ngram_similarity(samegrams, allgrams, self.warp)\n if similarity >= threshold:\n results.append((match, similarity))\n # Sort results by decreasing similarity\n results.sort(key=lambda x: x[1], reverse=True)\n return results\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.ngram_similarity | python | def ngram_similarity(samegrams, allgrams, warp=1.0):
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity | Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75 | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L378-L407 | null | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.compare | python | def compare(s1, s2, **kwargs):
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0 | Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5 | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L410-L435 | null | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.clear | python | def clear(self):
super(NGram, self).clear()
self._grams = {}
self.length = {} | Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L466-L479 | null | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.union | python | def union(self, *others):
return self.copy(super(NGram, self).union(*others)) | Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam'] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L481-L490 | [
"def copy(self, items=None):\n \"\"\"Return a new NGram object with the same settings, and\n referencing the same items. Copy is shallow in that\n each item is not recursively copied. Optionally specify\n alternate items to populate the copy.\n\n >>> from ngram import NGram\n >>> from copy import deepcopy\n >>> n = NGram(['eggs', 'spam'])\n >>> m = n.copy()\n >>> m.add('ham')\n >>> sorted(list(n))\n ['eggs', 'spam']\n >>> sorted(list(m))\n ['eggs', 'ham', 'spam']\n >>> p = n.copy(['foo', 'bar'])\n >>> sorted(list(p))\n ['bar', 'foo']\n \"\"\"\n return NGram(items if items is not None else self,\n self.threshold, self.warp, self._key,\n self.N, self._pad_len, self._pad_char)\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.difference | python | def difference(self, *others):
return self.copy(super(NGram, self).difference(*others)) | Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs'] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L492-L501 | [
"def copy(self, items=None):\n \"\"\"Return a new NGram object with the same settings, and\n referencing the same items. Copy is shallow in that\n each item is not recursively copied. Optionally specify\n alternate items to populate the copy.\n\n >>> from ngram import NGram\n >>> from copy import deepcopy\n >>> n = NGram(['eggs', 'spam'])\n >>> m = n.copy()\n >>> m.add('ham')\n >>> sorted(list(n))\n ['eggs', 'spam']\n >>> sorted(list(m))\n ['eggs', 'ham', 'spam']\n >>> p = n.copy(['foo', 'bar'])\n >>> sorted(list(p))\n ['bar', 'foo']\n \"\"\"\n return NGram(items if items is not None else self,\n self.threshold, self.warp, self._key,\n self.N, self._pad_len, self._pad_char)\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.intersection | python | def intersection(self, *others):
return self.copy(super(NGram, self).intersection(*others)) | Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam'] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L516-L525 | [
"def copy(self, items=None):\n \"\"\"Return a new NGram object with the same settings, and\n referencing the same items. Copy is shallow in that\n each item is not recursively copied. Optionally specify\n alternate items to populate the copy.\n\n >>> from ngram import NGram\n >>> from copy import deepcopy\n >>> n = NGram(['eggs', 'spam'])\n >>> m = n.copy()\n >>> m.add('ham')\n >>> sorted(list(n))\n ['eggs', 'spam']\n >>> sorted(list(m))\n ['eggs', 'ham', 'spam']\n >>> p = n.copy(['foo', 'bar'])\n >>> sorted(list(p))\n ['bar', 'foo']\n \"\"\"\n return NGram(items if items is not None else self,\n self.threshold, self.warp, self._key,\n self.N, self._pad_len, self._pad_char)\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.intersection_update | python | def intersection_update(self, *others):
self.difference_update(super(NGram, self).difference(*others)) | Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam'] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L527-L537 | [
"def difference_update(self, other):\n \"\"\"Remove from this set all elements from `other` set.\n\n >>> from ngram import NGram\n >>> n = NGram(['spam', 'eggs'])\n >>> other = set(['spam'])\n >>> n.difference_update(other)\n >>> list(n)\n ['eggs']\n \"\"\"\n for item in other:\n self.discard(item)\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
"""
return self.copy(super(NGram, self).symmetric_difference(other))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.symmetric_difference | python | def symmetric_difference(self, other):
return self.copy(super(NGram, self).symmetric_difference(other)) | Return the symmetric difference of two sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham'] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L539-L548 | [
"def copy(self, items=None):\n \"\"\"Return a new NGram object with the same settings, and\n referencing the same items. Copy is shallow in that\n each item is not recursively copied. Optionally specify\n alternate items to populate the copy.\n\n >>> from ngram import NGram\n >>> from copy import deepcopy\n >>> n = NGram(['eggs', 'spam'])\n >>> m = n.copy()\n >>> m.add('ham')\n >>> sorted(list(n))\n ['eggs', 'spam']\n >>> sorted(list(m))\n ['eggs', 'ham', 'spam']\n >>> p = n.copy(['foo', 'bar'])\n >>> sorted(list(p))\n ['bar', 'foo']\n \"\"\"\n return NGram(items if items is not None else self,\n self.threshold, self.warp, self._key,\n self.N, self._pad_len, self._pad_char)\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
"""Add an item to the N-gram index (if it has not already been added).
>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
"""
if item not in self:
# Add the item to the base set
super(NGram, self).add(item)
# Record length of padded string
padded_item = self.pad(self.key(item))
self.length[item] = len(padded_item)
for ngram in self._split(padded_item):
# Add a new n-gram and string to index if necessary
self._grams.setdefault(ngram, {}).setdefault(item, 0)
# Increment number of times the n-gram appears in the string
self._grams[ngram][item] += 1
def remove(self, item):
"""Remove an item from the set. Inverts the add operation.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.remove('spam')
>>> list(n)
['eggs']
"""
if item in self:
super(NGram, self).remove(item)
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
def pop(self):
"""Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> x = n.pop()
>>> len(n)
1
"""
item = super(NGram, self).pop()
del self.length[item]
for ngram in set(self.splititem(item)):
del self._grams[ngram][item]
return item
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared
def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold)
def search(self, query, threshold=None):
"""Search the index for items whose key exceeds threshold
similarity to the query string.
:param query: returned items will have at least `threshold` \
similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
"""
threshold = threshold if threshold is not None else self.threshold
results = []
# Identify possible results
for match, samegrams in self.items_sharing_ngrams(query).items():
allgrams = (len(self.pad(query))
+ self.length[match] - (2 * self.N) - samegrams + 2)
similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
if similarity >= threshold:
results.append((match, similarity))
# Sort results by decreasing similarity
results.sort(key=lambda x: x[1], reverse=True)
return results
def finditem(self, item, threshold=None):
"""Return most similar item to the provided one, or None if
nothing exceeds the threshold.
>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
... key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
"""
results = self.searchitem(item, threshold)
if results:
return results[0][0]
else:
return None
def find(self, query, threshold=None):
"""Simply return the best match to the query, None on no match.
>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
"""
results = self.search(query, threshold)
if results:
return results[0][0]
else:
return None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
"""Similarity for two sets of n-grams.
:note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
"all n-grams", `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.
>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
"""
if abs(warp - 1.0) < 1e-9:
similarity = float(samegrams) / allgrams
else:
diffgrams = float(allgrams - samegrams)
similarity = ((allgrams ** warp - diffgrams ** warp)
/ (allgrams ** warp))
return similarity
@staticmethod
def compare(s1, s2, **kwargs):
"""Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
"""
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
"""Update the set with new items.
>>> from ngram import NGram
>>> n = NGram(["spam"])
>>> n.update(["eggs"])
>>> sorted(list(n))
['eggs', 'spam']
"""
for item in items:
self.add(item)
def discard(self, item):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> n.discard('spam')
>>> n.discard('ham')
>>> list(n)
['eggs']
"""
if item in self:
self.remove(item)
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {}
def union(self, *others):
"""Return the union of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
"""
return self.copy(super(NGram, self).union(*others))
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others))
def difference_update(self, other):
"""Remove from this set all elements from `other` set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam'])
>>> n.difference_update(other)
>>> list(n)
['eggs']
"""
for item in other:
self.discard(item)
def intersection(self, *others):
"""Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
"""
return self.copy(super(NGram, self).intersection(*others))
def intersection_update(self, *others):
"""Update the set with the intersection of itself and other sets.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
"""
self.difference_update(super(NGram, self).difference(*others))
def symmetric_difference_update(self, other):
"""Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
"""
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) # remove items present in both
|
gpoulter/python-ngram | ngram.py | NGram.symmetric_difference_update | python | def symmetric_difference_update(self, other):
intersection = super(NGram, self).intersection(other)
self.update(other) # add items present in other
self.difference_update(intersection) | Update the set with the symmetric difference of itself and `other`.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham'] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L550-L562 | [
"def update(self, items):\n \"\"\"Update the set with new items.\n\n >>> from ngram import NGram\n >>> n = NGram([\"spam\"])\n >>> n.update([\"eggs\"])\n >>> sorted(list(n))\n ['eggs', 'spam']\n \"\"\"\n for item in items:\n self.add(item)\n",
"def difference_update(self, other):\n \"\"\"Remove from this set all elements from `other` set.\n\n >>> from ngram import NGram\n >>> n = NGram(['spam', 'eggs'])\n >>> other = set(['spam'])\n >>> n.difference_update(other)\n >>> list(n)\n ['eggs']\n \"\"\"\n for item in other:\n self.discard(item)\n"
] | class NGram(set):
"""A set that supports searching for members by N-gram string similarity.
In Python 2, items should be `unicode` string or a plain ASCII `str`
(bytestring) - do not use UTF-8 or other multi-byte encodings, because
multi-byte characters will be split up.
:type threshold: float in 0.0 ... 1.0
:param threshold: minimum similarity for a string to be considered a match.
:type warp: float in 1.0 ... 3.0
:param warp: use warp greater than 1.0 to increase the similarity of \
shorter string pairs.
:type items: [item, ...]
:param items: iteration of items to index for N-gram search.
:type N: int >= 2
:param N: number of characters per n-gram.
:type pad_len: int in 0 ... N-1
:param pad_len: how many characters padding to add (defaults to N-1).
:type pad_char: str or unicode
:param pad_char: character to use for padding. Default is '$', but \
consider using the\ non-breaking space character, ``u'\\xa0'`` \
(``u"\\u00A0"``).
:type key: function(item) -> str/unicode
:param key: Function to convert items into string, default is no \
conversion. Recommended to use `str` or `unicode` for non-string items. \
Using anonymous function prevents NGram class from being pickled.
Instance variables:
:ivar _grams: For each n-gram, the items containing it and the number of \
times\ the n-gram occurs in the item as ``{str:{item:int, ...}, ...}``.
:ivar length: maps items to length of the padded string representations \
as ``{item:int, ...}``.
"""
def __init__(self, items=None, threshold=0.0, warp=1.0, key=None,
N=3, pad_len=None, pad_char='$', **kwargs):
super(NGram, self).__init__()
if not (0 <= threshold <= 1):
raise ValueError("threshold out of range 0.0 to 1.0: "
+ repr(threshold))
if not (1.0 <= warp <= 3.0):
raise ValueError(
"warp out of range 1.0 to 3.0: " + repr(warp))
if not N >= 1:
raise ValueError("N out of range (should be N >= 1): " + repr(N))
if pad_len is None:
pad_len = N - 1
if not (0 <= pad_len < N):
raise ValueError("pad_len out of range: " + repr(pad_len))
if not len(pad_char) == 1:
raise ValueError(
"pad_char is not single character: " + repr(pad_char))
if key is not None and not callable(key):
raise ValueError("key is not a function: " + repr(key))
self.threshold = threshold
self.warp = warp
self.N = N
self._pad_len = pad_len
self._pad_char = pad_char
self._padding = pad_char * pad_len # derive a padding string
# compatibility shim for 3.1 iconv parameter
if 'iconv' in kwargs:
self._key = kwargs.pop('iconv')
warnings.warn('"iconv" parameter deprecated, use "key" instead.', DeprecationWarning)
# no longer support 3.1 qconv parameter
if 'qconv' in kwargs:
raise ValueError('qconv query conversion parameter unsupported. '
'Please process query to a string before calling .search')
self._key = key
self._grams = {}
self.length = {}
if items:
self.update(items)
def __reduce__(self):
"""Return state information for pickling, no references to this
instance. The key function must be None, a builtin function, or
a named module-level function.
>>> from ngram import NGram
>>> n = NGram([0xDEAD, 0xBEEF], key=hex)
>>> import pickle
>>> p = pickle.dumps(n)
>>> m = pickle.loads(p)
>>> sorted(list(m))
[48879, 57005]
"""
return NGram, (list(self), self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char)
def key(self, item):
"""Get the key string for the item.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> n.key((3,"ham"))
'ham'
"""
return self._key(item) if self._key else item
def pad(self, string):
"""Pad a string in preparation for splitting into ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> n.pad('ham')
'$$ham$$'
"""
return self._padding + string + self._padding
def _split(self, string):
"""Iterates over the ngrams of a string (no padding).
>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
"""
for i in range(len(string) - self.N + 1):
yield string[i:i + self.N]
def split(self, string):
"""Pads a string and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram()
>>> list(n.split("ham"))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self._split(self.pad(string))
def ngrams(self, string):
"""Alias for 3.1 compatibility, please set pad_len=0 and use split."""
warnings.warn('Method ngram deprecated, use method split with pad_len=0 instead.', DeprecationWarning)
return self._split(string)
def ngrams_pad(self, string):
"""Alias for 3.1 compatibility, please use split instead."""
warnings.warn('Method ngrams_pad deprecated, use method split instead.', DeprecationWarning)
return self.split(string)
def splititem(self, item):
"""Pads the string key of an item and iterates over its ngrams.
>>> from ngram import NGram
>>> n = NGram(key=lambda x:x[1])
>>> item = (3,"ham")
>>> list(n.splititem(item))
['$$h', '$ha', 'ham', 'am$', 'm$$']
"""
return self.split(self.key(item))
def add(self, item):
    """Add `item` to the set and index its n-grams (no-op if present).

    >>> from ngram import NGram
    >>> n = NGram()
    >>> n.add("ham")
    >>> list(n)
    ['ham']
    """
    if item in self:
        # Already indexed; nothing to do.
        return
    # Store the item in the underlying set.
    super(NGram, self).add(item)
    # Remember the padded key length; used by search() scoring.
    padded = self.pad(self.key(item))
    self.length[item] = len(padded)
    # Count how often each n-gram occurs within this item's key.
    for gram in self._split(padded):
        counts = self._grams.setdefault(gram, {})
        counts[item] = counts.get(item, 0) + 1
def remove(self, item):
    """Remove `item` from the set, undoing the indexing done by add().

    >>> from ngram import NGram
    >>> n = NGram(['spam', 'eggs'])
    >>> n.remove('spam')
    >>> list(n)
    ['eggs']
    """
    if item not in self:
        return
    super(NGram, self).remove(item)
    del self.length[item]
    # Drop every index entry that pointed at this item.
    for gram in set(self.splititem(item)):
        del self._grams[gram][item]
def pop(self):
    """Remove and return an arbitrary item, unindexing its n-grams.

    Raises KeyError if the set is empty.

    >>> from ngram import NGram
    >>> n = NGram(['spam', 'eggs'])
    >>> x = n.pop()
    >>> len(n)
    1
    """
    item = super(NGram, self).pop()
    # Mirror remove()'s index cleanup for the popped item.
    self.length.pop(item)
    for gram in set(self.splititem(item)):
        del self._grams[gram][item]
    return item
def items_sharing_ngrams(self, query):
    """Retrieve the subset of items that share n-grams with the query string.

    :param query: look up items that share N-grams with this string.
    :return: mapping from matched string to the number of shared N-grams.

    >>> from ngram import NGram
    >>> n = NGram(["ham","spam","eggs"])
    >>> sorted(n.items_sharing_ngrams("mam").items())
    [('ham', 2), ('spam', 2)]
    """
    # From matched string to number of N-grams shared with query string
    shared = {}
    # Dictionary mapping n-gram to string to number of occurrences of that
    # ngram in the string that remain to be matched.
    remaining = {}
    for ngram in self.split(query):
        try:
            for match, count in self._grams[ngram].items():
                # Seed the per-(ngram, match) budget with the indexed count
                # on first sight; later sightings reuse the decremented value.
                remaining.setdefault(ngram, {}).setdefault(match, count)
                # match as many occurrences as exist in matched string
                if remaining[ngram][match] > 0:
                    remaining[ngram][match] -= 1
                    shared.setdefault(match, 0)
                    shared[match] += 1
        except KeyError:
            # No indexed item contains this n-gram; skip it.
            pass
    return shared
def searchitem(self, item, threshold=None):
    """Search the index using `item`'s key string; see search().

    :return: list of pairs of (item, similarity) by decreasing similarity.

    >>> from ngram import NGram
    >>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
    ...            (3, "SPANN")], key=lambda x:x[1])
    >>> sorted(n.searchitem((2, "SPA"), 0.35))
    [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
    """
    query = self.key(item)
    return self.search(query, threshold)
def search(self, query, threshold=None):
    """Search the index for items whose key exceeds threshold
    similarity to the query string.

    :param query: returned items will have at least `threshold` \
    similarity to the query string.
    :return: list of pairs of (item, similarity) by decreasing similarity.

    >>> from ngram import NGram
    >>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
    >>> sorted(n.search("SPA"))
    [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
    >>> n.search("M")
    [((0, 'SPAM'), 0.125)]
    >>> n.search("EG")
    [((2, 'EG'), 1.0)]
    """
    threshold = threshold if threshold is not None else self.threshold
    results = []
    # Identify possible results
    for match, samegrams in self.items_sharing_ngrams(query).items():
        # Total distinct n-grams across both padded strings: each padded
        # string of length L contributes (L - N + 1) grams, and shared
        # grams are counted only once.
        allgrams = (len(self.pad(query))
                    + self.length[match] - (2 * self.N) - samegrams + 2)
        similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
        if similarity >= threshold:
            results.append((match, similarity))
    # Sort results by decreasing similarity
    results.sort(key=lambda x: x[1], reverse=True)
    return results
def finditem(self, item, threshold=None):
    """Return the indexed item most similar to the given one, or None
    if nothing exceeds the threshold.

    >>> from ngram import NGram
    >>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
    ...     key=lambda x:x[1].lower())
    >>> n.finditem((3, 'Hom'))
    (1, 'Ham')
    """
    matches = self.searchitem(item, threshold)
    return matches[0][0] if matches else None
def find(self, query, threshold=None):
    """Return the best match to the query, or None when nothing matches.

    >>> from ngram import NGram
    >>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
    >>> n.find('Hom')
    'Ham'
    """
    matches = self.search(query, threshold)
    return matches[0][0] if matches else None
@staticmethod
def ngram_similarity(samegrams, allgrams, warp=1.0):
    """Similarity score in the range 0.0 to 1.0 for two n-gram sets.

    :note: ``similarity = (a**e - d**e)/a**e`` where `a` is \
    "all n-grams", `d` is "different n-grams" and `e` is the warp.
    :param samegrams: number of n-grams shared by the two strings.
    :param allgrams: total of the distinct n-grams across the two strings.
    :return: similarity in the range 0.0 to 1.0.

    >>> from ngram import NGram
    >>> NGram.ngram_similarity(5, 10)
    0.5
    >>> NGram.ngram_similarity(5, 10, warp=2)
    0.75
    >>> NGram.ngram_similarity(3, 4)
    0.75
    """
    if abs(warp - 1.0) >= 1e-9:
        # Warp != 1 boosts similarity via the power formula above.
        diffgrams = float(allgrams - samegrams)
        total = allgrams ** warp
        return (total - diffgrams ** warp) / total
    # warp == 1 (within float tolerance): plain ratio.
    return float(samegrams) / allgrams
@staticmethod
def compare(s1, s2, **kwargs):
    """Return the n-gram similarity of two strings (0.0 to 1.0).

    ``None`` inputs compare equal only to each other.

    :param s1: first string
    :param s2: second string
    :param kwargs: additional keyword arguments passed to __init__.
    :return: similarity between 0.0 and 1.0.

    >>> from ngram import NGram
    >>> NGram.compare('spa', 'spam')
    0.375
    >>> NGram.compare('ham', 'ams', N=1)
    0.5
    """
    if s1 is None or s2 is None:
        return 1.0 if s1 == s2 else 0.0
    # Index one string and search for the other.
    matches = NGram([s1], **kwargs).search(s2)
    if matches:
        return matches[0][1]
    return 0.0
### Set operations implemented on top of NGram add/remove
def update(self, items):
    """Add every element of `items` to the set.

    >>> from ngram import NGram
    >>> n = NGram(["spam"])
    >>> n.update(["eggs"])
    >>> sorted(list(n))
    ['eggs', 'spam']
    """
    for element in items:
        self.add(element)
def discard(self, item):
    """Remove `item` if it is a member; otherwise do nothing.

    >>> from ngram import NGram
    >>> n = NGram(['spam', 'eggs'])
    >>> n.discard('spam')
    >>> n.discard('ham')
    >>> list(n)
    ['eggs']
    """
    if item not in self:
        return
    self.remove(item)
def clear(self):
    """Empty the set and reset the n-gram index.

    >>> from ngram import NGram
    >>> n = NGram(['spam', 'eggs'])
    >>> n.clear()
    >>> list(n)
    []
    """
    super(NGram, self).clear()
    # Discard the index wholesale rather than unindexing item by item.
    self._grams = dict()
    self.length = dict()
def union(self, *others):
    """Return a new NGram with the elements of this set and all `others`.

    >>> from ngram import NGram
    >>> a = NGram(['spam', 'eggs'])
    >>> b = NGram(['spam', 'ham'])
    >>> sorted(list(a.union(b)))
    ['eggs', 'ham', 'spam']
    """
    combined = super(NGram, self).union(*others)
    # copy() preserves this instance's configuration (N, warp, key, ...).
    return self.copy(combined)
def difference(self, *others):
    """Return a new NGram with elements of this set not found in `others`.

    >>> from ngram import NGram
    >>> a = NGram(['spam', 'eggs'])
    >>> b = NGram(['spam', 'ham'])
    >>> list(a.difference(b))
    ['eggs']
    """
    remaining = super(NGram, self).difference(*others)
    return self.copy(remaining)
def difference_update(self, other):
    """Discard every element of `other` from this set.

    >>> from ngram import NGram
    >>> n = NGram(['spam', 'eggs'])
    >>> n.difference_update(set(['spam']))
    >>> list(n)
    ['eggs']
    """
    for element in other:
        self.discard(element)
def intersection(self, *others):
    """Return a new NGram with elements common to this set and `others`.

    >>> from ngram import NGram
    >>> a = NGram(['spam', 'eggs'])
    >>> b = NGram(['spam', 'ham'])
    >>> list(a.intersection(b))
    ['spam']
    """
    common = super(NGram, self).intersection(*others)
    return self.copy(common)
def intersection_update(self, *others):
    """Keep only the elements also present in every one of `others`.

    >>> from ngram import NGram
    >>> n = NGram(['spam', 'eggs'])
    >>> n.intersection_update(set(['spam', 'ham']))
    >>> list(n)
    ['spam']
    """
    # Elements missing from the intersection are exactly those in our
    # set-difference with the other sets; discard them via the indexed path.
    doomed = super(NGram, self).difference(*others)
    self.difference_update(doomed)
def symmetric_difference(self, other):
    """Return a new NGram with elements in exactly one of the two sets.

    >>> from ngram import NGram
    >>> a = NGram(['spam', 'eggs'])
    >>> b = NGram(['spam', 'ham'])
    >>> sorted(list(a.symmetric_difference(b)))
    ['eggs', 'ham']
    """
    uniques = super(NGram, self).symmetric_difference(other)
    return self.copy(uniques)
# remove items present in both
|
willhardy/django-seo | rollyourown/seo/utils.py | LazyList._populate | python | def _populate(self):
if not self._populated:
logging.debug("Populating lazy list %d (%s)" % (id(self), self.__class__.__name__))
try:
self.populate()
self._populated = True
except Exception, e:
logging.debug("Currently unable to populate lazy list: %s" % e) | Populate this list by calling populate(), but only once. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/utils.py#L43-L51 | [
"def populate(self):\n \"\"\" Populates the list.\n This method must be overridden by subclasses.\n It is called once, when items in the list are first accessed.\n \"\"\"\n raise NotImplementedError\n"
class LazyList(list):
    """ Generic python list which is populated when items are first accessed.
    """

    def populate(self):
        """ Populates the list.
        This method must be overridden by subclasses.
        It is called once, when items in the list are first accessed.
        """
        raise NotImplementedError

    # Ensure list is only populated once
    def __init__(self, populate_function=None):
        if populate_function is not None:
            # TODO: Test this functionality!
            # Per-instance override: shadows the class-level populate() hook.
            self.populate = populate_function
        # Flag consulted before populating; flipped after the first
        # successful population (in _populate, defined elsewhere).
        self._populated = False

    # Accessing methods that require a populated field
    # Each read/write operation triggers lazy population via self._populate()
    # before delegating to the plain list implementation.
    def __len__(self):
        self._populate()
        return super(LazyList, self).__len__()

    def __getitem__(self, key):
        self._populate()
        return super(LazyList, self).__getitem__(key)

    def __setitem__(self, key, value):
        self._populate()
        return super(LazyList, self).__setitem__(key, value)

    def __delitem__(self, key):
        self._populate()
        return super(LazyList, self).__delitem__(key)

    def __iter__(self):
        self._populate()
        return super(LazyList, self).__iter__()

    def __contains__(self, item):
        self._populate()
        return super(LazyList, self).__contains__(item)
|
def _register_admin(admin_site, model, admin_class):
    """ Register model in the admin, ignoring any previously registered models.
    Alternatively it could be used in the future to replace a previously
    registered model.
    """
    try:
        admin_site.register(model, admin_class)
    except admin.sites.AlreadyRegistered:
        # The model already has an admin class; keep the existing one.
        pass
registered model. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/admin.py#L81-L89 | null | # -*- coding: utf-8 -*-
from django import forms
from django.contrib import admin
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_unicode
from django.forms.models import fields_for_model
from django.utils.translation import ugettext_lazy as _
from django.utils.text import capfirst
from rollyourown.seo.utils import get_seo_content_types
from rollyourown.seo.systemviews import get_seo_views
# TODO Use groups as fieldsets
# Variants without sites support
class PathMetadataAdmin(admin.ModelAdmin):
    """Admin for path-keyed metadata (no sites framework support)."""
    list_display = ('_path',)
class ModelInstanceMetadataAdmin(admin.ModelAdmin):
    """Admin for per-model-instance metadata (no sites framework support)."""
    list_display = ('_path', '_content_type', '_object_id')
class ModelMetadataAdmin(admin.ModelAdmin):
    """Admin for per-model metadata (no sites framework support)."""
    list_display = ('_content_type',)
class ViewMetadataAdmin(admin.ModelAdmin):
    """Admin for per-view metadata (no sites framework support)."""
    list_display = ('_view', )
# Variants with sites support
class SitePathMetadataAdmin(admin.ModelAdmin):
    """Admin for path-keyed metadata, filterable by site."""
    list_display = ('_path', '_site')
    list_filter = ('_site',)
class SiteModelInstanceMetadataAdmin(admin.ModelAdmin):
    """Admin for per-model-instance metadata, filterable by site and type."""
    list_display = ('_path', '_content_type', '_object_id', '_site')
    list_filter = ('_site', '_content_type')
class SiteModelMetadataAdmin(admin.ModelAdmin):
    """Admin for per-model metadata, filterable by site."""
    list_display = ('_content_type', '_site')
    list_filter = ('_site',)
class SiteViewMetadataAdmin(admin.ModelAdmin):
    """Admin for per-view metadata, filterable by site."""
    list_display = ('_view', '_site')
    list_filter = ('_site',)
def register_seo_admin(admin_site, metadata_class):
    """Register admin classes for each backend of `metadata_class`.

    Picks site-aware or plain base classes depending on whether the
    metadata class uses the sites framework, binds the relevant form to
    each, and registers the path/modelinstance/model/view backends.
    """
    if metadata_class._meta.use_sites:
        path_admin = SitePathMetadataAdmin
        model_instance_admin = SiteModelInstanceMetadataAdmin
        model_admin = SiteModelMetadataAdmin
        view_admin = SiteViewMetadataAdmin
    else:
        path_admin = PathMetadataAdmin
        model_instance_admin = ModelInstanceMetadataAdmin
        model_admin = ModelMetadataAdmin
        view_admin = ViewMetadataAdmin

    # Subclass the selected bases so each admin gets a form built for this
    # particular metadata class (closures over metadata_class).
    class ModelAdmin(model_admin):
        form = get_model_form(metadata_class)

    class ViewAdmin(view_admin):
        form = get_view_form(metadata_class)

    class PathAdmin(path_admin):
        form = get_path_form(metadata_class)

    class ModelInstanceAdmin(model_instance_admin):
        pass

    _register_admin(admin_site, metadata_class._meta.get_model('path'), PathAdmin)
    _register_admin(admin_site, metadata_class._meta.get_model('modelinstance'), ModelInstanceAdmin)
    _register_admin(admin_site, metadata_class._meta.get_model('model'), ModelAdmin)
    _register_admin(admin_site, metadata_class._meta.get_model('view'), ViewAdmin)
class MetadataFormset(generic.BaseGenericInlineFormSet):
    """Generic inline formset that always forces its single form to save."""

    def _construct_form(self, i, **kwargs):
        """ Override the method to change the form attribute empty_permitted """
        form = super(MetadataFormset, self)._construct_form(i, **kwargs)
        # Monkey patch the form to always force a save.
        # It's unfortunate, but necessary because we always want an instance
        # Effect on performance shouldn't be too great, because there is only
        # ever one metadata attached
        form.empty_permitted = False
        form.has_changed = lambda: True
        # Set a marker on this object to prevent automatic metadata creation
        # This is seen by the post_save handler, which then skips this instance.
        # NOTE(review): inside this class body the attribute name is mangled
        # to _MetadataFormset__seo_metadata_handled -- confirm the post_save
        # handler checks the mangled name.
        if self.instance:
            self.instance.__seo_metadata_handled = True
        return form
def get_inline(metadata_class):
    """Build a generic inline admin class for the metadata_class'
    modelinstance backend (at most one metadata row per object).
    """
    return type('MetadataInline', (generic.GenericStackedInline,), {
        'max_num': 1,
        'extra': 1,
        'model': metadata_class._meta.get_model('modelinstance'),
        'ct_field': "_content_type",
        'ct_fk_field': "_object_id",
        'formset': MetadataFormset,
    })
def get_model_form(metadata_class):
    """Build a ModelForm for the per-model metadata backend, restricting
    the _content_type choices to the configured seo_models.
    """
    model_class = metadata_class._meta.get_model('model')

    # Restrict content type choices to the models set in seo_models
    content_types = get_seo_content_types(metadata_class._meta.seo_models)
    content_type_choices = [(x._get_pk_val(), smart_unicode(x)) for x in ContentType.objects.filter(id__in=content_types)]

    # Get a list of fields, with _content_type at the start
    important_fields = ['_content_type'] + core_choice_fields(metadata_class)
    _fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()

    class ModelMetadataForm(forms.ModelForm):
        _content_type = forms.ChoiceField(label=capfirst(_("model")), choices=content_type_choices)

        class Meta:
            model = model_class
            fields = _fields

        def clean__content_type(self):
            # The ChoiceField yields the pk as a string; resolve it to a
            # ContentType instance or reject it.
            value = self.cleaned_data['_content_type']
            try:
                return ContentType.objects.get(pk=int(value))
            except (ContentType.DoesNotExist, ValueError):
                raise forms.ValidationError("Invalid ContentType")

    return ModelMetadataForm
def get_path_form(metadata_class):
    """Build a ModelForm for the path-based metadata backend."""
    model_class = metadata_class._meta.get_model('path')

    # Get a list of fields, with _path at the start
    important_fields = ['_path'] + core_choice_fields(metadata_class)
    _fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()

    class ModelMetadataForm(forms.ModelForm):
        class Meta:
            model = model_class
            fields = _fields

    return ModelMetadataForm
def get_view_form(metadata_class):
    """Build a ModelForm for the view-based metadata backend, limiting
    the _view choices to the registered system views.
    """
    model_class = metadata_class._meta.get_model('view')

    # Restrict view choices to the system views known for this metadata class
    view_choices = [(key, " ".join(key.split("_"))) for key in get_seo_views(metadata_class)]
    view_choices.insert(0, ("", "---------"))

    # Get a list of fields, with _view at the start
    important_fields = ['_view'] + core_choice_fields(metadata_class)
    _fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()

    class ModelMetadataForm(forms.ModelForm):
        _view = forms.ChoiceField(label=capfirst(_("view")), choices=view_choices, required=False)

        class Meta:
            model = model_class
            fields = _fields

    return ModelMetadataForm
def core_choice_fields(metadata_class):
    """ If the 'optional' core fields (_site and _language) are required,
    list them here.
    """
    candidates = (('_site', metadata_class._meta.use_sites),
                  ('_language', metadata_class._meta.use_i18n))
    return [name for name, enabled in candidates if enabled]
def _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site):
    """ Monkey patch the inline onto the given admin_class instance. """
    if model in metadata_class._meta.seo_models:
        # *Not* adding to the class attribute "inlines", as this will affect
        # all instances from this class. Explicitly adding to instance attribute.
        admin_class_instance.__dict__['inlines'] = admin_class_instance.inlines + [inline_class]
        # Because we've missed the registration, we need to perform actions
        # that were done then (on admin class instantiation)
        # NOTE(review): relies on ModelAdmin keeping an inline_instances list
        # built at __init__ time -- confirm against the Django version in use.
        inline_instance = inline_class(admin_class_instance.model, admin_site)
        admin_class_instance.inline_instances.append(inline_instance)
def _with_inline(func, admin_site, metadata_class, inline_class):
    """ Decorator for register function that adds an appropriate inline."""
    def register(model_or_iterable, admin_class=None, **options):
        # Call the (bound) function we were given.
        # We have to assume it will be bound to admin_site
        func(model_or_iterable, admin_class, **options)
        # NOTE(review): assumes a single model was passed; an iterable of
        # models would fail the registry lookup below -- confirm callers.
        _monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)
    return register
def auto_register_inlines(admin_site, metadata_class):
    """ This is a questionable function that automatically adds our metadata
    inline to all relevant models in the site.
    """
    inline_class = get_inline(metadata_class)
    # Patch every admin class already registered with this site.
    for model, admin_class_instance in admin_site._registry.items():
        _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site)

    # Monkey patch the register method to automatically add an inline for this site.
    # _with_inline() is a decorator that wraps the register function with the same injection code
    # used above (_monkey_inline), so future registrations also get the inline.
    admin_site.register = _with_inline(admin_site.register, admin_site, metadata_class, inline_class)
def core_choice_fields(metadata_class):
    """ If the 'optional' core fields (_site and _language) are required,
    list them here.
    """
    candidates = (('_site', metadata_class._meta.use_sites),
                  ('_language', metadata_class._meta.use_i18n))
    return [name for name, enabled in candidates if enabled]
list them here. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/admin.py#L187-L196 | null | # -*- coding: utf-8 -*-
from django import forms
from django.contrib import admin
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_unicode
from django.forms.models import fields_for_model
from django.utils.translation import ugettext_lazy as _
from django.utils.text import capfirst
from rollyourown.seo.utils import get_seo_content_types
from rollyourown.seo.systemviews import get_seo_views
# TODO Use groups as fieldsets
# Variants without sites support
class PathMetadataAdmin(admin.ModelAdmin):
list_display = ('_path',)
class ModelInstanceMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_content_type', '_object_id')
class ModelMetadataAdmin(admin.ModelAdmin):
list_display = ('_content_type',)
class ViewMetadataAdmin(admin.ModelAdmin):
list_display = ('_view', )
# Variants with sites support
class SitePathMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_site')
list_filter = ('_site',)
class SiteModelInstanceMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_content_type', '_object_id', '_site')
list_filter = ('_site', '_content_type')
class SiteModelMetadataAdmin(admin.ModelAdmin):
list_display = ('_content_type', '_site')
list_filter = ('_site',)
class SiteViewMetadataAdmin(admin.ModelAdmin):
list_display = ('_view', '_site')
list_filter = ('_site',)
def register_seo_admin(admin_site, metadata_class):
if metadata_class._meta.use_sites:
path_admin = SitePathMetadataAdmin
model_instance_admin = SiteModelInstanceMetadataAdmin
model_admin = SiteModelMetadataAdmin
view_admin = SiteViewMetadataAdmin
else:
path_admin = PathMetadataAdmin
model_instance_admin = ModelInstanceMetadataAdmin
model_admin = ModelMetadataAdmin
view_admin = ViewMetadataAdmin
class ModelAdmin(model_admin):
form = get_model_form(metadata_class)
class ViewAdmin(view_admin):
form = get_view_form(metadata_class)
class PathAdmin(path_admin):
form = get_path_form(metadata_class)
class ModelInstanceAdmin(model_instance_admin):
pass
_register_admin(admin_site, metadata_class._meta.get_model('path'), PathAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('modelinstance'), ModelInstanceAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('model'), ModelAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('view'), ViewAdmin)
def _register_admin(admin_site, model, admin_class):
    """ Register model in the admin, ignoring any previously registered models.
    Alternatively it could be used in the future to replace a previously
    registered model.
    """
    try:
        admin_site.register(model, admin_class)
    except admin.sites.AlreadyRegistered:
        # The model already has an admin class; keep the existing one.
        pass
class MetadataFormset(generic.BaseGenericInlineFormSet):
    """Generic inline formset that always forces its single form to save."""

    def _construct_form(self, i, **kwargs):
        """ Override the method to change the form attribute empty_permitted """
        form = super(MetadataFormset, self)._construct_form(i, **kwargs)
        # Monkey patch the form to always force a save.
        # It's unfortunate, but necessary because we always want an instance
        # Effect on performance shouldn't be too great, because there is only
        # ever one metadata attached
        form.empty_permitted = False
        form.has_changed = lambda: True
        # Set a marker on this object to prevent automatic metadata creation
        # This is seen by the post_save handler, which then skips this instance.
        # NOTE(review): inside this class body the attribute name is mangled
        # to _MetadataFormset__seo_metadata_handled -- confirm the post_save
        # handler checks the mangled name.
        if self.instance:
            self.instance.__seo_metadata_handled = True
        return form
def get_inline(metadata_class):
attrs = {
'max_num': 1,
'extra': 1,
'model': metadata_class._meta.get_model('modelinstance'),
'ct_field': "_content_type",
'ct_fk_field': "_object_id",
'formset': MetadataFormset,
}
return type('MetadataInline', (generic.GenericStackedInline,), attrs)
def get_model_form(metadata_class):
model_class = metadata_class._meta.get_model('model')
# Restrict content type choices to the models set in seo_models
content_types = get_seo_content_types(metadata_class._meta.seo_models)
content_type_choices = [(x._get_pk_val(), smart_unicode(x)) for x in ContentType.objects.filter(id__in=content_types)]
# Get a list of fields, with _content_type at the start
important_fields = ['_content_type'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
_content_type = forms.ChoiceField(label=capfirst(_("model")), choices=content_type_choices)
class Meta:
model = model_class
fields = _fields
def clean__content_type(self):
value = self.cleaned_data['_content_type']
try:
return ContentType.objects.get(pk=int(value))
except (ContentType.DoesNotExist, ValueError):
raise forms.ValidationError("Invalid ContentType")
return ModelMetadataForm
def get_path_form(metadata_class):
model_class = metadata_class._meta.get_model('path')
# Get a list of fields, with _view at the start
important_fields = ['_path'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
class Meta:
model = model_class
fields = _fields
return ModelMetadataForm
def get_view_form(metadata_class):
model_class = metadata_class._meta.get_model('view')
# Restrict content type choices to the models set in seo_models
view_choices = [(key, " ".join(key.split("_"))) for key in get_seo_views(metadata_class)]
view_choices.insert(0, ("", "---------"))
# Get a list of fields, with _view at the start
important_fields = ['_view'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
_view = forms.ChoiceField(label=capfirst(_("view")), choices=view_choices, required=False)
class Meta:
model = model_class
fields = _fields
return ModelMetadataForm
def _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site):
""" Monkey patch the inline onto the given admin_class instance. """
if model in metadata_class._meta.seo_models:
# *Not* adding to the class attribute "inlines", as this will affect
# all instances from this class. Explicitly adding to instance attribute.
admin_class_instance.__dict__['inlines'] = admin_class_instance.inlines + [inline_class]
# Because we've missed the registration, we need to perform actions
# that were done then (on admin class instantiation)
inline_instance = inline_class(admin_class_instance.model, admin_site)
admin_class_instance.inline_instances.append(inline_instance)
def _with_inline(func, admin_site, metadata_class, inline_class):
""" Decorator for register function that adds an appropriate inline."""
def register(model_or_iterable, admin_class=None, **options):
# Call the (bound) function we were given.
# We have to assume it will be bound to admin_site
func(model_or_iterable, admin_class, **options)
_monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)
return register
def auto_register_inlines(admin_site, metadata_class):
""" This is a questionable function that automatically adds our metadata
inline to all relevant models in the site.
"""
inline_class = get_inline(metadata_class)
for model, admin_class_instance in admin_site._registry.items():
_monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site)
# Monkey patch the register method to automatically add an inline for this site.
# _with_inline() is a decorator that wraps the register function with the same injection code
# used above (_monkey_inline).
admin_site.register = _with_inline(admin_site.register, admin_site, metadata_class, inline_class)
|
def _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site):
    """ Monkey patch the inline onto the given admin_class instance. """
    if model not in metadata_class._meta.seo_models:
        return
    # Add to the instance attribute only (via __dict__), so other admin
    # instances built from the same class are unaffected.
    admin_class_instance.__dict__['inlines'] = admin_class_instance.inlines + [inline_class]
    # Registration already happened, so instantiate the inline the way the
    # admin class would have done at instantiation time.
    inline_instance = inline_class(admin_class_instance.model, admin_site)
    admin_class_instance.inline_instances.append(inline_instance)
from django import forms
from django.contrib import admin
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_unicode
from django.forms.models import fields_for_model
from django.utils.translation import ugettext_lazy as _
from django.utils.text import capfirst
from rollyourown.seo.utils import get_seo_content_types
from rollyourown.seo.systemviews import get_seo_views
# TODO Use groups as fieldsets
# Variants without sites support
class PathMetadataAdmin(admin.ModelAdmin):
list_display = ('_path',)
class ModelInstanceMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_content_type', '_object_id')
class ModelMetadataAdmin(admin.ModelAdmin):
list_display = ('_content_type',)
class ViewMetadataAdmin(admin.ModelAdmin):
list_display = ('_view', )
# Variants with sites support
class SitePathMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_site')
list_filter = ('_site',)
class SiteModelInstanceMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_content_type', '_object_id', '_site')
list_filter = ('_site', '_content_type')
class SiteModelMetadataAdmin(admin.ModelAdmin):
list_display = ('_content_type', '_site')
list_filter = ('_site',)
class SiteViewMetadataAdmin(admin.ModelAdmin):
list_display = ('_view', '_site')
list_filter = ('_site',)
def register_seo_admin(admin_site, metadata_class):
if metadata_class._meta.use_sites:
path_admin = SitePathMetadataAdmin
model_instance_admin = SiteModelInstanceMetadataAdmin
model_admin = SiteModelMetadataAdmin
view_admin = SiteViewMetadataAdmin
else:
path_admin = PathMetadataAdmin
model_instance_admin = ModelInstanceMetadataAdmin
model_admin = ModelMetadataAdmin
view_admin = ViewMetadataAdmin
class ModelAdmin(model_admin):
form = get_model_form(metadata_class)
class ViewAdmin(view_admin):
form = get_view_form(metadata_class)
class PathAdmin(path_admin):
form = get_path_form(metadata_class)
class ModelInstanceAdmin(model_instance_admin):
pass
_register_admin(admin_site, metadata_class._meta.get_model('path'), PathAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('modelinstance'), ModelInstanceAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('model'), ModelAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('view'), ViewAdmin)
def _register_admin(admin_site, model, admin_class):
    """ Register model in the admin, ignoring any previously registered models.
    Alternatively it could be used in the future to replace a previously
    registered model.
    """
    try:
        admin_site.register(model, admin_class)
    except admin.sites.AlreadyRegistered:
        # The model already has an admin class; keep the existing one.
        pass
class MetadataFormset(generic.BaseGenericInlineFormSet):
def _construct_form(self, i, **kwargs):
""" Override the method to change the form attribute empty_permitted """
form = super(MetadataFormset, self)._construct_form(i, **kwargs)
# Monkey patch the form to always force a save.
# It's unfortunate, but necessary because we always want an instance
# Affect on performance shouldn't be too great, because ther is only
# ever one metadata attached
form.empty_permitted = False
form.has_changed = lambda: True
# Set a marker on this object to prevent automatic metadata creation
# This is seen by the post_save handler, which then skips this instance.
if self.instance:
self.instance.__seo_metadata_handled = True
return form
def get_inline(metadata_class):
attrs = {
'max_num': 1,
'extra': 1,
'model': metadata_class._meta.get_model('modelinstance'),
'ct_field': "_content_type",
'ct_fk_field': "_object_id",
'formset': MetadataFormset,
}
return type('MetadataInline', (generic.GenericStackedInline,), attrs)
def get_model_form(metadata_class):
model_class = metadata_class._meta.get_model('model')
# Restrict content type choices to the models set in seo_models
content_types = get_seo_content_types(metadata_class._meta.seo_models)
content_type_choices = [(x._get_pk_val(), smart_unicode(x)) for x in ContentType.objects.filter(id__in=content_types)]
# Get a list of fields, with _content_type at the start
important_fields = ['_content_type'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
_content_type = forms.ChoiceField(label=capfirst(_("model")), choices=content_type_choices)
class Meta:
model = model_class
fields = _fields
def clean__content_type(self):
value = self.cleaned_data['_content_type']
try:
return ContentType.objects.get(pk=int(value))
except (ContentType.DoesNotExist, ValueError):
raise forms.ValidationError("Invalid ContentType")
return ModelMetadataForm
def get_path_form(metadata_class):
model_class = metadata_class._meta.get_model('path')
# Get a list of fields, with _view at the start
important_fields = ['_path'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
class Meta:
model = model_class
fields = _fields
return ModelMetadataForm
def get_view_form(metadata_class):
model_class = metadata_class._meta.get_model('view')
# Restrict content type choices to the models set in seo_models
view_choices = [(key, " ".join(key.split("_"))) for key in get_seo_views(metadata_class)]
view_choices.insert(0, ("", "---------"))
# Get a list of fields, with _view at the start
important_fields = ['_view'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
_view = forms.ChoiceField(label=capfirst(_("view")), choices=view_choices, required=False)
class Meta:
model = model_class
fields = _fields
return ModelMetadataForm
def core_choice_fields(metadata_class):
""" If the 'optional' core fields (_site and _language) are required,
list them here.
"""
fields = []
if metadata_class._meta.use_sites:
fields.append('_site')
if metadata_class._meta.use_i18n:
fields.append('_language')
return fields
def _with_inline(func, admin_site, metadata_class, inline_class):
""" Decorator for register function that adds an appropriate inline."""
def register(model_or_iterable, admin_class=None, **options):
# Call the (bound) function we were given.
# We have to assume it will be bound to admin_site
func(model_or_iterable, admin_class, **options)
_monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)
return register
def auto_register_inlines(admin_site, metadata_class):
""" This is a questionable function that automatically adds our metadata
inline to all relevant models in the site.
"""
inline_class = get_inline(metadata_class)
for model, admin_class_instance in admin_site._registry.items():
_monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site)
# Monkey patch the register method to automatically add an inline for this site.
# _with_inline() is a decorator that wraps the register function with the same injection code
# used above (_monkey_inline).
admin_site.register = _with_inline(admin_site.register, admin_site, metadata_class, inline_class)
|
willhardy/django-seo | rollyourown/seo/admin.py | _with_inline | python | def _with_inline(func, admin_site, metadata_class, inline_class):
""" Decorator for register function that adds an appropriate inline."""
def register(model_or_iterable, admin_class=None, **options):
# Call the (bound) function we were given.
# We have to assume it will be bound to admin_site
func(model_or_iterable, admin_class, **options)
_monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)
return register | Decorator for register function that adds an appropriate inline. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/admin.py#L211-L220 | null | # -*- coding: utf-8 -*-
from django import forms
from django.contrib import admin
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_unicode
from django.forms.models import fields_for_model
from django.utils.translation import ugettext_lazy as _
from django.utils.text import capfirst
from rollyourown.seo.utils import get_seo_content_types
from rollyourown.seo.systemviews import get_seo_views
# TODO Use groups as fieldsets
# Varients without sites support
class PathMetadataAdmin(admin.ModelAdmin):
list_display = ('_path',)
class ModelInstanceMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_content_type', '_object_id')
class ModelMetadataAdmin(admin.ModelAdmin):
list_display = ('_content_type',)
class ViewMetadataAdmin(admin.ModelAdmin):
list_display = ('_view', )
# Varients with sites support
class SitePathMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_site')
list_filter = ('_site',)
class SiteModelInstanceMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_content_type', '_object_id', '_site')
list_filter = ('_site', '_content_type')
class SiteModelMetadataAdmin(admin.ModelAdmin):
list_display = ('_content_type', '_site')
list_filter = ('_site',)
class SiteViewMetadataAdmin(admin.ModelAdmin):
list_display = ('_view', '_site')
list_filter = ('_site',)
def register_seo_admin(admin_site, metadata_class):
if metadata_class._meta.use_sites:
path_admin = SitePathMetadataAdmin
model_instance_admin = SiteModelInstanceMetadataAdmin
model_admin = SiteModelMetadataAdmin
view_admin = SiteViewMetadataAdmin
else:
path_admin = PathMetadataAdmin
model_instance_admin = ModelInstanceMetadataAdmin
model_admin = ModelMetadataAdmin
view_admin = ViewMetadataAdmin
class ModelAdmin(model_admin):
form = get_model_form(metadata_class)
class ViewAdmin(view_admin):
form = get_view_form(metadata_class)
class PathAdmin(path_admin):
form = get_path_form(metadata_class)
class ModelInstanceAdmin(model_instance_admin):
pass
_register_admin(admin_site, metadata_class._meta.get_model('path'), PathAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('modelinstance'), ModelInstanceAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('model'), ModelAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('view'), ViewAdmin)
def _register_admin(admin_site, model, admin_class):
""" Register model in the admin, ignoring any previously registered models.
Alternatively it could be used in the future to replace a previously
registered model.
"""
try:
admin_site.register(model, admin_class)
except admin.sites.AlreadyRegistered:
pass
class MetadataFormset(generic.BaseGenericInlineFormSet):
def _construct_form(self, i, **kwargs):
""" Override the method to change the form attribute empty_permitted """
form = super(MetadataFormset, self)._construct_form(i, **kwargs)
# Monkey patch the form to always force a save.
# It's unfortunate, but necessary because we always want an instance
# Affect on performance shouldn't be too great, because ther is only
# ever one metadata attached
form.empty_permitted = False
form.has_changed = lambda: True
# Set a marker on this object to prevent automatic metadata creation
# This is seen by the post_save handler, which then skips this instance.
if self.instance:
self.instance.__seo_metadata_handled = True
return form
def get_inline(metadata_class):
attrs = {
'max_num': 1,
'extra': 1,
'model': metadata_class._meta.get_model('modelinstance'),
'ct_field': "_content_type",
'ct_fk_field': "_object_id",
'formset': MetadataFormset,
}
return type('MetadataInline', (generic.GenericStackedInline,), attrs)
def get_model_form(metadata_class):
model_class = metadata_class._meta.get_model('model')
# Restrict content type choices to the models set in seo_models
content_types = get_seo_content_types(metadata_class._meta.seo_models)
content_type_choices = [(x._get_pk_val(), smart_unicode(x)) for x in ContentType.objects.filter(id__in=content_types)]
# Get a list of fields, with _content_type at the start
important_fields = ['_content_type'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
_content_type = forms.ChoiceField(label=capfirst(_("model")), choices=content_type_choices)
class Meta:
model = model_class
fields = _fields
def clean__content_type(self):
value = self.cleaned_data['_content_type']
try:
return ContentType.objects.get(pk=int(value))
except (ContentType.DoesNotExist, ValueError):
raise forms.ValidationError("Invalid ContentType")
return ModelMetadataForm
def get_path_form(metadata_class):
model_class = metadata_class._meta.get_model('path')
# Get a list of fields, with _view at the start
important_fields = ['_path'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
class Meta:
model = model_class
fields = _fields
return ModelMetadataForm
def get_view_form(metadata_class):
model_class = metadata_class._meta.get_model('view')
# Restrict content type choices to the models set in seo_models
view_choices = [(key, " ".join(key.split("_"))) for key in get_seo_views(metadata_class)]
view_choices.insert(0, ("", "---------"))
# Get a list of fields, with _view at the start
important_fields = ['_view'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
_view = forms.ChoiceField(label=capfirst(_("view")), choices=view_choices, required=False)
class Meta:
model = model_class
fields = _fields
return ModelMetadataForm
def core_choice_fields(metadata_class):
""" If the 'optional' core fields (_site and _language) are required,
list them here.
"""
fields = []
if metadata_class._meta.use_sites:
fields.append('_site')
if metadata_class._meta.use_i18n:
fields.append('_language')
return fields
def _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site):
""" Monkey patch the inline onto the given admin_class instance. """
if model in metadata_class._meta.seo_models:
# *Not* adding to the class attribute "inlines", as this will affect
# all instances from this class. Explicitly adding to instance attribute.
admin_class_instance.__dict__['inlines'] = admin_class_instance.inlines + [inline_class]
# Because we've missed the registration, we need to perform actions
# that were done then (on admin class instantiation)
inline_instance = inline_class(admin_class_instance.model, admin_site)
admin_class_instance.inline_instances.append(inline_instance)
def auto_register_inlines(admin_site, metadata_class):
""" This is a questionable function that automatically adds our metadata
inline to all relevant models in the site.
"""
inline_class = get_inline(metadata_class)
for model, admin_class_instance in admin_site._registry.items():
_monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site)
# Monkey patch the register method to automatically add an inline for this site.
# _with_inline() is a decorator that wraps the register function with the same injection code
# used above (_monkey_inline).
admin_site.register = _with_inline(admin_site.register, admin_site, metadata_class, inline_class)
|
willhardy/django-seo | rollyourown/seo/admin.py | auto_register_inlines | python | def auto_register_inlines(admin_site, metadata_class):
inline_class = get_inline(metadata_class)
for model, admin_class_instance in admin_site._registry.items():
_monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site)
# Monkey patch the register method to automatically add an inline for this site.
# _with_inline() is a decorator that wraps the register function with the same injection code
# used above (_monkey_inline).
admin_site.register = _with_inline(admin_site.register, admin_site, metadata_class, inline_class) | This is a questionable function that automatically adds our metadata
inline to all relevant models in the site. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/admin.py#L222-L234 | [
"def get_inline(metadata_class):\n attrs = {\n 'max_num': 1, \n 'extra': 1, \n 'model': metadata_class._meta.get_model('modelinstance'), \n 'ct_field': \"_content_type\",\n 'ct_fk_field': \"_object_id\",\n 'formset': MetadataFormset,\n }\n return type('MetadataInline', (generic.GenericStackedInline,), attrs)\n",
"def _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site):\n \"\"\" Monkey patch the inline onto the given admin_class instance. \"\"\"\n if model in metadata_class._meta.seo_models:\n # *Not* adding to the class attribute \"inlines\", as this will affect\n # all instances from this class. Explicitly adding to instance attribute.\n admin_class_instance.__dict__['inlines'] = admin_class_instance.inlines + [inline_class]\n\n # Because we've missed the registration, we need to perform actions\n # that were done then (on admin class instantiation)\n inline_instance = inline_class(admin_class_instance.model, admin_site)\n admin_class_instance.inline_instances.append(inline_instance)\n",
"def _with_inline(func, admin_site, metadata_class, inline_class):\n \"\"\" Decorator for register function that adds an appropriate inline.\"\"\" \n\n def register(model_or_iterable, admin_class=None, **options):\n # Call the (bound) function we were given.\n # We have to assume it will be bound to admin_site\n func(model_or_iterable, admin_class, **options)\n _monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)\n\n return register\n"
] | # -*- coding: utf-8 -*-
from django import forms
from django.contrib import admin
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_unicode
from django.forms.models import fields_for_model
from django.utils.translation import ugettext_lazy as _
from django.utils.text import capfirst
from rollyourown.seo.utils import get_seo_content_types
from rollyourown.seo.systemviews import get_seo_views
# TODO Use groups as fieldsets
# Varients without sites support
class PathMetadataAdmin(admin.ModelAdmin):
list_display = ('_path',)
class ModelInstanceMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_content_type', '_object_id')
class ModelMetadataAdmin(admin.ModelAdmin):
list_display = ('_content_type',)
class ViewMetadataAdmin(admin.ModelAdmin):
list_display = ('_view', )
# Varients with sites support
class SitePathMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_site')
list_filter = ('_site',)
class SiteModelInstanceMetadataAdmin(admin.ModelAdmin):
list_display = ('_path', '_content_type', '_object_id', '_site')
list_filter = ('_site', '_content_type')
class SiteModelMetadataAdmin(admin.ModelAdmin):
list_display = ('_content_type', '_site')
list_filter = ('_site',)
class SiteViewMetadataAdmin(admin.ModelAdmin):
list_display = ('_view', '_site')
list_filter = ('_site',)
def register_seo_admin(admin_site, metadata_class):
if metadata_class._meta.use_sites:
path_admin = SitePathMetadataAdmin
model_instance_admin = SiteModelInstanceMetadataAdmin
model_admin = SiteModelMetadataAdmin
view_admin = SiteViewMetadataAdmin
else:
path_admin = PathMetadataAdmin
model_instance_admin = ModelInstanceMetadataAdmin
model_admin = ModelMetadataAdmin
view_admin = ViewMetadataAdmin
class ModelAdmin(model_admin):
form = get_model_form(metadata_class)
class ViewAdmin(view_admin):
form = get_view_form(metadata_class)
class PathAdmin(path_admin):
form = get_path_form(metadata_class)
class ModelInstanceAdmin(model_instance_admin):
pass
_register_admin(admin_site, metadata_class._meta.get_model('path'), PathAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('modelinstance'), ModelInstanceAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('model'), ModelAdmin)
_register_admin(admin_site, metadata_class._meta.get_model('view'), ViewAdmin)
def _register_admin(admin_site, model, admin_class):
""" Register model in the admin, ignoring any previously registered models.
Alternatively it could be used in the future to replace a previously
registered model.
"""
try:
admin_site.register(model, admin_class)
except admin.sites.AlreadyRegistered:
pass
class MetadataFormset(generic.BaseGenericInlineFormSet):
def _construct_form(self, i, **kwargs):
""" Override the method to change the form attribute empty_permitted """
form = super(MetadataFormset, self)._construct_form(i, **kwargs)
# Monkey patch the form to always force a save.
# It's unfortunate, but necessary because we always want an instance
# Affect on performance shouldn't be too great, because ther is only
# ever one metadata attached
form.empty_permitted = False
form.has_changed = lambda: True
# Set a marker on this object to prevent automatic metadata creation
# This is seen by the post_save handler, which then skips this instance.
if self.instance:
self.instance.__seo_metadata_handled = True
return form
def get_inline(metadata_class):
attrs = {
'max_num': 1,
'extra': 1,
'model': metadata_class._meta.get_model('modelinstance'),
'ct_field': "_content_type",
'ct_fk_field': "_object_id",
'formset': MetadataFormset,
}
return type('MetadataInline', (generic.GenericStackedInline,), attrs)
def get_model_form(metadata_class):
model_class = metadata_class._meta.get_model('model')
# Restrict content type choices to the models set in seo_models
content_types = get_seo_content_types(metadata_class._meta.seo_models)
content_type_choices = [(x._get_pk_val(), smart_unicode(x)) for x in ContentType.objects.filter(id__in=content_types)]
# Get a list of fields, with _content_type at the start
important_fields = ['_content_type'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
_content_type = forms.ChoiceField(label=capfirst(_("model")), choices=content_type_choices)
class Meta:
model = model_class
fields = _fields
def clean__content_type(self):
value = self.cleaned_data['_content_type']
try:
return ContentType.objects.get(pk=int(value))
except (ContentType.DoesNotExist, ValueError):
raise forms.ValidationError("Invalid ContentType")
return ModelMetadataForm
def get_path_form(metadata_class):
model_class = metadata_class._meta.get_model('path')
# Get a list of fields, with _view at the start
important_fields = ['_path'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
class Meta:
model = model_class
fields = _fields
return ModelMetadataForm
def get_view_form(metadata_class):
model_class = metadata_class._meta.get_model('view')
# Restrict content type choices to the models set in seo_models
view_choices = [(key, " ".join(key.split("_"))) for key in get_seo_views(metadata_class)]
view_choices.insert(0, ("", "---------"))
# Get a list of fields, with _view at the start
important_fields = ['_view'] + core_choice_fields(metadata_class)
_fields = important_fields + fields_for_model(model_class, exclude=important_fields).keys()
class ModelMetadataForm(forms.ModelForm):
_view = forms.ChoiceField(label=capfirst(_("view")), choices=view_choices, required=False)
class Meta:
model = model_class
fields = _fields
return ModelMetadataForm
def core_choice_fields(metadata_class):
""" If the 'optional' core fields (_site and _language) are required,
list them here.
"""
fields = []
if metadata_class._meta.use_sites:
fields.append('_site')
if metadata_class._meta.use_i18n:
fields.append('_language')
return fields
def _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site):
""" Monkey patch the inline onto the given admin_class instance. """
if model in metadata_class._meta.seo_models:
# *Not* adding to the class attribute "inlines", as this will affect
# all instances from this class. Explicitly adding to instance attribute.
admin_class_instance.__dict__['inlines'] = admin_class_instance.inlines + [inline_class]
# Because we've missed the registration, we need to perform actions
# that were done then (on admin class instantiation)
inline_instance = inline_class(admin_class_instance.model, admin_site)
admin_class_instance.inline_instances.append(inline_instance)
def _with_inline(func, admin_site, metadata_class, inline_class):
""" Decorator for register function that adds an appropriate inline."""
def register(model_or_iterable, admin_class=None, **options):
# Call the (bound) function we were given.
# We have to assume it will be bound to admin_site
func(model_or_iterable, admin_class, **options)
_monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)
return register
|
willhardy/django-seo | rollyourown/seo/base.py | get_linked_metadata | python | def get_linked_metadata(obj, name=None, context=None, site=None, language=None):
# XXX Check that 'modelinstance' and 'model' metadata are installed in backends
# I believe that get_model() would return None if not
Metadata = _get_metadata_model(name)
InstanceMetadata = Metadata._meta.get_model('modelinstance')
ModelMetadata = Metadata._meta.get_model('model')
content_type = ContentType.objects.get_for_model(obj)
instances = []
if InstanceMetadata is not None:
try:
instance_md = InstanceMetadata.objects.get(_content_type=content_type, _object_id=obj.pk)
except InstanceMetadata.DoesNotExist:
instance_md = InstanceMetadata(_content_object=obj)
instances.append(instance_md)
if ModelMetadata is not None:
try:
model_md = ModelMetadata.objects.get(_content_type=content_type)
except ModelMetadata.DoesNotExist:
model_md = ModelMetadata(_content_type=content_type)
instances.append(model_md)
return FormattedMetadata(Metadata, instances, '', site, language) | Gets metadata linked from the given object. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L262-L283 | [
"def _get_metadata_model(name=None):\n # Find registered Metadata object\n if name is not None:\n try:\n return registry[name]\n except KeyError:\n if len(registry) == 1:\n valid_names = u'Try using the name \"%s\" or simply leaving it out altogether.'% registry.keys()[0]\n else:\n valid_names = u\"Valid names are \" + u\", \".join(u'\"%s\"' % k for k in registry.keys())\n raise Exception(u\"Metadata definition with name \\\"%s\\\" does not exist.\\n%s\" % (name, valid_names))\n else:\n assert len(registry) == 1, \"You must have exactly one Metadata class, if using get_metadata() without a 'name' parameter.\"\n return registry.values()[0]\n"
] | # -*- coding: utf-8 -*-
# TODO:
# * Move/rename namespace polluting attributes
# * Documentation
# * Make backends optional: Meta.backends = (path, modelinstance/model, view)
import hashlib
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.datastructures import SortedDict
from django.utils.functional import curry
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.utils.safestring import mark_safe
from django.core.cache import cache
from django.utils.encoding import iri_to_uri
from rollyourown.seo.utils import NotSet, Literal
from rollyourown.seo.options import Options
from rollyourown.seo.fields import MetadataField, Tag, MetaTag, KeywordTag, Raw
from rollyourown.seo.backends import backend_registry, RESERVED_FIELD_NAMES
registry = SortedDict()
class FormattedMetadata(object):
""" Allows convenient access to selected metadata.
Metadata for each field may be sourced from any one of the relevant instances passed.
"""
def __init__(self, metadata, instances, path, site=None, language=None):
self.__metadata = metadata
if metadata._meta.use_cache:
if metadata._meta.use_sites and site:
hexpath = hashlib.md5(iri_to_uri(site.domain+path)).hexdigest()
else:
hexpath = hashlib.md5(iri_to_uri(path)).hexdigest()
if metadata._meta.use_i18n:
self.__cache_prefix = 'rollyourown.seo.%s.%s.%s' % (self.__metadata.__class__.__name__, hexpath, language)
else:
self.__cache_prefix = 'rollyourown.seo.%s.%s' % (self.__metadata.__class__.__name__, hexpath)
else:
self.__cache_prefix = None
self.__instances_original = instances
self.__instances_cache = []
def __instances(self):
""" Cache instances, allowing generators to be used and reused.
This fills a cache as the generator gets emptied, eventually
reading exclusively from the cache.
"""
for instance in self.__instances_cache:
yield instance
for instance in self.__instances_original:
self.__instances_cache.append(instance)
yield instance
def _resolve_value(self, name):
""" Returns an appropriate value for the given name.
This simply asks each of the instances for a value.
"""
for instance in self.__instances():
value = instance._resolve_value(name)
if value:
return value
# Otherwise, return an appropriate default value (populate_from)
# TODO: This is duplicated in meta_models. Move this to a common home.
if name in self.__metadata._meta.elements:
populate_from = self.__metadata._meta.elements[name].populate_from
if callable(populate_from):
return populate_from(None)
elif isinstance(populate_from, Literal):
return populate_from.value
elif populate_from is not NotSet:
return self._resolve_value(populate_from)
def __getattr__(self, name):
# If caching is enabled, work out a key
if self.__cache_prefix:
cache_key = '%s.%s' % (self.__cache_prefix, name)
value = cache.get(cache_key)
else:
cache_key = None
value = None
# Look for a group called "name"
if name in self.__metadata._meta.groups:
if value is not None:
return value or None
value = '\n'.join(unicode(BoundMetadataField(self.__metadata._meta.elements[f], self._resolve_value(f))) for f in self.__metadata._meta.groups[name]).strip()
# Look for an element called "name"
elif name in self.__metadata._meta.elements:
if value is not None:
return BoundMetadataField(self.__metadata._meta.elements[name], value or None)
value = self._resolve_value(name)
if cache_key is not None:
cache.set(cache_key, value or '')
return BoundMetadataField(self.__metadata._meta.elements[name], value)
else:
raise AttributeError
if cache_key is not None:
cache.set(cache_key, value or '')
return value or None
def __unicode__(self):
""" String version of this object is the html output of head elements. """
if self.__cache_prefix is not None:
value = cache.get(self.__cache_prefix)
else:
value = None
if value is None:
value = mark_safe(u'\n'.join(unicode(getattr(self, f)) for f,e in self.__metadata._meta.elements.items() if e.head))
if self.__cache_prefix is not None:
cache.set(self.__cache_prefix, value or '')
return value
class BoundMetadataField(object):
""" An object to help provide templates with access to a "bound" metadata field. """
def __init__(self, field, value):
self.field = field
if value:
self.value = field.clean(value)
else:
self.value = None
def __unicode__(self):
if self.value:
return mark_safe(self.field.render(self.value))
else:
return u""
def __str__(self):
return self.__unicode__().encode("ascii", "ignore")
class MetadataBase(type):
def __new__(cls, name, bases, attrs):
# TODO: Think of a better test to avoid processing Metadata parent class
if bases == (object,):
return type.__new__(cls, name, bases, attrs)
# Save options as a dict for now (we will be editing them)
# TODO: Is this necessary, should we bother relaying Django Meta options?
Meta = attrs.pop('Meta', {})
if Meta:
Meta = Meta.__dict__.copy()
# Remove our options from Meta, so Django won't complain
help_text = attrs.pop('HelpText', {})
# TODO: Is this necessary
if help_text:
help_text = help_text.__dict__.copy()
options = Options(Meta, help_text)
# Collect and sort our elements
elements = [(key, attrs.pop(key)) for key, obj in attrs.items()
if isinstance(obj, MetadataField)]
elements.sort(lambda x, y: cmp(x[1].creation_counter,
y[1].creation_counter))
elements = SortedDict(elements)
# Validation:
# TODO: Write a test framework for seo.Metadata validation
# Check that no group names clash with element names
for key,members in options.groups.items():
assert key not in elements, "Group name '%s' clashes with field name" % key
for member in members:
assert member in elements, "Group member '%s' is not a valid field" % member
# Check that the names of the elements are not going to clash with a model field
for key in elements:
assert key not in RESERVED_FIELD_NAMES, "Field name '%s' is not allowed" % key
# Preprocessing complete, here is the new class
new_class = type.__new__(cls, name, bases, attrs)
options.metadata = new_class
new_class._meta = options
# Some useful attributes
options._update_from_name(name)
options._register_elements(elements)
try:
for backend_name in options.backends:
new_class._meta._add_backend(backend_registry[backend_name])
for backend_name in options.backends:
backend_registry[backend_name].validate(options)
except KeyError:
raise Exception('Metadata backend "%s" is not installed.' % backend_name)
#new_class._meta._add_backend(PathBackend)
#new_class._meta._add_backend(ModelInstanceBackend)
#new_class._meta._add_backend(ModelBackend)
#new_class._meta._add_backend(ViewBackend)
registry[name] = new_class
return new_class
# TODO: Move this function out of the way (subclasses will want to define their own attributes)
def _get_formatted_data(cls, path, context=None, site=None, language=None):
""" Return an object to conveniently access the appropriate values. """
return FormattedMetadata(cls(), cls._get_instances(path, context, site, language), path, site, language)
# TODO: Move this function out of the way (subclasses will want to define their own attributes)
def _get_instances(cls, path, context=None, site=None, language=None):
""" A sequence of instances to discover metadata.
Each instance from each backend is looked up when possible/necessary.
This is a generator to eliminate unnecessary queries.
"""
backend_context = {'view_context': context }
for model in cls._meta.models.values():
for instance in model.objects.get_instances(path, site, language, backend_context) or []:
if hasattr(instance, '_process_context'):
instance._process_context(backend_context)
yield instance
class Metadata(object):
__metaclass__ = MetadataBase
def _get_metadata_model(name=None):
# Find registered Metadata object
if name is not None:
try:
return registry[name]
except KeyError:
if len(registry) == 1:
valid_names = u'Try using the name "%s" or simply leaving it out altogether.'% registry.keys()[0]
else:
valid_names = u"Valid names are " + u", ".join(u'"%s"' % k for k in registry.keys())
raise Exception(u"Metadata definition with name \"%s\" does not exist.\n%s" % (name, valid_names))
else:
assert len(registry) == 1, "You must have exactly one Metadata class, if using get_metadata() without a 'name' parameter."
return registry.values()[0]
def get_metadata(path, name=None, context=None, site=None, language=None):
metadata = _get_metadata_model(name)
return metadata._get_formatted_data(path, context, site, language)
def create_metadata_instance(metadata_class, instance):
# If this instance is marked as handled, don't do anything
# This typically means that the django admin will add metadata
# using eg an inline.
if getattr(instance, '_MetadataFormset__seo_metadata_handled', False):
return
metadata = None
content_type = ContentType.objects.get_for_model(instance)
# If this object does not define a path, don't worry about automatic update
try:
path = instance.get_absolute_url()
except AttributeError:
return
# Look for an existing object with this path
language = getattr(instance, '_language', None)
site = getattr(instance, '_site', None)
for md in metadata_class.objects.get_instances(path, site, language):
# If another object has the same path, remove the path.
# It's harsh, but we need a unique path and will assume the other
# link is outdated.
if md._content_type != content_type or md._object_id != instance.pk:
md._path = md._content_object.get_absolute_url()
md.save()
# Move on, this metadata instance isn't for us
md = None
else:
# This is our instance!
metadata = md
# If the path-based search didn't work, look for (or create) an existing
# instance linked to this object.
if not metadata:
metadata, md_created = metadata_class.objects.get_or_create(_content_type=content_type, _object_id=instance.pk)
metadata._path = path
metadata.save()
def populate_metadata(model, MetadataClass):
    """ For a given model and metadata class, ensure there is metadata for every instance.

    Iterates over every saved instance of ``model`` and delegates to
    ``create_metadata_instance`` to create or refresh its metadata record.
    """
    # Fix: the original computed ContentType.objects.get_for_model(model)
    # into a local that was never used — a dead variable and a needless
    # content-type lookup. create_metadata_instance derives it per instance.
    for instance in model.objects.all():
        create_metadata_instance(MetadataClass, instance)
def _update_callback(model_class, sender, instance, created, **kwargs):
    """post_save handler: create or refresh the metadata for ``instance``.

    NB: it is theoretically possible for this to produce two records sharing
    a generic foreign key; with non-overlapping URLs it will not happen.
    Avoiding duplicate path entries is treated as the higher priority.
    """
    create_metadata_instance(model_class, instance)
def _delete_callback(model_class, sender, instance, **kwargs):
    """pre_delete handler: drop metadata records tied to the deleted instance."""
    ct = ContentType.objects.get_for_model(instance)
    stale = model_class.objects.filter(_content_type=ct, _object_id=instance.pk)
    stale.delete()
def register_signals():
    """Wire post_save/pre_delete handlers for every registered Metadata class.

    Only classes with a 'modelinstance' backend model get handlers; the
    models to watch come from each class's ``seo_models`` Meta option.
    """
    for metadata_class in registry.values():
        model_instance = metadata_class._meta.get_model('modelinstance')
        if model_instance is not None:
            # Bind this backend model into the generic callbacks.
            update_callback = curry(_update_callback, model_class=model_instance)
            delete_callback = curry(_delete_callback, model_class=model_instance)
            ## Connect the models listed in settings to the update callback.
            for model in metadata_class._meta.seo_models:
                # weak=False: the curried callbacks are local objects, so the
                # dispatcher must hold strong references to keep them alive.
                models.signals.post_save.connect(update_callback, sender=model, weak=False)
                models.signals.pre_delete.connect(delete_callback, sender=model, weak=False)
|
willhardy/django-seo | rollyourown/seo/base.py | populate_metadata | python | def populate_metadata(model, MetadataClass):
content_type = ContentType.objects.get_for_model(model)
for instance in model.objects.all():
create_metadata_instance(MetadataClass, instance) | For a given model and metadata class, ensure there is metadata for every instance. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L326-L331 | [
"def create_metadata_instance(metadata_class, instance):\n # If this instance is marked as handled, don't do anything\n # This typically means that the django admin will add metadata \n # using eg an inline.\n if getattr(instance, '_MetadataFormset__seo_metadata_handled', False):\n return\n\n metadata = None\n content_type = ContentType.objects.get_for_model(instance)\n\n # If this object does not define a path, don't worry about automatic update\n try:\n path = instance.get_absolute_url()\n except AttributeError:\n return\n\n # Look for an existing object with this path\n language = getattr(instance, '_language', None)\n site = getattr(instance, '_site', None)\n for md in metadata_class.objects.get_instances(path, site, language):\n # If another object has the same path, remove the path.\n # It's harsh, but we need a unique path and will assume the other\n # link is outdated.\n if md._content_type != content_type or md._object_id != instance.pk:\n md._path = md._content_object.get_absolute_url()\n md.save()\n # Move on, this metadata instance isn't for us\n md = None\n else:\n # This is our instance!\n metadata = md\n\n # If the path-based search didn't work, look for (or create) an existing\n # instance linked to this object.\n if not metadata:\n metadata, md_created = metadata_class.objects.get_or_create(_content_type=content_type, _object_id=instance.pk)\n metadata._path = path\n metadata.save()\n"
] | # -*- coding: utf-8 -*-
# TODO:
# * Move/rename namespace polluting attributes
# * Documentation
# * Make backends optional: Meta.backends = (path, modelinstance/model, view)
import hashlib
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.datastructures import SortedDict
from django.utils.functional import curry
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.utils.safestring import mark_safe
from django.core.cache import cache
from django.utils.encoding import iri_to_uri
from rollyourown.seo.utils import NotSet, Literal
from rollyourown.seo.options import Options
from rollyourown.seo.fields import MetadataField, Tag, MetaTag, KeywordTag, Raw
from rollyourown.seo.backends import backend_registry, RESERVED_FIELD_NAMES
registry = SortedDict()
class FormattedMetadata(object):
    """ Allows convenient access to selected metadata.
        Metadata for each field may be sourced from any one of the relevant
        instances passed.
    """

    def __init__(self, metadata, instances, path, site=None, language=None):
        # metadata: the Metadata definition; instances: iterable (possibly a
        # generator) of backend model instances to consult, in priority order.
        self.__metadata = metadata
        if metadata._meta.use_cache:
            # Cache keys embed an md5 of the (site-qualified) path so they
            # stay short and safe for any URL length/characters.
            if metadata._meta.use_sites and site:
                hexpath = hashlib.md5(iri_to_uri(site.domain+path)).hexdigest()
            else:
                hexpath = hashlib.md5(iri_to_uri(path)).hexdigest()
            if metadata._meta.use_i18n:
                self.__cache_prefix = 'rollyourown.seo.%s.%s.%s' % (self.__metadata.__class__.__name__, hexpath, language)
            else:
                self.__cache_prefix = 'rollyourown.seo.%s.%s' % (self.__metadata.__class__.__name__, hexpath)
        else:
            # None disables all cache reads/writes below.
            self.__cache_prefix = None
        self.__instances_original = instances
        self.__instances_cache = []

    def __instances(self):
        """ Cache instances, allowing generators to be used and reused.
            This fills a cache as the generator gets emptied, eventually
            reading exclusively from the cache.
        """
        for instance in self.__instances_cache:
            yield instance
        for instance in self.__instances_original:
            self.__instances_cache.append(instance)
            yield instance

    def _resolve_value(self, name):
        """ Returns an appropriate value for the given name.
            This simply asks each of the instances for a value.
        """
        for instance in self.__instances():
            value = instance._resolve_value(name)
            if value:
                return value
        # Otherwise, return an appropriate default value (populate_from)
        # TODO: This is duplicated in meta_models. Move this to a common home.
        if name in self.__metadata._meta.elements:
            populate_from = self.__metadata._meta.elements[name].populate_from
            if callable(populate_from):
                return populate_from(None)
            elif isinstance(populate_from, Literal):
                return populate_from.value
            elif populate_from is not NotSet:
                # populate_from names another field: resolve it recursively.
                return self._resolve_value(populate_from)

    def __getattr__(self, name):
        # Lazy resolution of metadata groups/elements, fronted by the cache.
        # The cache stores '' as the "no value" sentinel (mapped back to None).
        # If caching is enabled, work out a key
        if self.__cache_prefix:
            cache_key = '%s.%s' % (self.__cache_prefix, name)
            value = cache.get(cache_key)
        else:
            cache_key = None
            value = None
        # Look for a group called "name"
        if name in self.__metadata._meta.groups:
            if value is not None:
                return value or None
            # Render each member field and join; falls through to the shared
            # cache-set/return at the bottom.
            value = '\n'.join(unicode(BoundMetadataField(self.__metadata._meta.elements[f], self._resolve_value(f))) for f in self.__metadata._meta.groups[name]).strip()
        # Look for an element called "name"
        elif name in self.__metadata._meta.elements:
            if value is not None:
                return BoundMetadataField(self.__metadata._meta.elements[name], value or None)
            value = self._resolve_value(name)
            if cache_key is not None:
                cache.set(cache_key, value or '')
            return BoundMetadataField(self.__metadata._meta.elements[name], value)
        else:
            raise AttributeError
        if cache_key is not None:
            cache.set(cache_key, value or '')
        return value or None

    def __unicode__(self):
        """ String version of this object is the html output of head elements. """
        if self.__cache_prefix is not None:
            value = cache.get(self.__cache_prefix)
        else:
            value = None
        if value is None:
            # Render every element flagged for the <head>, one per line.
            value = mark_safe(u'\n'.join(unicode(getattr(self, f)) for f,e in self.__metadata._meta.elements.items() if e.head))
            if self.__cache_prefix is not None:
                cache.set(self.__cache_prefix, value or '')
        return value
class BoundMetadataField(object):
    """ An object to help provide templates with access to a "bound" metadata field. """

    def __init__(self, field, value):
        # Keep the field definition and clean the value up-front (if any).
        self.field = field
        self.value = field.clean(value) if value else None

    def __unicode__(self):
        # An empty/missing value renders as nothing at all.
        if not self.value:
            return u""
        return mark_safe(self.field.render(self.value))

    def __str__(self):
        return self.__unicode__().encode("ascii", "ignore")
class MetadataBase(type):
    """Metaclass (Python 2 style) for Metadata definitions.

    Collects MetadataField attributes off the class body, validates them,
    builds an Options object on ``_meta``, attaches the configured backends
    and registers the finished class in the module-level ``registry``.
    """

    def __new__(cls, name, bases, attrs):
        # TODO: Think of a better test to avoid processing Metadata parent class
        if bases == (object,):
            return type.__new__(cls, name, bases, attrs)
        # Save options as a dict for now (we will be editing them)
        # TODO: Is this necessary, should we bother relaying Django Meta options?
        Meta = attrs.pop('Meta', {})
        if Meta:
            Meta = Meta.__dict__.copy()
        # Remove our options from Meta, so Django won't complain
        help_text = attrs.pop('HelpText', {})
        # TODO: Is this necessary
        if help_text:
            help_text = help_text.__dict__.copy()
        options = Options(Meta, help_text)
        # Collect and sort our elements
        # (popping while iterating is safe: Py2 items() returns a list copy)
        elements = [(key, attrs.pop(key)) for key, obj in attrs.items()
                    if isinstance(obj, MetadataField)]
        # Python 2 cmp-style sort: order by field declaration order.
        elements.sort(lambda x, y: cmp(x[1].creation_counter,
                                       y[1].creation_counter))
        elements = SortedDict(elements)
        # Validation:
        # TODO: Write a test framework for seo.Metadata validation
        # Check that no group names clash with element names
        for key,members in options.groups.items():
            assert key not in elements, "Group name '%s' clashes with field name" % key
            for member in members:
                assert member in elements, "Group member '%s' is not a valid field" % member
        # Check that the names of the elements are not going to clash with a model field
        for key in elements:
            assert key not in RESERVED_FIELD_NAMES, "Field name '%s' is not allowed" % key
        # Preprocessing complete, here is the new class
        new_class = type.__new__(cls, name, bases, attrs)
        options.metadata = new_class
        new_class._meta = options
        # Some useful attributes
        options._update_from_name(name)
        options._register_elements(elements)
        try:
            for backend_name in options.backends:
                new_class._meta._add_backend(backend_registry[backend_name])
            for backend_name in options.backends:
                backend_registry[backend_name].validate(options)
        except KeyError:
            # backend_name still holds the offending name (Py2 scoping).
            raise Exception('Metadata backend "%s" is not installed.' % backend_name)
        #new_class._meta._add_backend(PathBackend)
        #new_class._meta._add_backend(ModelInstanceBackend)
        #new_class._meta._add_backend(ModelBackend)
        #new_class._meta._add_backend(ViewBackend)
        registry[name] = new_class
        return new_class

    # NOTE: methods defined on the metaclass behave like class-level methods
    # of every Metadata subclass.
    # TODO: Move this function out of the way (subclasses will want to define their own attributes)
    def _get_formatted_data(cls, path, context=None, site=None, language=None):
        """ Return an object to conveniently access the appropriate values. """
        return FormattedMetadata(cls(), cls._get_instances(path, context, site, language), path, site, language)

    # TODO: Move this function out of the way (subclasses will want to define their own attributes)
    def _get_instances(cls, path, context=None, site=None, language=None):
        """ A sequence of instances to discover metadata.
            Each instance from each backend is looked up when possible/necessary.
            This is a generator to eliminate unnecessary queries.
        """
        backend_context = {'view_context': context }
        for model in cls._meta.models.values():
            for instance in model.objects.get_instances(path, site, language, backend_context) or []:
                # Let backends feed information forward (e.g. resolved view).
                if hasattr(instance, '_process_context'):
                    instance._process_context(backend_context)
                yield instance
class Metadata(object):
    """Base class for user-defined metadata definitions.

    Subclasses are processed by the MetadataBase metaclass, which collects
    their MetadataField attributes and registers the class in ``registry``.
    """
    # Python 2 metaclass hook; the base class itself is skipped by
    # MetadataBase (bases == (object,)).
    __metaclass__ = MetadataBase
def _get_metadata_model(name=None):
    """Return the registered Metadata class for ``name``.

    Without a name, return the sole registered class (asserting there is
    exactly one). An unknown name raises with a hint listing valid names.
    """
    if name is None:
        assert len(registry) == 1, "You must have exactly one Metadata class, if using get_metadata() without a 'name' parameter."
        return registry.values()[0]
    try:
        return registry[name]
    except KeyError:
        # Build a helpful hint listing the valid definition name(s).
        if len(registry) == 1:
            valid_names = u'Try using the name "%s" or simply leaving it out altogether.'% registry.keys()[0]
        else:
            valid_names = u"Valid names are " + u", ".join(u'"%s"' % k for k in registry.keys())
        raise Exception(u"Metadata definition with name \"%s\" does not exist.\n%s" % (name, valid_names))
def get_metadata(path, name=None, context=None, site=None, language=None):
metadata = _get_metadata_model(name)
return metadata._get_formatted_data(path, context, site, language)
def get_linked_metadata(obj, name=None, context=None, site=None, language=None):
    """ Gets metadata linked from the given object.

    Consults the 'modelinstance' and 'model' backend records for ``obj``'s
    content type, falling back to unsaved instances when no record exists,
    and wraps them in a FormattedMetadata for convenient access.
    """
    # XXX Check that 'modelinstance' and 'model' metadata are installed in backends
    # I believe that get_model() would return None if not
    Metadata = _get_metadata_model(name)
    InstanceMetadata = Metadata._meta.get_model('modelinstance')
    ModelMetadata = Metadata._meta.get_model('model')
    content_type = ContentType.objects.get_for_model(obj)
    instances = []
    if InstanceMetadata is not None:
        try:
            instance_md = InstanceMetadata.objects.get(_content_type=content_type, _object_id=obj.pk)
        except InstanceMetadata.DoesNotExist:
            # No stored record: use an unsaved instance so defaults
            # (populate_from) can still resolve.
            instance_md = InstanceMetadata(_content_object=obj)
        instances.append(instance_md)
    if ModelMetadata is not None:
        try:
            model_md = ModelMetadata.objects.get(_content_type=content_type)
        except ModelMetadata.DoesNotExist:
            model_md = ModelMetadata(_content_type=content_type)
        instances.append(model_md)
    # NOTE(review): the Metadata *class* (not an instance) is passed here,
    # unlike _get_formatted_data which passes cls() — confirm intentional.
    # Path is '' because the instances have already been chosen explicitly.
    return FormattedMetadata(Metadata, instances, '', site, language)
def create_metadata_instance(metadata_class, instance):
# If this instance is marked as handled, don't do anything
# This typically means that the django admin will add metadata
# using eg an inline.
if getattr(instance, '_MetadataFormset__seo_metadata_handled', False):
return
metadata = None
content_type = ContentType.objects.get_for_model(instance)
# If this object does not define a path, don't worry about automatic update
try:
path = instance.get_absolute_url()
except AttributeError:
return
# Look for an existing object with this path
language = getattr(instance, '_language', None)
site = getattr(instance, '_site', None)
for md in metadata_class.objects.get_instances(path, site, language):
# If another object has the same path, remove the path.
# It's harsh, but we need a unique path and will assume the other
# link is outdated.
if md._content_type != content_type or md._object_id != instance.pk:
md._path = md._content_object.get_absolute_url()
md.save()
# Move on, this metadata instance isn't for us
md = None
else:
# This is our instance!
metadata = md
# If the path-based search didn't work, look for (or create) an existing
# instance linked to this object.
if not metadata:
metadata, md_created = metadata_class.objects.get_or_create(_content_type=content_type, _object_id=instance.pk)
metadata._path = path
metadata.save()
def _update_callback(model_class, sender, instance, created, **kwargs):
""" Callback to be attached to a post_save signal, updating the relevant
metadata, or just creating an entry.
NB:
It is theoretically possible that this code will lead to two instances
with the same generic foreign key. If you have non-overlapping URLs,
then this shouldn't happen.
I've held it to be more important to avoid double path entries.
"""
create_metadata_instance(model_class, instance)
def _delete_callback(model_class, sender, instance, **kwargs):
content_type = ContentType.objects.get_for_model(instance)
model_class.objects.filter(_content_type=content_type, _object_id=instance.pk).delete()
def register_signals():
for metadata_class in registry.values():
model_instance = metadata_class._meta.get_model('modelinstance')
if model_instance is not None:
update_callback = curry(_update_callback, model_class=model_instance)
delete_callback = curry(_delete_callback, model_class=model_instance)
## Connect the models listed in settings to the update callback.
for model in metadata_class._meta.seo_models:
models.signals.post_save.connect(update_callback, sender=model, weak=False)
models.signals.pre_delete.connect(delete_callback, sender=model, weak=False)
|
willhardy/django-seo | rollyourown/seo/base.py | FormattedMetadata.__instances | python | def __instances(self):
for instance in self.__instances_cache:
yield instance
for instance in self.__instances_original:
self.__instances_cache.append(instance)
yield instance | Cache instances, allowing generators to be used and reused.
This fills a cache as the generator gets emptied, eventually
reading exclusively from the cache. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L50-L59 | null | class FormattedMetadata(object):
""" Allows convenient access to selected metadata.
Metadata for each field may be sourced from any one of the relevant instances passed.
"""
def __init__(self, metadata, instances, path, site=None, language=None):
self.__metadata = metadata
if metadata._meta.use_cache:
if metadata._meta.use_sites and site:
hexpath = hashlib.md5(iri_to_uri(site.domain+path)).hexdigest()
else:
hexpath = hashlib.md5(iri_to_uri(path)).hexdigest()
if metadata._meta.use_i18n:
self.__cache_prefix = 'rollyourown.seo.%s.%s.%s' % (self.__metadata.__class__.__name__, hexpath, language)
else:
self.__cache_prefix = 'rollyourown.seo.%s.%s' % (self.__metadata.__class__.__name__, hexpath)
else:
self.__cache_prefix = None
self.__instances_original = instances
self.__instances_cache = []
def _resolve_value(self, name):
""" Returns an appropriate value for the given name.
This simply asks each of the instances for a value.
"""
for instance in self.__instances():
value = instance._resolve_value(name)
if value:
return value
# Otherwise, return an appropriate default value (populate_from)
# TODO: This is duplicated in meta_models. Move this to a common home.
if name in self.__metadata._meta.elements:
populate_from = self.__metadata._meta.elements[name].populate_from
if callable(populate_from):
return populate_from(None)
elif isinstance(populate_from, Literal):
return populate_from.value
elif populate_from is not NotSet:
return self._resolve_value(populate_from)
def __getattr__(self, name):
# If caching is enabled, work out a key
if self.__cache_prefix:
cache_key = '%s.%s' % (self.__cache_prefix, name)
value = cache.get(cache_key)
else:
cache_key = None
value = None
# Look for a group called "name"
if name in self.__metadata._meta.groups:
if value is not None:
return value or None
value = '\n'.join(unicode(BoundMetadataField(self.__metadata._meta.elements[f], self._resolve_value(f))) for f in self.__metadata._meta.groups[name]).strip()
# Look for an element called "name"
elif name in self.__metadata._meta.elements:
if value is not None:
return BoundMetadataField(self.__metadata._meta.elements[name], value or None)
value = self._resolve_value(name)
if cache_key is not None:
cache.set(cache_key, value or '')
return BoundMetadataField(self.__metadata._meta.elements[name], value)
else:
raise AttributeError
if cache_key is not None:
cache.set(cache_key, value or '')
return value or None
def __unicode__(self):
""" String version of this object is the html output of head elements. """
if self.__cache_prefix is not None:
value = cache.get(self.__cache_prefix)
else:
value = None
if value is None:
value = mark_safe(u'\n'.join(unicode(getattr(self, f)) for f,e in self.__metadata._meta.elements.items() if e.head))
if self.__cache_prefix is not None:
cache.set(self.__cache_prefix, value or '')
return value
|
willhardy/django-seo | rollyourown/seo/base.py | FormattedMetadata._resolve_value | python | def _resolve_value(self, name):
for instance in self.__instances():
value = instance._resolve_value(name)
if value:
return value
# Otherwise, return an appropriate default value (populate_from)
# TODO: This is duplicated in meta_models. Move this to a common home.
if name in self.__metadata._meta.elements:
populate_from = self.__metadata._meta.elements[name].populate_from
if callable(populate_from):
return populate_from(None)
elif isinstance(populate_from, Literal):
return populate_from.value
elif populate_from is not NotSet:
return self._resolve_value(populate_from) | Returns an appropriate value for the given name.
This simply asks each of the instances for a value. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L61-L79 | [
"def __instances(self):\n \"\"\" Cache instances, allowing generators to be used and reused. \n This fills a cache as the generator gets emptied, eventually\n reading exclusively from the cache.\n \"\"\"\n for instance in self.__instances_cache:\n yield instance\n for instance in self.__instances_original:\n self.__instances_cache.append(instance)\n yield instance\n",
"def _resolve_value(self, name):\n \"\"\" Returns an appropriate value for the given name. \n This simply asks each of the instances for a value.\n \"\"\"\n for instance in self.__instances():\n value = instance._resolve_value(name)\n if value:\n return value\n\n # Otherwise, return an appropriate default value (populate_from)\n # TODO: This is duplicated in meta_models. Move this to a common home.\n if name in self.__metadata._meta.elements:\n populate_from = self.__metadata._meta.elements[name].populate_from\n if callable(populate_from):\n return populate_from(None)\n elif isinstance(populate_from, Literal):\n return populate_from.value\n elif populate_from is not NotSet:\n return self._resolve_value(populate_from)\n"
] | class FormattedMetadata(object):
""" Allows convenient access to selected metadata.
Metadata for each field may be sourced from any one of the relevant instances passed.
"""
def __init__(self, metadata, instances, path, site=None, language=None):
self.__metadata = metadata
if metadata._meta.use_cache:
if metadata._meta.use_sites and site:
hexpath = hashlib.md5(iri_to_uri(site.domain+path)).hexdigest()
else:
hexpath = hashlib.md5(iri_to_uri(path)).hexdigest()
if metadata._meta.use_i18n:
self.__cache_prefix = 'rollyourown.seo.%s.%s.%s' % (self.__metadata.__class__.__name__, hexpath, language)
else:
self.__cache_prefix = 'rollyourown.seo.%s.%s' % (self.__metadata.__class__.__name__, hexpath)
else:
self.__cache_prefix = None
self.__instances_original = instances
self.__instances_cache = []
def __instances(self):
""" Cache instances, allowing generators to be used and reused.
This fills a cache as the generator gets emptied, eventually
reading exclusively from the cache.
"""
for instance in self.__instances_cache:
yield instance
for instance in self.__instances_original:
self.__instances_cache.append(instance)
yield instance
def __getattr__(self, name):
# If caching is enabled, work out a key
if self.__cache_prefix:
cache_key = '%s.%s' % (self.__cache_prefix, name)
value = cache.get(cache_key)
else:
cache_key = None
value = None
# Look for a group called "name"
if name in self.__metadata._meta.groups:
if value is not None:
return value or None
value = '\n'.join(unicode(BoundMetadataField(self.__metadata._meta.elements[f], self._resolve_value(f))) for f in self.__metadata._meta.groups[name]).strip()
# Look for an element called "name"
elif name in self.__metadata._meta.elements:
if value is not None:
return BoundMetadataField(self.__metadata._meta.elements[name], value or None)
value = self._resolve_value(name)
if cache_key is not None:
cache.set(cache_key, value or '')
return BoundMetadataField(self.__metadata._meta.elements[name], value)
else:
raise AttributeError
if cache_key is not None:
cache.set(cache_key, value or '')
return value or None
def __unicode__(self):
""" String version of this object is the html output of head elements. """
if self.__cache_prefix is not None:
value = cache.get(self.__cache_prefix)
else:
value = None
if value is None:
value = mark_safe(u'\n'.join(unicode(getattr(self, f)) for f,e in self.__metadata._meta.elements.items() if e.head))
if self.__cache_prefix is not None:
cache.set(self.__cache_prefix, value or '')
return value
|
willhardy/django-seo | rollyourown/seo/base.py | MetadataBase._get_formatted_data | python | def _get_formatted_data(cls, path, context=None, site=None, language=None):
return FormattedMetadata(cls(), cls._get_instances(path, context, site, language), path, site, language) | Return an object to conveniently access the appropriate values. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L217-L219 | null | class MetadataBase(type):
def __new__(cls, name, bases, attrs):
# TODO: Think of a better test to avoid processing Metadata parent class
if bases == (object,):
return type.__new__(cls, name, bases, attrs)
# Save options as a dict for now (we will be editing them)
# TODO: Is this necessary, should we bother relaying Django Meta options?
Meta = attrs.pop('Meta', {})
if Meta:
Meta = Meta.__dict__.copy()
# Remove our options from Meta, so Django won't complain
help_text = attrs.pop('HelpText', {})
# TODO: Is this necessary
if help_text:
help_text = help_text.__dict__.copy()
options = Options(Meta, help_text)
# Collect and sort our elements
elements = [(key, attrs.pop(key)) for key, obj in attrs.items()
if isinstance(obj, MetadataField)]
elements.sort(lambda x, y: cmp(x[1].creation_counter,
y[1].creation_counter))
elements = SortedDict(elements)
# Validation:
# TODO: Write a test framework for seo.Metadata validation
# Check that no group names clash with element names
for key,members in options.groups.items():
assert key not in elements, "Group name '%s' clashes with field name" % key
for member in members:
assert member in elements, "Group member '%s' is not a valid field" % member
# Check that the names of the elements are not going to clash with a model field
for key in elements:
assert key not in RESERVED_FIELD_NAMES, "Field name '%s' is not allowed" % key
# Preprocessing complete, here is the new class
new_class = type.__new__(cls, name, bases, attrs)
options.metadata = new_class
new_class._meta = options
# Some useful attributes
options._update_from_name(name)
options._register_elements(elements)
try:
for backend_name in options.backends:
new_class._meta._add_backend(backend_registry[backend_name])
for backend_name in options.backends:
backend_registry[backend_name].validate(options)
except KeyError:
raise Exception('Metadata backend "%s" is not installed.' % backend_name)
#new_class._meta._add_backend(PathBackend)
#new_class._meta._add_backend(ModelInstanceBackend)
#new_class._meta._add_backend(ModelBackend)
#new_class._meta._add_backend(ViewBackend)
registry[name] = new_class
return new_class
# TODO: Move this function out of the way (subclasses will want to define their own attributes)
# TODO: Move this function out of the way (subclasses will want to define their own attributes)
def _get_instances(cls, path, context=None, site=None, language=None):
""" A sequence of instances to discover metadata.
Each instance from each backend is looked up when possible/necessary.
This is a generator to eliminate unnecessary queries.
"""
backend_context = {'view_context': context }
for model in cls._meta.models.values():
for instance in model.objects.get_instances(path, site, language, backend_context) or []:
if hasattr(instance, '_process_context'):
instance._process_context(backend_context)
yield instance
|
willhardy/django-seo | rollyourown/seo/base.py | MetadataBase._get_instances | python | def _get_instances(cls, path, context=None, site=None, language=None):
backend_context = {'view_context': context }
for model in cls._meta.models.values():
for instance in model.objects.get_instances(path, site, language, backend_context) or []:
if hasattr(instance, '_process_context'):
instance._process_context(backend_context)
yield instance | A sequence of instances to discover metadata.
Each instance from each backend is looked up when possible/necessary.
This is a generator to eliminate unnecessary queries. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L223-L234 | null | class MetadataBase(type):
def __new__(cls, name, bases, attrs):
# TODO: Think of a better test to avoid processing Metadata parent class
if bases == (object,):
return type.__new__(cls, name, bases, attrs)
# Save options as a dict for now (we will be editing them)
# TODO: Is this necessary, should we bother relaying Django Meta options?
Meta = attrs.pop('Meta', {})
if Meta:
Meta = Meta.__dict__.copy()
# Remove our options from Meta, so Django won't complain
help_text = attrs.pop('HelpText', {})
# TODO: Is this necessary
if help_text:
help_text = help_text.__dict__.copy()
options = Options(Meta, help_text)
# Collect and sort our elements
elements = [(key, attrs.pop(key)) for key, obj in attrs.items()
if isinstance(obj, MetadataField)]
elements.sort(lambda x, y: cmp(x[1].creation_counter,
y[1].creation_counter))
elements = SortedDict(elements)
# Validation:
# TODO: Write a test framework for seo.Metadata validation
# Check that no group names clash with element names
for key,members in options.groups.items():
assert key not in elements, "Group name '%s' clashes with field name" % key
for member in members:
assert member in elements, "Group member '%s' is not a valid field" % member
# Check that the names of the elements are not going to clash with a model field
for key in elements:
assert key not in RESERVED_FIELD_NAMES, "Field name '%s' is not allowed" % key
# Preprocessing complete, here is the new class
new_class = type.__new__(cls, name, bases, attrs)
options.metadata = new_class
new_class._meta = options
# Some useful attributes
options._update_from_name(name)
options._register_elements(elements)
try:
for backend_name in options.backends:
new_class._meta._add_backend(backend_registry[backend_name])
for backend_name in options.backends:
backend_registry[backend_name].validate(options)
except KeyError:
raise Exception('Metadata backend "%s" is not installed.' % backend_name)
#new_class._meta._add_backend(PathBackend)
#new_class._meta._add_backend(ModelInstanceBackend)
#new_class._meta._add_backend(ModelBackend)
#new_class._meta._add_backend(ViewBackend)
registry[name] = new_class
return new_class
# TODO: Move this function out of the way (subclasses will want to define their own attributes)
def _get_formatted_data(cls, path, context=None, site=None, language=None):
""" Return an object to conveniently access the appropriate values. """
return FormattedMetadata(cls(), cls._get_instances(path, context, site, language), path, site, language)
# TODO: Move this function out of the way (subclasses will want to define their own attributes)
|
willhardy/django-seo | rollyourown/seo/backends.py | _resolve | python | def _resolve(value, model_instance=None, context=None):
if isinstance(value, basestring) and "{" in value:
if context is None:
context = Context()
if model_instance is not None:
context[model_instance._meta.module_name] = model_instance
value = Template(value).render(context)
return value | Resolves any template references in the given value. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/backends.py#L331-L341 | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.db import models
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.template import Template, Context
from django.utils.datastructures import SortedDict
from rollyourown.seo.utils import resolve_to_name, NotSet, Literal
RESERVED_FIELD_NAMES = ('_metadata', '_path', '_content_type', '_object_id',
'_content_object', '_view', '_site', 'objects',
'_resolve_value', '_set_context', 'id', 'pk' )
backend_registry = SortedDict()
class MetadataBaseModel(models.Model):
    """Abstract base for backend metadata models; resolves field values."""

    class Meta:
        abstract = True

    def __init__(self, *args, **kwargs):
        super(MetadataBaseModel, self).__init__(*args, **kwargs)
        # Provide access to a class instance
        # TODO Rename to __metadata
        self._metadata = self.__class__._metadata()

    # TODO Rename to __resolve_value?
    def _resolve_value(self, name):
        """ Returns an appropriate value for the given name.

        Order of precedence: an explicit editable value stored on this
        instance, then the element's populate_from default (callable,
        Literal, or the name of another field resolved recursively), then
        a plain attribute/method on the Metadata definition itself.
        """
        name = str(name)
        if name in self._metadata._meta.elements:
            element = self._metadata._meta.elements[name]
            # Look in instances for an explicit value
            if element.editable:
                value = getattr(self, name)
                if value:
                    return value
            # Otherwise, return an appropriate default value (populate_from)
            populate_from = element.populate_from
            if callable(populate_from):
                return populate_from(self, **self._populate_from_kwargs())
            elif isinstance(populate_from, Literal):
                return populate_from.value
            elif populate_from is not NotSet:
                return self._resolve_value(populate_from)
        # If this is not an element, look for an attribute on metadata
        try:
            value = getattr(self._metadata, name)
        except AttributeError:
            pass
        else:
            if callable(value):
                # im_self (Python 2): set only on bound methods, so a bound
                # method is called with this instance, an unbound one with
                # the metadata object and this instance.
                if getattr(value, 'im_self', None):
                    return value(self)
                else:
                    return value(self._metadata, self)
            return value

    def _populate_from_kwargs(self):
        # Hook for subclasses to pass extra kwargs to populate_from callables.
        return {}
class BaseManager(models.Manager):
def on_current_site(self, site=None):
if isinstance(site, Site):
site_id = site.id
elif site is not None:
site_id = site and Site.objects.get(domain=site).id
else:
site_id = settings.SITE_ID
# Exclude entries for other sites
where = ['_site_id IS NULL OR _site_id=%s']
return self.get_query_set().extra(where=where, params=[site_id])
def for_site_and_language(self, site=None, language=None):
queryset = self.on_current_site(site)
if language:
queryset = queryset.filter(_language=language)
return queryset
# Following is part of an incomplete move to define backends, which will:
# - contain the business logic of backends to a short, succinct module
# - allow individual backends to be turned on and off
# - allow new backends to be added by end developers
#
# A Backend:
# - defines an abstract base class for storing the information required to associate metadata with its target (ie a view, a path, a model instance etc)
# - defines a method for retrieving an instance
#
# This is not particularly easy.
# - unique_together fields need to be defined in the same django model, as some django versions don't enforce the uniqueness when it spans subclasses
# - most backends use the path to find a matching instance. The model backend however ideally needs a content_type (found from a model instance backend, which used the path)
# - catering for all the possible options (use_sites, use_languages), needs to be done succiently, and at compile time
#
# This means that:
# - all fields that share uniqueness (backend fields, _site, _language) need to be defined in the same model
# - as backends should have full control over the model, therefore every backend needs to define the compulsory fields themselves (eg _site and _language).
# There is no way to add future compulsory fields to all backends without editing each backend individually.
# This is probably going to have to be a limitataion we need to live with.
class MetadataBackend(object):
name = None
verbose_name = None
unique_together = None
class __metaclass__(type):
def __new__(cls, name, bases, attrs):
new_class = type.__new__(cls, name, bases, attrs)
backend_registry[new_class.name] = new_class
return new_class
def get_unique_together(self, options):
ut = []
for ut_set in self.unique_together:
ut_set = [a for a in ut_set]
if options.use_sites:
ut_set.append('_site')
if options.use_i18n:
ut_set.append('_language')
ut.append(tuple(ut_set))
return tuple(ut)
def get_manager(self, options):
_get_instances = self.get_instances
class _Manager(BaseManager):
def get_instances(self, path, site=None, language=None, context=None):
queryset = self.for_site_and_language(site, language)
return _get_instances(queryset, path, context)
if not options.use_sites:
def for_site_and_language(self, site=None, language=None):
queryset = self.get_query_set()
if language:
queryset = queryset.filter(_language=language)
return queryset
return _Manager
@staticmethod
def validate(options):
""" Validates the application of this backend to a given metadata
"""
class PathBackend(MetadataBackend):
name = "path"
verbose_name = "Path"
unique_together = (("_path",),)
def get_instances(self, queryset, path, context):
return queryset.filter(_path=path)
def get_model(self, options):
class PathMetadataBase(MetadataBaseModel):
_path = models.CharField(_('path'), max_length=255, unique=not (options.use_sites or options.use_i18n))
if options.use_sites:
_site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
if options.use_i18n:
_language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
objects = self.get_manager(options)()
def __unicode__(self):
return self._path
def _populate_from_kwargs(self):
return {'path': self._path}
class Meta:
abstract = True
unique_together = self.get_unique_together(options)
return PathMetadataBase
class ViewBackend(MetadataBackend):
name = "view"
verbose_name = "View"
unique_together = (("_view",),)
def get_instances(self, queryset, path, context):
view_name = ""
if path is not None:
view_name = resolve_to_name(path)
return queryset.filter(_view=view_name or "")
def get_model(self, options):
class ViewMetadataBase(MetadataBaseModel):
_view = models.CharField(_('view'), max_length=255, unique=not (options.use_sites or options.use_i18n), default="", blank=True)
if options.use_sites:
_site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
if options.use_i18n:
_language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
objects = self.get_manager(options)()
def _process_context(self, context):
""" Use the context when rendering any substitutions. """
if 'view_context' in context:
self.__context = context['view_context']
def _populate_from_kwargs(self):
return {'view_name': self._view}
def _resolve_value(self, name):
value = super(ViewMetadataBase, self)._resolve_value(name)
try:
return _resolve(value, context=self.__context)
except AttributeError:
return value
def __unicode__(self):
return self._view
class Meta:
abstract = True
unique_together = self.get_unique_together(options)
return ViewMetadataBase
class ModelInstanceBackend(MetadataBackend):
name = "modelinstance"
verbose_name = "Model Instance"
unique_together = (("_path",), ("_content_type", "_object_id"))
def get_instances(self, queryset, path, context):
return queryset.filter(_path=path)
def get_model(self, options):
class ModelInstanceMetadataBase(MetadataBaseModel):
_path = models.CharField(_('path'), max_length=255, editable=False, unique=not (options.use_sites or options.use_i18n))
_content_type = models.ForeignKey(ContentType, editable=False)
_object_id = models.PositiveIntegerField(editable=False)
_content_object = generic.GenericForeignKey('_content_type', '_object_id')
if options.use_sites:
_site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
if options.use_i18n:
_language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
objects = self.get_manager(options)()
def __unicode__(self):
return self._path
class Meta:
unique_together = self.get_unique_together(options)
abstract = True
def _process_context(self, context):
context['content_type'] = self._content_type
context['model_instance'] = self
def _populate_from_kwargs(self):
return {'model_instance': self._content_object}
def save(self, *args, **kwargs):
try:
path_func = self._content_object.get_absolute_url
except AttributeError:
pass
else:
self._path = path_func()
super(ModelInstanceMetadataBase, self).save(*args, **kwargs)
return ModelInstanceMetadataBase
class ModelBackend(MetadataBackend):
name = "model"
verbose_name = "Model"
unique_together = (("_content_type",),)
def get_instances(self, queryset, path, context):
if context and 'content_type' in context:
return queryset.filter(_content_type=context['content_type'])
def get_model(self, options):
class ModelMetadataBase(MetadataBaseModel):
_content_type = models.ForeignKey(ContentType)
if options.use_sites:
_site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
if options.use_i18n:
_language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
objects = self.get_manager(options)()
def __unicode__(self):
return unicode(self._content_type)
def _process_context(self, context):
""" Use the given model instance as context for rendering
any substitutions.
"""
if 'model_instance' in context:
self.__instance = context['model_instance']
def _populate_from_kwargs(self):
return {'content_type': self._content_type}
def _resolve_value(self, name):
value = super(ModelMetadataBase, self)._resolve_value(name)
try:
return _resolve(value, self.__instance._content_object)
except AttributeError:
return value
class Meta:
abstract = True
unique_together = self.get_unique_together(options)
return ModelMetadataBase
@staticmethod
def validate(options):
""" Validates the application of this backend to a given metadata
"""
try:
if options.backends.index('modelinstance') > options.backends.index('model'):
raise Exception("Metadata backend 'modelinstance' must come before 'model' backend")
except ValueError:
raise Exception("Metadata backend 'modelinstance' must be installed in order to use 'model' backend")
|
willhardy/django-seo | rollyourown/seo/backends.py | ModelBackend.validate | python | def validate(options):
try:
if options.backends.index('modelinstance') > options.backends.index('model'):
raise Exception("Metadata backend 'modelinstance' must come before 'model' backend")
except ValueError:
raise Exception("Metadata backend 'modelinstance' must be installed in order to use 'model' backend") | Validates the application of this backend to a given metadata | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/backends.py#L320-L327 | null | class ModelBackend(MetadataBackend):
name = "model"
verbose_name = "Model"
unique_together = (("_content_type",),)
def get_instances(self, queryset, path, context):
if context and 'content_type' in context:
return queryset.filter(_content_type=context['content_type'])
def get_model(self, options):
class ModelMetadataBase(MetadataBaseModel):
_content_type = models.ForeignKey(ContentType)
if options.use_sites:
_site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
if options.use_i18n:
_language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
objects = self.get_manager(options)()
def __unicode__(self):
return unicode(self._content_type)
def _process_context(self, context):
""" Use the given model instance as context for rendering
any substitutions.
"""
if 'model_instance' in context:
self.__instance = context['model_instance']
def _populate_from_kwargs(self):
return {'content_type': self._content_type}
def _resolve_value(self, name):
value = super(ModelMetadataBase, self)._resolve_value(name)
try:
return _resolve(value, self.__instance._content_object)
except AttributeError:
return value
class Meta:
abstract = True
unique_together = self.get_unique_together(options)
return ModelMetadataBase
@staticmethod
|
willhardy/django-seo | rollyourown/seo/options.py | Options._register_elements | python | def _register_elements(self, elements):
self.elements = elements
for key, obj in elements.items():
obj.contribute_to_class(self.metadata, key)
# Create the common Django fields
fields = {}
for key, obj in elements.items():
if obj.editable:
field = obj.get_field()
if not field.help_text:
if key in self.bulk_help_text:
field.help_text = self.bulk_help_text[key]
fields[key] = field
# 0. Abstract base model with common fields
base_meta = type('Meta', (), self.original_meta)
class BaseMeta(base_meta):
abstract = True
app_label = 'seo'
fields['Meta'] = BaseMeta
# Do we need this?
fields['__module__'] = __name__ #attrs['__module__']
self.MetadataBaseModel = type('%sBase' % self.name, (models.Model,), fields) | Takes elements from the metadata class and creates a base model for all backend models . | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/options.py#L38-L64 | null | class Options(object):
def __init__(self, meta, help_text=None):
self.use_sites = meta.pop('use_sites', False)
self.use_i18n = meta.pop('use_i18n', False)
self.use_redirect = meta.pop('use_redirect', False)
self.use_cache = meta.pop('use_cache', False)
self.groups = meta.pop('groups', {})
self.seo_views = meta.pop('seo_views', [])
self.verbose_name = meta.pop('verbose_name', None)
self.verbose_name_plural = meta.pop('verbose_name_plural', None)
self.backends = list(meta.pop('backends', ('path', 'modelinstance', 'model', 'view')))
self._set_seo_models(meta.pop('seo_models', []))
self.bulk_help_text = help_text
self.original_meta = meta
self.models = SortedDict()
self.name = None
self.elements = None
self.metadata = None
def get_model(self, name):
try:
return self.models[name]
except KeyError:
return None
def _update_from_name(self, name):
self.name = name
self.verbose_name = self.verbose_name or get_verbose_name(name)
self.verbose_name_plural = self.verbose_name_plural or self.verbose_name + 's'
def _add_backend(self, backend):
""" Builds a subclass model for the given backend """
md_type = backend.verbose_name
base = backend().get_model(self)
# TODO: Rename this field
new_md_attrs = {'_metadata': self.metadata, '__module__': __name__ }
new_md_meta = {}
new_md_meta['verbose_name'] = '%s (%s)' % (self.verbose_name, md_type)
new_md_meta['verbose_name_plural'] = '%s (%s)' % (self.verbose_name_plural, md_type)
new_md_meta['unique_together'] = base._meta.unique_together
new_md_attrs['Meta'] = type("Meta", (), new_md_meta)
new_md_attrs['_metadata_type'] = backend.name
model = type("%s%s"%(self.name,"".join(md_type.split())), (base, self.MetadataBaseModel), new_md_attrs.copy())
self.models[backend.name] = model
# This is a little dangerous, but because we set __module__ to __name__, the model needs tobe accessible here
globals()[model.__name__] = model
def _set_seo_models(self, value):
""" Gets the actual models to be used. """
seo_models = []
for model_name in value:
if "." in model_name:
app_label, model_name = model_name.split(".", 1)
model = models.get_model(app_label, model_name)
if model:
seo_models.append(model)
else:
app = models.get_app(model_name)
if app:
seo_models.extend(models.get_models(app))
self.seo_models = seo_models
|
willhardy/django-seo | rollyourown/seo/options.py | Options._add_backend | python | def _add_backend(self, backend):
md_type = backend.verbose_name
base = backend().get_model(self)
# TODO: Rename this field
new_md_attrs = {'_metadata': self.metadata, '__module__': __name__ }
new_md_meta = {}
new_md_meta['verbose_name'] = '%s (%s)' % (self.verbose_name, md_type)
new_md_meta['verbose_name_plural'] = '%s (%s)' % (self.verbose_name_plural, md_type)
new_md_meta['unique_together'] = base._meta.unique_together
new_md_attrs['Meta'] = type("Meta", (), new_md_meta)
new_md_attrs['_metadata_type'] = backend.name
model = type("%s%s"%(self.name,"".join(md_type.split())), (base, self.MetadataBaseModel), new_md_attrs.copy())
self.models[backend.name] = model
# This is a little dangerous, but because we set __module__ to __name__, the model needs tobe accessible here
globals()[model.__name__] = model | Builds a subclass model for the given backend | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/options.py#L66-L82 | null | class Options(object):
def __init__(self, meta, help_text=None):
self.use_sites = meta.pop('use_sites', False)
self.use_i18n = meta.pop('use_i18n', False)
self.use_redirect = meta.pop('use_redirect', False)
self.use_cache = meta.pop('use_cache', False)
self.groups = meta.pop('groups', {})
self.seo_views = meta.pop('seo_views', [])
self.verbose_name = meta.pop('verbose_name', None)
self.verbose_name_plural = meta.pop('verbose_name_plural', None)
self.backends = list(meta.pop('backends', ('path', 'modelinstance', 'model', 'view')))
self._set_seo_models(meta.pop('seo_models', []))
self.bulk_help_text = help_text
self.original_meta = meta
self.models = SortedDict()
self.name = None
self.elements = None
self.metadata = None
def get_model(self, name):
try:
return self.models[name]
except KeyError:
return None
def _update_from_name(self, name):
self.name = name
self.verbose_name = self.verbose_name or get_verbose_name(name)
self.verbose_name_plural = self.verbose_name_plural or self.verbose_name + 's'
def _register_elements(self, elements):
""" Takes elements from the metadata class and creates a base model for all backend models .
"""
self.elements = elements
for key, obj in elements.items():
obj.contribute_to_class(self.metadata, key)
# Create the common Django fields
fields = {}
for key, obj in elements.items():
if obj.editable:
field = obj.get_field()
if not field.help_text:
if key in self.bulk_help_text:
field.help_text = self.bulk_help_text[key]
fields[key] = field
# 0. Abstract base model with common fields
base_meta = type('Meta', (), self.original_meta)
class BaseMeta(base_meta):
abstract = True
app_label = 'seo'
fields['Meta'] = BaseMeta
# Do we need this?
fields['__module__'] = __name__ #attrs['__module__']
self.MetadataBaseModel = type('%sBase' % self.name, (models.Model,), fields)
def _set_seo_models(self, value):
""" Gets the actual models to be used. """
seo_models = []
for model_name in value:
if "." in model_name:
app_label, model_name = model_name.split(".", 1)
model = models.get_model(app_label, model_name)
if model:
seo_models.append(model)
else:
app = models.get_app(model_name)
if app:
seo_models.extend(models.get_models(app))
self.seo_models = seo_models
|
willhardy/django-seo | rollyourown/seo/options.py | Options._set_seo_models | python | def _set_seo_models(self, value):
seo_models = []
for model_name in value:
if "." in model_name:
app_label, model_name = model_name.split(".", 1)
model = models.get_model(app_label, model_name)
if model:
seo_models.append(model)
else:
app = models.get_app(model_name)
if app:
seo_models.extend(models.get_models(app))
self.seo_models = seo_models | Gets the actual models to be used. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/options.py#L84-L98 | null | class Options(object):
def __init__(self, meta, help_text=None):
self.use_sites = meta.pop('use_sites', False)
self.use_i18n = meta.pop('use_i18n', False)
self.use_redirect = meta.pop('use_redirect', False)
self.use_cache = meta.pop('use_cache', False)
self.groups = meta.pop('groups', {})
self.seo_views = meta.pop('seo_views', [])
self.verbose_name = meta.pop('verbose_name', None)
self.verbose_name_plural = meta.pop('verbose_name_plural', None)
self.backends = list(meta.pop('backends', ('path', 'modelinstance', 'model', 'view')))
self._set_seo_models(meta.pop('seo_models', []))
self.bulk_help_text = help_text
self.original_meta = meta
self.models = SortedDict()
self.name = None
self.elements = None
self.metadata = None
def get_model(self, name):
try:
return self.models[name]
except KeyError:
return None
def _update_from_name(self, name):
self.name = name
self.verbose_name = self.verbose_name or get_verbose_name(name)
self.verbose_name_plural = self.verbose_name_plural or self.verbose_name + 's'
def _register_elements(self, elements):
""" Takes elements from the metadata class and creates a base model for all backend models .
"""
self.elements = elements
for key, obj in elements.items():
obj.contribute_to_class(self.metadata, key)
# Create the common Django fields
fields = {}
for key, obj in elements.items():
if obj.editable:
field = obj.get_field()
if not field.help_text:
if key in self.bulk_help_text:
field.help_text = self.bulk_help_text[key]
fields[key] = field
# 0. Abstract base model with common fields
base_meta = type('Meta', (), self.original_meta)
class BaseMeta(base_meta):
abstract = True
app_label = 'seo'
fields['Meta'] = BaseMeta
# Do we need this?
fields['__module__'] = __name__ #attrs['__module__']
self.MetadataBaseModel = type('%sBase' % self.name, (models.Model,), fields)
def _add_backend(self, backend):
""" Builds a subclass model for the given backend """
md_type = backend.verbose_name
base = backend().get_model(self)
# TODO: Rename this field
new_md_attrs = {'_metadata': self.metadata, '__module__': __name__ }
new_md_meta = {}
new_md_meta['verbose_name'] = '%s (%s)' % (self.verbose_name, md_type)
new_md_meta['verbose_name_plural'] = '%s (%s)' % (self.verbose_name_plural, md_type)
new_md_meta['unique_together'] = base._meta.unique_together
new_md_attrs['Meta'] = type("Meta", (), new_md_meta)
new_md_attrs['_metadata_type'] = backend.name
model = type("%s%s"%(self.name,"".join(md_type.split())), (base, self.MetadataBaseModel), new_md_attrs.copy())
self.models[backend.name] = model
# This is a little dangerous, but because we set __module__ to __name__, the model needs tobe accessible here
globals()[model.__name__] = model
def _set_seo_models(self, value):
""" Gets the actual models to be used. """
seo_models = []
for model_name in value:
if "." in model_name:
app_label, model_name = model_name.split(".", 1)
model = models.get_model(app_label, model_name)
if model:
seo_models.append(model)
else:
app = models.get_app(model_name)
if app:
seo_models.extend(models.get_models(app))
self.seo_models = seo_models
|
willhardy/django-seo | rollyourown/seo/fields.py | MetadataField.validate | python | def validate(self):
if not self.editable:
assert self.populate_from is not NotSet, u"If field (%s) is not editable, you must set populate_from" % self.name | Discover certain illegal configurations | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/fields.py#L72-L75 | null | class MetadataField(object):
creation_counter = 0
def __init__(self, name, head, editable, populate_from, valid_tags, choices, help_text, verbose_name, field, field_kwargs):
self.name = name
self.head = head
self.editable = editable
self.populate_from = populate_from
self.help_text = help_text
self.field = field or models.CharField
self.verbose_name = verbose_name
if field_kwargs is None: field_kwargs = {}
self.field_kwargs = field_kwargs
if choices and isinstance(choices[0], basestring):
choices = [(c, c) for c in choices]
field_kwargs.setdefault('choices', choices)
# If valid_tags is a string, tags are space separated words
if isinstance(valid_tags, basestring):
valid_tags = valid_tags.split()
if valid_tags is not None:
valid_tags = set(valid_tags)
self.valid_tags = valid_tags
# Track creation order for field ordering
self.creation_counter = MetadataField.creation_counter
MetadataField.creation_counter += 1
def contribute_to_class(self, cls, name):
if not self.name:
self.name = name
# Populate the hep text from populate_from if it's missing
if not self.help_text and self.populate_from is not NotSet:
if callable(self.populate_from) and hasattr(self.populate_from, 'short_description'):
self.help_text = _('If empty, %s') % self.populate_from.short_description
elif isinstance(self.populate_from, Literal):
self.help_text = _('If empty, \"%s\" will be used.') % self.populate_from.value
elif isinstance(self.populate_from, basestring) and self.populate_from in cls._meta.elements:
field = cls._meta.elements[self.populate_from]
self.help_text = _('If empty, %s will be used.') % field.verbose_name or field.name
elif isinstance(self.populate_from, basestring) and hasattr(cls, self.populate_from):
populate_from = getattr(cls, self.populate_from, None)
if callable(populate_from) and hasattr(populate_from, 'short_description'):
self.help_text = _('If empty, %s') % populate_from.short_description
self.validate()
def get_field(self):
kwargs = self.field_kwargs
if self.help_text:
kwargs.setdefault('help_text', self.help_text)
if self.verbose_name:
kwargs.setdefault('verbose_name', self.help_text)
return self.field(**kwargs)
def clean(self, value):
return value
def render(self, value):
raise NotImplementedError
|
willhardy/django-seo | rollyourown/seo/management/__init__.py | populate_all_metadata | python | def populate_all_metadata():
for Metadata in registry.values():
InstanceMetadata = Metadata._meta.get_model('modelinstance')
if InstanceMetadata is not None:
for model in Metadata._meta.seo_models:
populate_metadata(model, InstanceMetadata) | Create metadata instances for all models in seo_models if empty.
Once you have created a single metadata instance, this will not run.
This is because it is a potentially slow operation that need only be
done once. If you want to ensure that everything is populated, run the
populate_metadata management command. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/management/__init__.py#L30-L41 | [
"def populate_metadata(model, MetadataClass):\n \"\"\" For a given model and metadata class, ensure there is metadata for every instance. \n \"\"\"\n content_type = ContentType.objects.get_for_model(model)\n for instance in model.objects.all():\n create_metadata_instance(MetadataClass, instance)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db.models import signals
from django.db.utils import DatabaseError
from django.contrib.contenttypes.models import ContentType
from rollyourown.seo.base import registry, populate_metadata
from rollyourown.seo import models as seo_models
def _syncdb_handler(app, created_models, verbosity, **kwargs):
for Metadata in registry.values():
InstanceMetadata = Metadata._meta.get_model('modelinstance')
if InstanceMetadata is not None and InstanceMetadata in created_models:
for model in Metadata._meta.seo_models:
content_type = ContentType.objects.get_for_model(model)
if InstanceMetadata.objects.filter(_content_type=content_type):
continue
if verbosity > 0:
print "Populating %s for %s.%s" % (Metadata._meta.verbose_name_plural, model._meta.app_label, model._meta.object_name)
try:
# If a model is using SEO & it's schema is managed by South migrations rather than syncdb, this call will fail when doing an syncdb for the first time.
populate_metadata(model, InstanceMetadata)
except DatabaseError as err:
print "Database Error (%s) when trying to populate %s for %s.%s. Ignoring (as assumed that this is a migration related issue)" % (str(err), Metadata._meta.verbose_name_plural, model._meta.app_label, model._meta.object_name)
pass
signals.post_syncdb.connect(_syncdb_handler, sender=seo_models,
dispatch_uid="rollyourown.seo.management.populate_metadata")
|
willhardy/django-seo | rollyourown/seo/systemviews.py | SystemViews.populate | python | def populate(self):
from django.conf import settings
from django.core import urlresolvers
self.append(("", ""))
urlconf = settings.ROOT_URLCONF
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
# Collect base level views
for key, value in resolver.reverse_dict.items():
if isinstance(key, basestring):
args = value[0][0][1]
url = "/" + value[0][0][0]
self.append((key, " ".join(key.split("_"))))
# Collect namespaces (TODO: merge these two sections into one)
for namespace, url in resolver.namespace_dict.items():
for key, value in url[1].reverse_dict.items():
if isinstance(key, basestring):
args = value[0][0][1]
full_key = '%s:%s' % (namespace, key)
self.append((full_key, "%s: %s" % (namespace, " ".join(key.split("_")))))
self.sort() | Populate this list with all views that take no arguments. | train | https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/systemviews.py#L43-L65 | null | class SystemViews(LazyChoices):
|
bjodah/pyneqsys | pyneqsys/core.py | _NeqSysBase.rms | python | def rms(self, x, params=()):
internal_x, internal_params = self.pre_process(np.asarray(x),
np.asarray(params))
if internal_params.ndim > 1:
raise NotImplementedError("Parameters should be constant.")
result = np.empty(internal_x.size//self.nx)
for idx in range(internal_x.shape[0]):
result[idx] = np.sqrt(np.mean(np.square(self.f_cb(
internal_x[idx, :], internal_params))))
return result | Returns root mean square value of f(x, params) | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L79-L89 | [
"def pre_process(self, x0, params=()):\n \"\"\" Used internally for transformation of variables. \"\"\"\n # Should be used by all methods matching \"solve_*\"\n if self.x_by_name and isinstance(x0, dict):\n x0 = [x0[k] for k in self.names]\n if self.par_by_name and isinstance(params, dict):\n params = [params[k] for k in self.param_names]\n for pre_processor in self.pre_processors:\n x0, params = pre_processor(x0, params)\n return x0, np.atleast_1d(params)\n"
] | class _NeqSysBase(object):
""" Baseclass for system of non-linear equations.
This class contains shared logic used by its subclasses and is not meant to be used
by end-users directly.
"""
def __init__(self, names=None, param_names=None, x_by_name=None, par_by_name=None,
latex_names=None, latex_param_names=None):
self.names = names or ()
self.param_names = param_names or ()
self.x_by_name = x_by_name
self.par_by_name = par_by_name
self.latex_names = latex_names or ()
self.latex_param_names = latex_param_names or ()
def _get_solver_cb(self, solver, attached_solver):
if attached_solver is not None:
if solver is not None:
raise ValueError("solver must be None.")
solver = attached_solver(self)
if callable(solver):
return solver
if solver is None:
solver = os.environ.get('PYNEQSYS_SOLVER', 'scipy')
return getattr(self, '_solve_' + solver)
def solve_series(self, x0, params, varied_data, varied_idx,
internal_x0=None, solver=None, propagate=True, **kwargs):
""" Solve system for a set of parameters in which one is varied
Parameters
----------
x0 : array_like
Guess (subject to ``self.post_processors``)
params : array_like
Parameter values
vaired_data : array_like
Numerical values of the varied parameter.
varied_idx : int or str
Index of the varied parameter (indexing starts at 0).
If ``self.par_by_name`` this should be the name (str) of the varied
parameter.
internal_x0 : array_like (default: None)
Guess (*not* subject to ``self.post_processors``).
Overrides ``x0`` when given.
solver : str or callback
See :meth:`solve`.
propagate : bool (default: True)
Use last successful solution as ``x0`` in consecutive solves.
\\*\\*kwargs :
Keyword arguments pass along to :meth:`solve`.
Returns
-------
xout : array
Of shape ``(varied_data.size, x0.size)``.
info_dicts : list of dictionaries
Dictionaries each containing keys such as containing 'success', 'nfev', 'njev' etc.
"""
if self.x_by_name and isinstance(x0, dict):
x0 = [x0[k] for k in self.names]
if self.par_by_name:
if isinstance(params, dict):
params = [params[k] for k in self.param_names]
if isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
new_params = np.atleast_1d(np.array(params, dtype=np.float64))
xout = np.empty((len(varied_data), len(x0)))
self.internal_xout = np.empty_like(xout)
self.internal_params_out = np.empty((len(varied_data),
len(new_params)))
info_dicts = []
new_x0 = np.array(x0, dtype=np.float64) # copy
conds = kwargs.get('initial_conditions', None) # see ConditionalNeqSys
for idx, value in enumerate(varied_data):
try:
new_params[varied_idx] = value
except TypeError:
new_params = value # e.g. type(new_params) == int
if conds is not None:
kwargs['initial_conditions'] = conds
x, info_dict = self.solve(new_x0, new_params, internal_x0, solver,
**kwargs)
if propagate:
if info_dict['success']:
try:
# See ChainedNeqSys.solve
new_x0 = info_dict['x_vecs'][0]
internal_x0 = info_dict['internal_x_vecs'][0]
conds = info_dict['intermediate_info'][0].get(
'conditions', None)
except:
new_x0 = x
internal_x0 = None
conds = info_dict.get('conditions', None)
xout[idx, :] = x
self.internal_xout[idx, :] = self.internal_x
self.internal_params_out[idx, :] = self.internal_params
info_dicts.append(info_dict)
return xout, info_dicts
def plot_series(self, xres, varied_data, varied_idx, **kwargs):
""" Plots the results from :meth:`solve_series`.
Parameters
----------
xres : array
Of shape ``(varied_data.size, self.nx)``.
varied_data : array
See :meth:`solve_series`.
varied_idx : int or str
See :meth:`solve_series`.
\\*\\*kwargs :
Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`.
"""
for attr in 'names latex_names'.split():
if kwargs.get(attr, None) is None:
kwargs[attr] = getattr(self, attr)
ax = plot_series(xres, varied_data, **kwargs)
if self.par_by_name and isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
if self.latex_param_names:
ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx])
elif self.param_names:
ax.set_xlabel(self.param_names[varied_idx])
return ax
def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs):
""" Analogous to :meth:`plot_series` but will plot residuals. """
nf = len(self.f_cb(*self.pre_process(xres[0], params)))
xerr = np.empty((xres.shape[0], nf))
new_params = np.array(params)
for idx, row in enumerate(xres):
new_params[varied_idx] = varied_data[idx]
xerr[idx, :] = self.f_cb(*self.pre_process(row, params))
return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
def plot_series_residuals_internal(self, varied_data, varied_idx, **kwargs):
""" Analogous to :meth:`plot_series` but for internal residuals from last run. """
nf = len(self.f_cb(*self.pre_process(
self.internal_xout[0], self.internal_params_out[0])))
xerr = np.empty((self.internal_xout.shape[0], nf))
for idx, (res, params) in enumerate(zip(self.internal_xout, self.internal_params_out)):
xerr[idx, :] = self.f_cb(res, params)
return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
def solve_and_plot_series(self, x0, params, varied_data, varied_idx, solver=None, plot_kwargs=None,
plot_residuals_kwargs=None, **kwargs):
""" Solve and plot for a series of a varied parameter.
Convenience method, see :meth:`solve_series`, :meth:`plot_series` &
:meth:`plot_series_residuals_internal` for more information.
"""
sol, nfo = self.solve_series(
x0, params, varied_data, varied_idx, solver=solver, **kwargs)
ax_sol = self.plot_series(sol, varied_data, varied_idx, info=nfo,
**(plot_kwargs or {}))
extra = dict(ax_sol=ax_sol, info=nfo)
if plot_residuals_kwargs:
extra['ax_resid'] = self.plot_series_residuals_internal(
varied_data, varied_idx, info=nfo,
**(plot_residuals_kwargs or {})
)
return sol, extra
|
bjodah/pyneqsys | pyneqsys/core.py | _NeqSysBase.solve_series | python | def solve_series(self, x0, params, varied_data, varied_idx,
internal_x0=None, solver=None, propagate=True, **kwargs):
if self.x_by_name and isinstance(x0, dict):
x0 = [x0[k] for k in self.names]
if self.par_by_name:
if isinstance(params, dict):
params = [params[k] for k in self.param_names]
if isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
new_params = np.atleast_1d(np.array(params, dtype=np.float64))
xout = np.empty((len(varied_data), len(x0)))
self.internal_xout = np.empty_like(xout)
self.internal_params_out = np.empty((len(varied_data),
len(new_params)))
info_dicts = []
new_x0 = np.array(x0, dtype=np.float64) # copy
conds = kwargs.get('initial_conditions', None) # see ConditionalNeqSys
for idx, value in enumerate(varied_data):
try:
new_params[varied_idx] = value
except TypeError:
new_params = value # e.g. type(new_params) == int
if conds is not None:
kwargs['initial_conditions'] = conds
x, info_dict = self.solve(new_x0, new_params, internal_x0, solver,
**kwargs)
if propagate:
if info_dict['success']:
try:
# See ChainedNeqSys.solve
new_x0 = info_dict['x_vecs'][0]
internal_x0 = info_dict['internal_x_vecs'][0]
conds = info_dict['intermediate_info'][0].get(
'conditions', None)
except:
new_x0 = x
internal_x0 = None
conds = info_dict.get('conditions', None)
xout[idx, :] = x
self.internal_xout[idx, :] = self.internal_x
self.internal_params_out[idx, :] = self.internal_params
info_dicts.append(info_dict)
return xout, info_dicts | Solve system for a set of parameters in which one is varied
Parameters
----------
x0 : array_like
Guess (subject to ``self.post_processors``)
params : array_like
Parameter values
vaired_data : array_like
Numerical values of the varied parameter.
varied_idx : int or str
Index of the varied parameter (indexing starts at 0).
If ``self.par_by_name`` this should be the name (str) of the varied
parameter.
internal_x0 : array_like (default: None)
Guess (*not* subject to ``self.post_processors``).
Overrides ``x0`` when given.
solver : str or callback
See :meth:`solve`.
propagate : bool (default: True)
Use last successful solution as ``x0`` in consecutive solves.
\\*\\*kwargs :
Keyword arguments pass along to :meth:`solve`.
Returns
-------
xout : array
Of shape ``(varied_data.size, x0.size)``.
info_dicts : list of dictionaries
Dictionaries each containing keys such as containing 'success', 'nfev', 'njev' etc. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L91-L166 | [
"def solve(self, x0, params=(), internal_x0=None, solver=None, attached_solver=None, **kwargs):\n \"\"\" Solve with user specified ``solver`` choice.\n\n Parameters\n ----------\n x0: 1D array of floats\n Guess (subject to ``self.post_processors``)\n params: 1D array_like of floats\n Parameters (subject to ``self.post_processors``)\n internal_x0: 1D array of floats\n When given it overrides (processed) ``x0``. ``internal_x0`` is not\n subject to ``self.post_processors``.\n solver: str or callable or None or iterable of such\n if str: uses _solve_``solver``(\\*args, \\*\\*kwargs).\n if ``None``: chooses from PYNEQSYS_SOLVER environment variable.\n if iterable: chain solving.\n attached_solver: callable factory\n Invokes: solver = attached_solver(self).\n\n Returns\n -------\n array:\n solution vector (post-processed by self.post_processors)\n dict:\n info dictionary containing 'success', 'nfev', 'njev' etc.\n\n Examples\n --------\n >>> neqsys = NeqSys(2, 2, lambda x, p: [\n ... (x[0] - x[1])**p[0]/2 + x[0] - 1,\n ... (x[1] - x[0])**p[0]/2 + x[1]\n ... ])\n >>> x, sol = neqsys.solve([1, 0], [3], solver=(None, 'mpmath'))\n >>> assert sol['success']\n >>> print(x)\n [0.841163901914009663684741869855]\n [0.158836098085990336315258130144]\n\n \"\"\"\n if not isinstance(solver, (tuple, list)):\n solver = [solver]\n if not isinstance(attached_solver, (tuple, list)):\n attached_solver = [attached_solver] + [None]*(len(solver) - 1)\n _x0, self.internal_params = self.pre_process(x0, params)\n for solv, attached_solv in zip(solver, attached_solver):\n if internal_x0 is not None:\n _x0 = internal_x0\n elif self.internal_x0_cb is not None:\n _x0 = self.internal_x0_cb(x0, params)\n\n nfo = self._get_solver_cb(solv, attached_solv)(_x0, **kwargs)\n _x0 = nfo['x'].copy()\n self.internal_x = _x0\n x0 = self.post_process(self.internal_x, self.internal_params)[0]\n return x0, nfo\n"
] | class _NeqSysBase(object):
""" Baseclass for system of non-linear equations.
This class contains shared logic used by its subclasses and is not meant to be used
by end-users directly.
"""
def __init__(self, names=None, param_names=None, x_by_name=None, par_by_name=None,
latex_names=None, latex_param_names=None):
self.names = names or ()
self.param_names = param_names or ()
self.x_by_name = x_by_name
self.par_by_name = par_by_name
self.latex_names = latex_names or ()
self.latex_param_names = latex_param_names or ()
def _get_solver_cb(self, solver, attached_solver):
if attached_solver is not None:
if solver is not None:
raise ValueError("solver must be None.")
solver = attached_solver(self)
if callable(solver):
return solver
if solver is None:
solver = os.environ.get('PYNEQSYS_SOLVER', 'scipy')
return getattr(self, '_solve_' + solver)
def rms(self, x, params=()):
""" Returns root mean square value of f(x, params) """
internal_x, internal_params = self.pre_process(np.asarray(x),
np.asarray(params))
if internal_params.ndim > 1:
raise NotImplementedError("Parameters should be constant.")
result = np.empty(internal_x.size//self.nx)
for idx in range(internal_x.shape[0]):
result[idx] = np.sqrt(np.mean(np.square(self.f_cb(
internal_x[idx, :], internal_params))))
return result
def plot_series(self, xres, varied_data, varied_idx, **kwargs):
""" Plots the results from :meth:`solve_series`.
Parameters
----------
xres : array
Of shape ``(varied_data.size, self.nx)``.
varied_data : array
See :meth:`solve_series`.
varied_idx : int or str
See :meth:`solve_series`.
\\*\\*kwargs :
Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`.
"""
for attr in 'names latex_names'.split():
if kwargs.get(attr, None) is None:
kwargs[attr] = getattr(self, attr)
ax = plot_series(xres, varied_data, **kwargs)
if self.par_by_name and isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
if self.latex_param_names:
ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx])
elif self.param_names:
ax.set_xlabel(self.param_names[varied_idx])
return ax
def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs):
""" Analogous to :meth:`plot_series` but will plot residuals. """
nf = len(self.f_cb(*self.pre_process(xres[0], params)))
xerr = np.empty((xres.shape[0], nf))
new_params = np.array(params)
for idx, row in enumerate(xres):
new_params[varied_idx] = varied_data[idx]
xerr[idx, :] = self.f_cb(*self.pre_process(row, params))
return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
def plot_series_residuals_internal(self, varied_data, varied_idx, **kwargs):
""" Analogous to :meth:`plot_series` but for internal residuals from last run. """
nf = len(self.f_cb(*self.pre_process(
self.internal_xout[0], self.internal_params_out[0])))
xerr = np.empty((self.internal_xout.shape[0], nf))
for idx, (res, params) in enumerate(zip(self.internal_xout, self.internal_params_out)):
xerr[idx, :] = self.f_cb(res, params)
return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
def solve_and_plot_series(self, x0, params, varied_data, varied_idx, solver=None, plot_kwargs=None,
plot_residuals_kwargs=None, **kwargs):
""" Solve and plot for a series of a varied parameter.
Convenience method, see :meth:`solve_series`, :meth:`plot_series` &
:meth:`plot_series_residuals_internal` for more information.
"""
sol, nfo = self.solve_series(
x0, params, varied_data, varied_idx, solver=solver, **kwargs)
ax_sol = self.plot_series(sol, varied_data, varied_idx, info=nfo,
**(plot_kwargs or {}))
extra = dict(ax_sol=ax_sol, info=nfo)
if plot_residuals_kwargs:
extra['ax_resid'] = self.plot_series_residuals_internal(
varied_data, varied_idx, info=nfo,
**(plot_residuals_kwargs or {})
)
return sol, extra
|
bjodah/pyneqsys | pyneqsys/core.py | _NeqSysBase.plot_series | python | def plot_series(self, xres, varied_data, varied_idx, **kwargs):
for attr in 'names latex_names'.split():
if kwargs.get(attr, None) is None:
kwargs[attr] = getattr(self, attr)
ax = plot_series(xres, varied_data, **kwargs)
if self.par_by_name and isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
if self.latex_param_names:
ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx])
elif self.param_names:
ax.set_xlabel(self.param_names[varied_idx])
return ax | Plots the results from :meth:`solve_series`.
Parameters
----------
xres : array
Of shape ``(varied_data.size, self.nx)``.
varied_data : array
See :meth:`solve_series`.
varied_idx : int or str
See :meth:`solve_series`.
\\*\\*kwargs :
Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L168-L193 | [
"def plot_series(xres, varied_data, indices=None, info=None,\n fail_vline=None, plot_kwargs_cb=None,\n ls=('-', '--', ':', '-.'),\n c=('k', 'r', 'g', 'b', 'c', 'm', 'y'), labels=None,\n ax=None, names=None, latex_names=None):\n \"\"\" Plot the values of the solution vector vs the varied parameter.\n\n Parameters\n ----------\n xres : array\n Solution vector of shape ``(varied_data.size, x0.size)``.\n varied_data : array\n Numerical values of the varied parameter.\n indices : iterable of integers, optional\n Indices of variables to be plotted. default: all\n fail_vline : bool\n Show vertical lines where the solver failed.\n plot_kwargs_cb : callable\n Takes the index as single argument, returns a dict passed to\n the plotting function\n ls : iterable of str\n Linestyles.\n c : iterable of str\n Colors.\n labels : iterable of str\n ax : matplotlib Axes instance\n names : iterable of str\n latex_names : iterable of str\n\n \"\"\"\n import matplotlib.pyplot as plt\n\n if indices is None:\n indices = range(xres.shape[1])\n\n if fail_vline is None:\n if info is None:\n fail_vline = False\n else:\n fail_vline = True\n\n if ax is None:\n ax = plt.subplot(1, 1, 1)\n\n if labels is None:\n labels = names if latex_names is None else ['$%s$' % ln.strip('$') for ln in latex_names]\n\n if plot_kwargs_cb is None:\n def plot_kwargs_cb(idx, labels=None):\n kwargs = {'ls': ls[idx % len(ls)],\n 'c': c[idx % len(c)]}\n if labels:\n kwargs['label'] = labels[idx]\n return kwargs\n else:\n plot_kwargs_cb = plot_kwargs_cb or (lambda idx: {})\n\n for idx in indices:\n ax.plot(varied_data, xres[:, idx], **plot_kwargs_cb(idx, labels=labels))\n\n if fail_vline:\n for i, nfo in enumerate(info):\n if not nfo['success']:\n ax.axvline(varied_data[i], c='k', ls='--')\n return ax\n"
] | class _NeqSysBase(object):
""" Baseclass for system of non-linear equations.
This class contains shared logic used by its subclasses and is not meant to be used
by end-users directly.
"""
def __init__(self, names=None, param_names=None, x_by_name=None, par_by_name=None,
latex_names=None, latex_param_names=None):
self.names = names or ()
self.param_names = param_names or ()
self.x_by_name = x_by_name
self.par_by_name = par_by_name
self.latex_names = latex_names or ()
self.latex_param_names = latex_param_names or ()
def _get_solver_cb(self, solver, attached_solver):
if attached_solver is not None:
if solver is not None:
raise ValueError("solver must be None.")
solver = attached_solver(self)
if callable(solver):
return solver
if solver is None:
solver = os.environ.get('PYNEQSYS_SOLVER', 'scipy')
return getattr(self, '_solve_' + solver)
def rms(self, x, params=()):
""" Returns root mean square value of f(x, params) """
internal_x, internal_params = self.pre_process(np.asarray(x),
np.asarray(params))
if internal_params.ndim > 1:
raise NotImplementedError("Parameters should be constant.")
result = np.empty(internal_x.size//self.nx)
for idx in range(internal_x.shape[0]):
result[idx] = np.sqrt(np.mean(np.square(self.f_cb(
internal_x[idx, :], internal_params))))
return result
def solve_series(self, x0, params, varied_data, varied_idx,
internal_x0=None, solver=None, propagate=True, **kwargs):
""" Solve system for a set of parameters in which one is varied
Parameters
----------
x0 : array_like
Guess (subject to ``self.post_processors``)
params : array_like
Parameter values
vaired_data : array_like
Numerical values of the varied parameter.
varied_idx : int or str
Index of the varied parameter (indexing starts at 0).
If ``self.par_by_name`` this should be the name (str) of the varied
parameter.
internal_x0 : array_like (default: None)
Guess (*not* subject to ``self.post_processors``).
Overrides ``x0`` when given.
solver : str or callback
See :meth:`solve`.
propagate : bool (default: True)
Use last successful solution as ``x0`` in consecutive solves.
\\*\\*kwargs :
Keyword arguments pass along to :meth:`solve`.
Returns
-------
xout : array
Of shape ``(varied_data.size, x0.size)``.
info_dicts : list of dictionaries
Dictionaries each containing keys such as containing 'success', 'nfev', 'njev' etc.
"""
if self.x_by_name and isinstance(x0, dict):
x0 = [x0[k] for k in self.names]
if self.par_by_name:
if isinstance(params, dict):
params = [params[k] for k in self.param_names]
if isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
new_params = np.atleast_1d(np.array(params, dtype=np.float64))
xout = np.empty((len(varied_data), len(x0)))
self.internal_xout = np.empty_like(xout)
self.internal_params_out = np.empty((len(varied_data),
len(new_params)))
info_dicts = []
new_x0 = np.array(x0, dtype=np.float64) # copy
conds = kwargs.get('initial_conditions', None) # see ConditionalNeqSys
for idx, value in enumerate(varied_data):
try:
new_params[varied_idx] = value
except TypeError:
new_params = value # e.g. type(new_params) == int
if conds is not None:
kwargs['initial_conditions'] = conds
x, info_dict = self.solve(new_x0, new_params, internal_x0, solver,
**kwargs)
if propagate:
if info_dict['success']:
try:
# See ChainedNeqSys.solve
new_x0 = info_dict['x_vecs'][0]
internal_x0 = info_dict['internal_x_vecs'][0]
conds = info_dict['intermediate_info'][0].get(
'conditions', None)
except:
new_x0 = x
internal_x0 = None
conds = info_dict.get('conditions', None)
xout[idx, :] = x
self.internal_xout[idx, :] = self.internal_x
self.internal_params_out[idx, :] = self.internal_params
info_dicts.append(info_dict)
return xout, info_dicts
def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs):
""" Analogous to :meth:`plot_series` but will plot residuals. """
nf = len(self.f_cb(*self.pre_process(xres[0], params)))
xerr = np.empty((xres.shape[0], nf))
new_params = np.array(params)
for idx, row in enumerate(xres):
new_params[varied_idx] = varied_data[idx]
xerr[idx, :] = self.f_cb(*self.pre_process(row, params))
return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
def plot_series_residuals_internal(self, varied_data, varied_idx, **kwargs):
""" Analogous to :meth:`plot_series` but for internal residuals from last run. """
nf = len(self.f_cb(*self.pre_process(
self.internal_xout[0], self.internal_params_out[0])))
xerr = np.empty((self.internal_xout.shape[0], nf))
for idx, (res, params) in enumerate(zip(self.internal_xout, self.internal_params_out)):
xerr[idx, :] = self.f_cb(res, params)
return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
def solve_and_plot_series(self, x0, params, varied_data, varied_idx, solver=None, plot_kwargs=None,
plot_residuals_kwargs=None, **kwargs):
""" Solve and plot for a series of a varied parameter.
Convenience method, see :meth:`solve_series`, :meth:`plot_series` &
:meth:`plot_series_residuals_internal` for more information.
"""
sol, nfo = self.solve_series(
x0, params, varied_data, varied_idx, solver=solver, **kwargs)
ax_sol = self.plot_series(sol, varied_data, varied_idx, info=nfo,
**(plot_kwargs or {}))
extra = dict(ax_sol=ax_sol, info=nfo)
if plot_residuals_kwargs:
extra['ax_resid'] = self.plot_series_residuals_internal(
varied_data, varied_idx, info=nfo,
**(plot_residuals_kwargs or {})
)
return sol, extra
|
bjodah/pyneqsys | pyneqsys/core.py | _NeqSysBase.plot_series_residuals | python | def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs):
nf = len(self.f_cb(*self.pre_process(xres[0], params)))
xerr = np.empty((xres.shape[0], nf))
new_params = np.array(params)
for idx, row in enumerate(xres):
new_params[varied_idx] = varied_data[idx]
xerr[idx, :] = self.f_cb(*self.pre_process(row, params))
return self.plot_series(xerr, varied_data, varied_idx, **kwargs) | Analogous to :meth:`plot_series` but will plot residuals. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L195-L204 | [
"def plot_series(self, xres, varied_data, varied_idx, **kwargs):\n \"\"\" Plots the results from :meth:`solve_series`.\n\n Parameters\n ----------\n xres : array\n Of shape ``(varied_data.size, self.nx)``.\n varied_data : array\n See :meth:`solve_series`.\n varied_idx : int or str\n See :meth:`solve_series`.\n \\\\*\\\\*kwargs :\n Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`.\n\n \"\"\"\n for attr in 'names latex_names'.split():\n if kwargs.get(attr, None) is None:\n kwargs[attr] = getattr(self, attr)\n ax = plot_series(xres, varied_data, **kwargs)\n if self.par_by_name and isinstance(varied_idx, str):\n varied_idx = self.param_names.index(varied_idx)\n if self.latex_param_names:\n ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx])\n elif self.param_names:\n ax.set_xlabel(self.param_names[varied_idx])\n return ax\n"
] | class _NeqSysBase(object):
""" Baseclass for system of non-linear equations.
This class contains shared logic used by its subclasses and is not meant to be used
by end-users directly.
"""
def __init__(self, names=None, param_names=None, x_by_name=None, par_by_name=None,
latex_names=None, latex_param_names=None):
self.names = names or ()
self.param_names = param_names or ()
self.x_by_name = x_by_name
self.par_by_name = par_by_name
self.latex_names = latex_names or ()
self.latex_param_names = latex_param_names or ()
def _get_solver_cb(self, solver, attached_solver):
if attached_solver is not None:
if solver is not None:
raise ValueError("solver must be None.")
solver = attached_solver(self)
if callable(solver):
return solver
if solver is None:
solver = os.environ.get('PYNEQSYS_SOLVER', 'scipy')
return getattr(self, '_solve_' + solver)
def rms(self, x, params=()):
""" Returns root mean square value of f(x, params) """
internal_x, internal_params = self.pre_process(np.asarray(x),
np.asarray(params))
if internal_params.ndim > 1:
raise NotImplementedError("Parameters should be constant.")
result = np.empty(internal_x.size//self.nx)
for idx in range(internal_x.shape[0]):
result[idx] = np.sqrt(np.mean(np.square(self.f_cb(
internal_x[idx, :], internal_params))))
return result
def solve_series(self, x0, params, varied_data, varied_idx,
internal_x0=None, solver=None, propagate=True, **kwargs):
""" Solve system for a set of parameters in which one is varied
Parameters
----------
x0 : array_like
Guess (subject to ``self.post_processors``)
params : array_like
Parameter values
vaired_data : array_like
Numerical values of the varied parameter.
varied_idx : int or str
Index of the varied parameter (indexing starts at 0).
If ``self.par_by_name`` this should be the name (str) of the varied
parameter.
internal_x0 : array_like (default: None)
Guess (*not* subject to ``self.post_processors``).
Overrides ``x0`` when given.
solver : str or callback
See :meth:`solve`.
propagate : bool (default: True)
Use last successful solution as ``x0`` in consecutive solves.
\\*\\*kwargs :
Keyword arguments pass along to :meth:`solve`.
Returns
-------
xout : array
Of shape ``(varied_data.size, x0.size)``.
info_dicts : list of dictionaries
Dictionaries each containing keys such as containing 'success', 'nfev', 'njev' etc.
"""
if self.x_by_name and isinstance(x0, dict):
x0 = [x0[k] for k in self.names]
if self.par_by_name:
if isinstance(params, dict):
params = [params[k] for k in self.param_names]
if isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
new_params = np.atleast_1d(np.array(params, dtype=np.float64))
xout = np.empty((len(varied_data), len(x0)))
self.internal_xout = np.empty_like(xout)
self.internal_params_out = np.empty((len(varied_data),
len(new_params)))
info_dicts = []
new_x0 = np.array(x0, dtype=np.float64) # copy
conds = kwargs.get('initial_conditions', None) # see ConditionalNeqSys
for idx, value in enumerate(varied_data):
try:
new_params[varied_idx] = value
except TypeError:
new_params = value # e.g. type(new_params) == int
if conds is not None:
kwargs['initial_conditions'] = conds
x, info_dict = self.solve(new_x0, new_params, internal_x0, solver,
**kwargs)
if propagate:
if info_dict['success']:
try:
# See ChainedNeqSys.solve
new_x0 = info_dict['x_vecs'][0]
internal_x0 = info_dict['internal_x_vecs'][0]
conds = info_dict['intermediate_info'][0].get(
'conditions', None)
except:
new_x0 = x
internal_x0 = None
conds = info_dict.get('conditions', None)
xout[idx, :] = x
self.internal_xout[idx, :] = self.internal_x
self.internal_params_out[idx, :] = self.internal_params
info_dicts.append(info_dict)
return xout, info_dicts
def plot_series(self, xres, varied_data, varied_idx, **kwargs):
""" Plots the results from :meth:`solve_series`.
Parameters
----------
xres : array
Of shape ``(varied_data.size, self.nx)``.
varied_data : array
See :meth:`solve_series`.
varied_idx : int or str
See :meth:`solve_series`.
\\*\\*kwargs :
Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`.
"""
for attr in 'names latex_names'.split():
if kwargs.get(attr, None) is None:
kwargs[attr] = getattr(self, attr)
ax = plot_series(xres, varied_data, **kwargs)
if self.par_by_name and isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
if self.latex_param_names:
ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx])
elif self.param_names:
ax.set_xlabel(self.param_names[varied_idx])
return ax
def plot_series_residuals_internal(self, varied_data, varied_idx, **kwargs):
""" Analogous to :meth:`plot_series` but for internal residuals from last run. """
nf = len(self.f_cb(*self.pre_process(
self.internal_xout[0], self.internal_params_out[0])))
xerr = np.empty((self.internal_xout.shape[0], nf))
for idx, (res, params) in enumerate(zip(self.internal_xout, self.internal_params_out)):
xerr[idx, :] = self.f_cb(res, params)
return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
def solve_and_plot_series(self, x0, params, varied_data, varied_idx, solver=None, plot_kwargs=None,
plot_residuals_kwargs=None, **kwargs):
""" Solve and plot for a series of a varied parameter.
Convenience method, see :meth:`solve_series`, :meth:`plot_series` &
:meth:`plot_series_residuals_internal` for more information.
"""
sol, nfo = self.solve_series(
x0, params, varied_data, varied_idx, solver=solver, **kwargs)
ax_sol = self.plot_series(sol, varied_data, varied_idx, info=nfo,
**(plot_kwargs or {}))
extra = dict(ax_sol=ax_sol, info=nfo)
if plot_residuals_kwargs:
extra['ax_resid'] = self.plot_series_residuals_internal(
varied_data, varied_idx, info=nfo,
**(plot_residuals_kwargs or {})
)
return sol, extra
|
bjodah/pyneqsys | pyneqsys/core.py | _NeqSysBase.plot_series_residuals_internal | python | def plot_series_residuals_internal(self, varied_data, varied_idx, **kwargs):
nf = len(self.f_cb(*self.pre_process(
self.internal_xout[0], self.internal_params_out[0])))
xerr = np.empty((self.internal_xout.shape[0], nf))
for idx, (res, params) in enumerate(zip(self.internal_xout, self.internal_params_out)):
xerr[idx, :] = self.f_cb(res, params)
return self.plot_series(xerr, varied_data, varied_idx, **kwargs) | Analogous to :meth:`plot_series` but for internal residuals from last run. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L206-L213 | [
"def plot_series(self, xres, varied_data, varied_idx, **kwargs):\n \"\"\" Plots the results from :meth:`solve_series`.\n\n Parameters\n ----------\n xres : array\n Of shape ``(varied_data.size, self.nx)``.\n varied_data : array\n See :meth:`solve_series`.\n varied_idx : int or str\n See :meth:`solve_series`.\n \\\\*\\\\*kwargs :\n Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`.\n\n \"\"\"\n for attr in 'names latex_names'.split():\n if kwargs.get(attr, None) is None:\n kwargs[attr] = getattr(self, attr)\n ax = plot_series(xres, varied_data, **kwargs)\n if self.par_by_name and isinstance(varied_idx, str):\n varied_idx = self.param_names.index(varied_idx)\n if self.latex_param_names:\n ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx])\n elif self.param_names:\n ax.set_xlabel(self.param_names[varied_idx])\n return ax\n"
] | class _NeqSysBase(object):
""" Baseclass for system of non-linear equations.
This class contains shared logic used by its subclasses and is not meant to be used
by end-users directly.
"""
def __init__(self, names=None, param_names=None, x_by_name=None, par_by_name=None,
latex_names=None, latex_param_names=None):
self.names = names or ()
self.param_names = param_names or ()
self.x_by_name = x_by_name
self.par_by_name = par_by_name
self.latex_names = latex_names or ()
self.latex_param_names = latex_param_names or ()
def _get_solver_cb(self, solver, attached_solver):
if attached_solver is not None:
if solver is not None:
raise ValueError("solver must be None.")
solver = attached_solver(self)
if callable(solver):
return solver
if solver is None:
solver = os.environ.get('PYNEQSYS_SOLVER', 'scipy')
return getattr(self, '_solve_' + solver)
def rms(self, x, params=()):
""" Returns root mean square value of f(x, params) """
internal_x, internal_params = self.pre_process(np.asarray(x),
np.asarray(params))
if internal_params.ndim > 1:
raise NotImplementedError("Parameters should be constant.")
result = np.empty(internal_x.size//self.nx)
for idx in range(internal_x.shape[0]):
result[idx] = np.sqrt(np.mean(np.square(self.f_cb(
internal_x[idx, :], internal_params))))
return result
def solve_series(self, x0, params, varied_data, varied_idx,
internal_x0=None, solver=None, propagate=True, **kwargs):
""" Solve system for a set of parameters in which one is varied
Parameters
----------
x0 : array_like
Guess (subject to ``self.post_processors``)
params : array_like
Parameter values
vaired_data : array_like
Numerical values of the varied parameter.
varied_idx : int or str
Index of the varied parameter (indexing starts at 0).
If ``self.par_by_name`` this should be the name (str) of the varied
parameter.
internal_x0 : array_like (default: None)
Guess (*not* subject to ``self.post_processors``).
Overrides ``x0`` when given.
solver : str or callback
See :meth:`solve`.
propagate : bool (default: True)
Use last successful solution as ``x0`` in consecutive solves.
\\*\\*kwargs :
Keyword arguments pass along to :meth:`solve`.
Returns
-------
xout : array
Of shape ``(varied_data.size, x0.size)``.
info_dicts : list of dictionaries
Dictionaries each containing keys such as containing 'success', 'nfev', 'njev' etc.
"""
if self.x_by_name and isinstance(x0, dict):
x0 = [x0[k] for k in self.names]
if self.par_by_name:
if isinstance(params, dict):
params = [params[k] for k in self.param_names]
if isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
new_params = np.atleast_1d(np.array(params, dtype=np.float64))
xout = np.empty((len(varied_data), len(x0)))
self.internal_xout = np.empty_like(xout)
self.internal_params_out = np.empty((len(varied_data),
len(new_params)))
info_dicts = []
new_x0 = np.array(x0, dtype=np.float64) # copy
conds = kwargs.get('initial_conditions', None) # see ConditionalNeqSys
for idx, value in enumerate(varied_data):
try:
new_params[varied_idx] = value
except TypeError:
new_params = value # e.g. type(new_params) == int
if conds is not None:
kwargs['initial_conditions'] = conds
x, info_dict = self.solve(new_x0, new_params, internal_x0, solver,
**kwargs)
if propagate:
if info_dict['success']:
try:
# See ChainedNeqSys.solve
new_x0 = info_dict['x_vecs'][0]
internal_x0 = info_dict['internal_x_vecs'][0]
conds = info_dict['intermediate_info'][0].get(
'conditions', None)
except:
new_x0 = x
internal_x0 = None
conds = info_dict.get('conditions', None)
xout[idx, :] = x
self.internal_xout[idx, :] = self.internal_x
self.internal_params_out[idx, :] = self.internal_params
info_dicts.append(info_dict)
return xout, info_dicts
def plot_series(self, xres, varied_data, varied_idx, **kwargs):
""" Plots the results from :meth:`solve_series`.
Parameters
----------
xres : array
Of shape ``(varied_data.size, self.nx)``.
varied_data : array
See :meth:`solve_series`.
varied_idx : int or str
See :meth:`solve_series`.
\\*\\*kwargs :
Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`.
"""
for attr in 'names latex_names'.split():
if kwargs.get(attr, None) is None:
kwargs[attr] = getattr(self, attr)
ax = plot_series(xres, varied_data, **kwargs)
if self.par_by_name and isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
if self.latex_param_names:
ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx])
elif self.param_names:
ax.set_xlabel(self.param_names[varied_idx])
return ax
def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs):
""" Analogous to :meth:`plot_series` but will plot residuals. """
nf = len(self.f_cb(*self.pre_process(xres[0], params)))
xerr = np.empty((xres.shape[0], nf))
new_params = np.array(params)
for idx, row in enumerate(xres):
new_params[varied_idx] = varied_data[idx]
xerr[idx, :] = self.f_cb(*self.pre_process(row, params))
return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
def solve_and_plot_series(self, x0, params, varied_data, varied_idx, solver=None, plot_kwargs=None,
plot_residuals_kwargs=None, **kwargs):
""" Solve and plot for a series of a varied parameter.
Convenience method, see :meth:`solve_series`, :meth:`plot_series` &
:meth:`plot_series_residuals_internal` for more information.
"""
sol, nfo = self.solve_series(
x0, params, varied_data, varied_idx, solver=solver, **kwargs)
ax_sol = self.plot_series(sol, varied_data, varied_idx, info=nfo,
**(plot_kwargs or {}))
extra = dict(ax_sol=ax_sol, info=nfo)
if plot_residuals_kwargs:
extra['ax_resid'] = self.plot_series_residuals_internal(
varied_data, varied_idx, info=nfo,
**(plot_residuals_kwargs or {})
)
return sol, extra
|
bjodah/pyneqsys | pyneqsys/core.py | _NeqSysBase.solve_and_plot_series | python | def solve_and_plot_series(self, x0, params, varied_data, varied_idx, solver=None, plot_kwargs=None,
plot_residuals_kwargs=None, **kwargs):
sol, nfo = self.solve_series(
x0, params, varied_data, varied_idx, solver=solver, **kwargs)
ax_sol = self.plot_series(sol, varied_data, varied_idx, info=nfo,
**(plot_kwargs or {}))
extra = dict(ax_sol=ax_sol, info=nfo)
if plot_residuals_kwargs:
extra['ax_resid'] = self.plot_series_residuals_internal(
varied_data, varied_idx, info=nfo,
**(plot_residuals_kwargs or {})
)
return sol, extra | Solve and plot for a series of a varied parameter.
Convenience method, see :meth:`solve_series`, :meth:`plot_series` &
:meth:`plot_series_residuals_internal` for more information. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L215-L233 | [
"def solve_series(self, x0, params, varied_data, varied_idx,\n internal_x0=None, solver=None, propagate=True, **kwargs):\n \"\"\" Solve system for a set of parameters in which one is varied\n\n Parameters\n ----------\n x0 : array_like\n Guess (subject to ``self.post_processors``)\n params : array_like\n Parameter values\n vaired_data : array_like\n Numerical values of the varied parameter.\n varied_idx : int or str\n Index of the varied parameter (indexing starts at 0).\n If ``self.par_by_name`` this should be the name (str) of the varied\n parameter.\n internal_x0 : array_like (default: None)\n Guess (*not* subject to ``self.post_processors``).\n Overrides ``x0`` when given.\n solver : str or callback\n See :meth:`solve`.\n propagate : bool (default: True)\n Use last successful solution as ``x0`` in consecutive solves.\n \\\\*\\\\*kwargs :\n Keyword arguments pass along to :meth:`solve`.\n\n Returns\n -------\n xout : array\n Of shape ``(varied_data.size, x0.size)``.\n info_dicts : list of dictionaries\n Dictionaries each containing keys such as containing 'success', 'nfev', 'njev' etc.\n\n \"\"\"\n if self.x_by_name and isinstance(x0, dict):\n x0 = [x0[k] for k in self.names]\n if self.par_by_name:\n if isinstance(params, dict):\n params = [params[k] for k in self.param_names]\n if isinstance(varied_idx, str):\n varied_idx = self.param_names.index(varied_idx)\n\n new_params = np.atleast_1d(np.array(params, dtype=np.float64))\n xout = np.empty((len(varied_data), len(x0)))\n self.internal_xout = np.empty_like(xout)\n self.internal_params_out = np.empty((len(varied_data),\n len(new_params)))\n info_dicts = []\n new_x0 = np.array(x0, dtype=np.float64) # copy\n conds = kwargs.get('initial_conditions', None) # see ConditionalNeqSys\n for idx, value in enumerate(varied_data):\n try:\n new_params[varied_idx] = value\n except TypeError:\n new_params = value # e.g. 
type(new_params) == int\n if conds is not None:\n kwargs['initial_conditions'] = conds\n x, info_dict = self.solve(new_x0, new_params, internal_x0, solver,\n **kwargs)\n if propagate:\n if info_dict['success']:\n try:\n # See ChainedNeqSys.solve\n new_x0 = info_dict['x_vecs'][0]\n internal_x0 = info_dict['internal_x_vecs'][0]\n conds = info_dict['intermediate_info'][0].get(\n 'conditions', None)\n except:\n new_x0 = x\n internal_x0 = None\n conds = info_dict.get('conditions', None)\n xout[idx, :] = x\n self.internal_xout[idx, :] = self.internal_x\n self.internal_params_out[idx, :] = self.internal_params\n info_dicts.append(info_dict)\n return xout, info_dicts\n",
"def plot_series(self, xres, varied_data, varied_idx, **kwargs):\n \"\"\" Plots the results from :meth:`solve_series`.\n\n Parameters\n ----------\n xres : array\n Of shape ``(varied_data.size, self.nx)``.\n varied_data : array\n See :meth:`solve_series`.\n varied_idx : int or str\n See :meth:`solve_series`.\n \\\\*\\\\*kwargs :\n Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`.\n\n \"\"\"\n for attr in 'names latex_names'.split():\n if kwargs.get(attr, None) is None:\n kwargs[attr] = getattr(self, attr)\n ax = plot_series(xres, varied_data, **kwargs)\n if self.par_by_name and isinstance(varied_idx, str):\n varied_idx = self.param_names.index(varied_idx)\n if self.latex_param_names:\n ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx])\n elif self.param_names:\n ax.set_xlabel(self.param_names[varied_idx])\n return ax\n",
"def plot_series_residuals_internal(self, varied_data, varied_idx, **kwargs):\n \"\"\" Analogous to :meth:`plot_series` but for internal residuals from last run. \"\"\"\n nf = len(self.f_cb(*self.pre_process(\n self.internal_xout[0], self.internal_params_out[0])))\n xerr = np.empty((self.internal_xout.shape[0], nf))\n for idx, (res, params) in enumerate(zip(self.internal_xout, self.internal_params_out)):\n xerr[idx, :] = self.f_cb(res, params)\n return self.plot_series(xerr, varied_data, varied_idx, **kwargs)\n"
] | class _NeqSysBase(object):
""" Baseclass for system of non-linear equations.
This class contains shared logic used by its subclasses and is not meant to be used
by end-users directly.
"""
def __init__(self, names=None, param_names=None, x_by_name=None, par_by_name=None,
latex_names=None, latex_param_names=None):
self.names = names or ()
self.param_names = param_names or ()
self.x_by_name = x_by_name
self.par_by_name = par_by_name
self.latex_names = latex_names or ()
self.latex_param_names = latex_param_names or ()
def _get_solver_cb(self, solver, attached_solver):
if attached_solver is not None:
if solver is not None:
raise ValueError("solver must be None.")
solver = attached_solver(self)
if callable(solver):
return solver
if solver is None:
solver = os.environ.get('PYNEQSYS_SOLVER', 'scipy')
return getattr(self, '_solve_' + solver)
def rms(self, x, params=()):
""" Returns root mean square value of f(x, params) """
internal_x, internal_params = self.pre_process(np.asarray(x),
np.asarray(params))
if internal_params.ndim > 1:
raise NotImplementedError("Parameters should be constant.")
result = np.empty(internal_x.size//self.nx)
for idx in range(internal_x.shape[0]):
result[idx] = np.sqrt(np.mean(np.square(self.f_cb(
internal_x[idx, :], internal_params))))
return result
def solve_series(self, x0, params, varied_data, varied_idx,
internal_x0=None, solver=None, propagate=True, **kwargs):
""" Solve system for a set of parameters in which one is varied
Parameters
----------
x0 : array_like
Guess (subject to ``self.post_processors``)
params : array_like
Parameter values
vaired_data : array_like
Numerical values of the varied parameter.
varied_idx : int or str
Index of the varied parameter (indexing starts at 0).
If ``self.par_by_name`` this should be the name (str) of the varied
parameter.
internal_x0 : array_like (default: None)
Guess (*not* subject to ``self.post_processors``).
Overrides ``x0`` when given.
solver : str or callback
See :meth:`solve`.
propagate : bool (default: True)
Use last successful solution as ``x0`` in consecutive solves.
\\*\\*kwargs :
Keyword arguments pass along to :meth:`solve`.
Returns
-------
xout : array
Of shape ``(varied_data.size, x0.size)``.
info_dicts : list of dictionaries
Dictionaries each containing keys such as containing 'success', 'nfev', 'njev' etc.
"""
if self.x_by_name and isinstance(x0, dict):
x0 = [x0[k] for k in self.names]
if self.par_by_name:
if isinstance(params, dict):
params = [params[k] for k in self.param_names]
if isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
new_params = np.atleast_1d(np.array(params, dtype=np.float64))
xout = np.empty((len(varied_data), len(x0)))
self.internal_xout = np.empty_like(xout)
self.internal_params_out = np.empty((len(varied_data),
len(new_params)))
info_dicts = []
new_x0 = np.array(x0, dtype=np.float64) # copy
conds = kwargs.get('initial_conditions', None) # see ConditionalNeqSys
for idx, value in enumerate(varied_data):
try:
new_params[varied_idx] = value
except TypeError:
new_params = value # e.g. type(new_params) == int
if conds is not None:
kwargs['initial_conditions'] = conds
x, info_dict = self.solve(new_x0, new_params, internal_x0, solver,
**kwargs)
if propagate:
if info_dict['success']:
try:
# See ChainedNeqSys.solve
new_x0 = info_dict['x_vecs'][0]
internal_x0 = info_dict['internal_x_vecs'][0]
conds = info_dict['intermediate_info'][0].get(
'conditions', None)
except:
new_x0 = x
internal_x0 = None
conds = info_dict.get('conditions', None)
xout[idx, :] = x
self.internal_xout[idx, :] = self.internal_x
self.internal_params_out[idx, :] = self.internal_params
info_dicts.append(info_dict)
return xout, info_dicts
def plot_series(self, xres, varied_data, varied_idx, **kwargs):
""" Plots the results from :meth:`solve_series`.
Parameters
----------
xres : array
Of shape ``(varied_data.size, self.nx)``.
varied_data : array
See :meth:`solve_series`.
varied_idx : int or str
See :meth:`solve_series`.
\\*\\*kwargs :
Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`.
"""
for attr in 'names latex_names'.split():
if kwargs.get(attr, None) is None:
kwargs[attr] = getattr(self, attr)
ax = plot_series(xres, varied_data, **kwargs)
if self.par_by_name and isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
if self.latex_param_names:
ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx])
elif self.param_names:
ax.set_xlabel(self.param_names[varied_idx])
return ax
def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs):
""" Analogous to :meth:`plot_series` but will plot residuals. """
nf = len(self.f_cb(*self.pre_process(xres[0], params)))
xerr = np.empty((xres.shape[0], nf))
new_params = np.array(params)
for idx, row in enumerate(xres):
new_params[varied_idx] = varied_data[idx]
xerr[idx, :] = self.f_cb(*self.pre_process(row, params))
return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
def plot_series_residuals_internal(self, varied_data, varied_idx, **kwargs):
""" Analogous to :meth:`plot_series` but for internal residuals from last run. """
nf = len(self.f_cb(*self.pre_process(
self.internal_xout[0], self.internal_params_out[0])))
xerr = np.empty((self.internal_xout.shape[0], nf))
for idx, (res, params) in enumerate(zip(self.internal_xout, self.internal_params_out)):
xerr[idx, :] = self.f_cb(res, params)
return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
|
bjodah/pyneqsys | pyneqsys/core.py | NeqSys.pre_process | python | def pre_process(self, x0, params=()):
# Should be used by all methods matching "solve_*"
if self.x_by_name and isinstance(x0, dict):
x0 = [x0[k] for k in self.names]
if self.par_by_name and isinstance(params, dict):
params = [params[k] for k in self.param_names]
for pre_processor in self.pre_processors:
x0, params = pre_processor(x0, params)
return x0, np.atleast_1d(params) | Used internally for transformation of variables. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L318-L327 | null | class NeqSys(_NeqSysBase):
"""Represents a system of non-linear equations.
This class provides a unified interface to:
- scipy.optimize.root
- NLEQ2
- KINSOL
- mpmath
- levmar
Parameters
----------
nf : int
Number of functions.
nx : int
Number of independent variables.
f : callback
Function to solve for. Signature ``f(x) -> y`` where ``len(x) == nx``
and ``len(y) == nf``.
jac : callback or None (default)
Jacobian matrix (dfdy).
band : tuple (default: None)
Number of sub- and super-diagonals in jacobian.
names : iterable of str (default: None)
Names of variables, used for plotting and for referencing by name.
param_names : iterable of strings (default: None)
Names of the parameters, used for referencing parameters by name.
x_by_name : bool, default: ``False``
Will values for *x* be referred to by name (in dictionaries)
instead of by index (in arrays)?
par_by_name : bool, default: ``False``
Will values for parameters be referred to by name (in dictionaries)
instead of by index (in arrays)?
latex_names : iterable of str, optional
Names of variables in LaTeX format.
latex_param_names : iterable of str, optional
Names of parameters in LaTeX format.
pre_processors : iterable of callables (optional)
(Forward) transformation of user-input to :py:meth:`solve`
signature: ``f(x1[:], params1[:]) -> x2[:], params2[:]``.
Insert at beginning.
post_processors : iterable of callables (optional)
(Backward) transformation of result from :py:meth:`solve`
signature: ``f(x2[:], params2[:]) -> x1[:], params1[:]``.
Insert at end.
internal_x0_cb : callback (optional)
callback with signature ``f(x[:], p[:]) -> x0[:]``
if not specified, ``x`` from ``self.pre_processors`` will be used.
Examples
--------
>>> neqsys = NeqSys(2, 2, lambda x, p: [(x[0] - x[1])**p[0]/2 + x[0] - 1,
... (x[1] - x[0])**p[0]/2 + x[1]])
>>> x, sol = neqsys.solve([1, 0], [3])
>>> assert sol['success']
>>> print(x)
[ 0.8411639 0.1588361]
See Also
--------
pyneqsys.symbolic.SymbolicSys : use a CAS (SymPy by default) to derive
the jacobian.
"""
def __init__(self, nf, nx=None, f=None, jac=None, band=None, pre_processors=None,
post_processors=None, internal_x0_cb=None, **kwargs):
super(NeqSys, self).__init__(**kwargs)
if nx is None:
nx = len(self.names)
if f is None:
raise ValueError("A callback for f must be provided")
if nf < nx:
raise ValueError("Under-determined system")
self.nf, self.nx = nf, nx
self.f_cb = _ensure_3args(f)
self.j_cb = _ensure_3args(jac)
self.band = band
self.pre_processors = pre_processors or []
self.post_processors = post_processors or []
self.internal_x0_cb = internal_x0_cb
def post_process(self, xout, params_out):
""" Used internally for transformation of variables. """
# Should be used by all methods matching "solve_*"
for post_processor in self.post_processors:
xout, params_out = post_processor(xout, params_out)
return xout, params_out
def solve(self, x0, params=(), internal_x0=None, solver=None, attached_solver=None, **kwargs):
""" Solve with user specified ``solver`` choice.
Parameters
----------
x0: 1D array of floats
Guess (subject to ``self.post_processors``)
params: 1D array_like of floats
Parameters (subject to ``self.post_processors``)
internal_x0: 1D array of floats
When given it overrides (processed) ``x0``. ``internal_x0`` is not
subject to ``self.post_processors``.
solver: str or callable or None or iterable of such
if str: uses _solve_``solver``(\*args, \*\*kwargs).
if ``None``: chooses from PYNEQSYS_SOLVER environment variable.
if iterable: chain solving.
attached_solver: callable factory
Invokes: solver = attached_solver(self).
Returns
-------
array:
solution vector (post-processed by self.post_processors)
dict:
info dictionary containing 'success', 'nfev', 'njev' etc.
Examples
--------
>>> neqsys = NeqSys(2, 2, lambda x, p: [
... (x[0] - x[1])**p[0]/2 + x[0] - 1,
... (x[1] - x[0])**p[0]/2 + x[1]
... ])
>>> x, sol = neqsys.solve([1, 0], [3], solver=(None, 'mpmath'))
>>> assert sol['success']
>>> print(x)
[0.841163901914009663684741869855]
[0.158836098085990336315258130144]
"""
if not isinstance(solver, (tuple, list)):
solver = [solver]
if not isinstance(attached_solver, (tuple, list)):
attached_solver = [attached_solver] + [None]*(len(solver) - 1)
_x0, self.internal_params = self.pre_process(x0, params)
for solv, attached_solv in zip(solver, attached_solver):
if internal_x0 is not None:
_x0 = internal_x0
elif self.internal_x0_cb is not None:
_x0 = self.internal_x0_cb(x0, params)
nfo = self._get_solver_cb(solv, attached_solv)(_x0, **kwargs)
_x0 = nfo['x'].copy()
self.internal_x = _x0
x0 = self.post_process(self.internal_x, self.internal_params)[0]
return x0, nfo
def _solve_scipy(self, intern_x0, tol=1e-8, method=None, **kwargs):
""" Uses ``scipy.optimize.root``
See: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html
Parameters
----------
intern_x0: array_like
initial guess
tol: float
Tolerance
method: str
What method to use. Defaults to ``'lm'`` if ``self.nf > self.nx`` otherwise ``'hybr'``.
"""
from scipy.optimize import root
if method is None:
if self.nf > self.nx:
method = 'lm'
elif self.nf == self.nx:
method = 'hybr'
else:
raise ValueError('Underdetermined problem')
if 'band' in kwargs:
raise ValueError("Set 'band' at initialization instead.")
if 'args' in kwargs:
raise ValueError("Set 'args' as params in initialization instead.")
new_kwargs = kwargs.copy()
if self.band is not None:
warnings.warn("Band argument ignored (see SciPy docs)")
new_kwargs['band'] = self.band
new_kwargs['args'] = self.internal_params
return root(self.f_cb, intern_x0, jac=self.j_cb, method=method, tol=tol, **new_kwargs)
def _solve_nleq2(self, intern_x0, tol=1e-8, method=None, **kwargs):
from pynleq2 import solve
def f_cb(x, ierr):
f_cb.nfev += 1
return self.f_cb(x, self.internal_params), ierr
f_cb.nfev = 0
def j_cb(x, ierr):
j_cb.njev += 1
return self.j_cb(x, self.internal_params), ierr
j_cb.njev = 0
x, ierr = solve(f_cb, j_cb, intern_x0, **kwargs)
return {
'x': x,
'fun': np.asarray(f_cb(x, 0)),
'success': ierr == 0,
'nfev': f_cb.nfev,
'njev': j_cb.njev,
'ierr': ierr,
}
def _solve_kinsol(self, intern_x0, **kwargs):
import pykinsol
def _f(x, fout):
res = self.f_cb(x, self.internal_params)
fout[:] = res
def _j(x, Jout, fx):
res = self.j_cb(x, self.internal_params)
Jout[:, :] = res[:, :]
return pykinsol.solve(_f, _j, intern_x0, **kwargs)
def _solve_mpmath(self, intern_x0, dps=30, tol=None,
maxsteps=None, **kwargs):
import mpmath
from mpmath.calculus.optimization import MDNewton
mp = mpmath.mp
mp.dps = dps
def _mpf(val):
try:
return mp.mpf(val)
except TypeError: # e.g. mpmath chokes on numpy's int64
return mp.mpf(float(val))
intern_p = tuple(_mpf(_p) for _p in self.internal_params)
maxsteps = maxsteps or MDNewton.maxsteps
tol = tol or mp.eps * 1024
def f_cb(*x):
f_cb.nfev += 1
return self.f_cb(x, intern_p)
f_cb.nfev = 0
if self.j_cb is not None:
def j_cb(*x):
j_cb.njev += 1
return self.j_cb(x, intern_p)
j_cb.njev = 0
kwargs['J'] = j_cb
intern_x0 = tuple(_mpf(_x) for _x in intern_x0)
iters = MDNewton(mp, f_cb, intern_x0, norm=mp.norm, verbose=False, **kwargs)
i = 0
success = False
for x, err in iters:
i += 1
lim = tol*max(mp.norm(x), 1)
if err < lim:
success = True
break
if i >= maxsteps:
break
result = {'x': x, 'success': success, 'nfev': f_cb.nfev, 'nit': i}
if self.j_cb is not None:
result['njev'] = j_cb.njev
return result
def _solve_ipopt(self, intern_x0, **kwargs):
import warnings
from ipopt import minimize_ipopt
warnings.warn("ipopt interface has not yet undergone thorough testing.")
def f_cb(x):
f_cb.nfev += 1
return np.sum(np.abs(self.f_cb(x, self.internal_params)))
f_cb.nfev = 0
if self.j_cb is not None:
def j_cb(x):
j_cb.njev += 1
return self.j_cb(x, self.internal_params)
j_cb.njev = 0
kwargs['jac'] = j_cb
return minimize_ipopt(f_cb, intern_x0, **kwargs)
def _solve_levmar(self, intern_x0, tol=1e-8, **kwargs):
import warnings
import levmar
if 'eps1' in kwargs or 'eps2' in kwargs or 'eps3' in kwargs:
pass
else:
kwargs['eps1'] = kwargs['eps2'] = kwargs['eps3'] = tol
def _f(*args):
return np.asarray(self.f_cb(*args))
def _j(*args):
return np.asarray(self.j_cb(*args))
_x0 = np.asarray(intern_x0)
_y0 = np.zeros(self.nf)
with warnings.catch_warnings(record=True) as wrns:
warnings.simplefilter("always")
p_opt, p_cov, info = levmar.levmar(_f, _x0, _y0, args=(self.internal_params,),
jacf=_j, **kwargs)
success = len(wrns) == 0 and np.all(np.abs(_f(p_opt, self.internal_params)) < tol)
for w in wrns:
raise w
e2p0, (e2, infJTe, Dp2, mu_maxJTJii), nit, reason, nfev, njev, nlinsolv = info
return {'x': p_opt, 'cov': p_cov, 'nfev': nfev, 'njev': njev, 'nit': nit,
'message': reason, 'nlinsolv': nlinsolv, 'success': success}
|
bjodah/pyneqsys | pyneqsys/core.py | NeqSys.post_process | python | def post_process(self, xout, params_out):
# Should be used by all methods matching "solve_*"
for post_processor in self.post_processors:
xout, params_out = post_processor(xout, params_out)
return xout, params_out | Used internally for transformation of variables. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L329-L334 | null | class NeqSys(_NeqSysBase):
"""Represents a system of non-linear equations.
This class provides a unified interface to:
- scipy.optimize.root
- NLEQ2
- KINSOL
- mpmath
- levmar
Parameters
----------
nf : int
Number of functions.
nx : int
Number of independent variables.
f : callback
Function to solve for. Signature ``f(x) -> y`` where ``len(x) == nx``
and ``len(y) == nf``.
jac : callback or None (default)
Jacobian matrix (dfdy).
band : tuple (default: None)
Number of sub- and super-diagonals in jacobian.
names : iterable of str (default: None)
Names of variables, used for plotting and for referencing by name.
param_names : iterable of strings (default: None)
Names of the parameters, used for referencing parameters by name.
x_by_name : bool, default: ``False``
Will values for *x* be referred to by name (in dictionaries)
instead of by index (in arrays)?
par_by_name : bool, default: ``False``
Will values for parameters be referred to by name (in dictionaries)
instead of by index (in arrays)?
latex_names : iterable of str, optional
Names of variables in LaTeX format.
latex_param_names : iterable of str, optional
Names of parameters in LaTeX format.
pre_processors : iterable of callables (optional)
(Forward) transformation of user-input to :py:meth:`solve`
signature: ``f(x1[:], params1[:]) -> x2[:], params2[:]``.
Insert at beginning.
post_processors : iterable of callables (optional)
(Backward) transformation of result from :py:meth:`solve`
signature: ``f(x2[:], params2[:]) -> x1[:], params1[:]``.
Insert at end.
internal_x0_cb : callback (optional)
callback with signature ``f(x[:], p[:]) -> x0[:]``
if not specified, ``x`` from ``self.pre_processors`` will be used.
Examples
--------
>>> neqsys = NeqSys(2, 2, lambda x, p: [(x[0] - x[1])**p[0]/2 + x[0] - 1,
... (x[1] - x[0])**p[0]/2 + x[1]])
>>> x, sol = neqsys.solve([1, 0], [3])
>>> assert sol['success']
>>> print(x)
[ 0.8411639 0.1588361]
See Also
--------
pyneqsys.symbolic.SymbolicSys : use a CAS (SymPy by default) to derive
the jacobian.
"""
def __init__(self, nf, nx=None, f=None, jac=None, band=None, pre_processors=None,
post_processors=None, internal_x0_cb=None, **kwargs):
super(NeqSys, self).__init__(**kwargs)
if nx is None:
nx = len(self.names)
if f is None:
raise ValueError("A callback for f must be provided")
if nf < nx:
raise ValueError("Under-determined system")
self.nf, self.nx = nf, nx
self.f_cb = _ensure_3args(f)
self.j_cb = _ensure_3args(jac)
self.band = band
self.pre_processors = pre_processors or []
self.post_processors = post_processors or []
self.internal_x0_cb = internal_x0_cb
def pre_process(self, x0, params=()):
""" Used internally for transformation of variables. """
# Should be used by all methods matching "solve_*"
if self.x_by_name and isinstance(x0, dict):
x0 = [x0[k] for k in self.names]
if self.par_by_name and isinstance(params, dict):
params = [params[k] for k in self.param_names]
for pre_processor in self.pre_processors:
x0, params = pre_processor(x0, params)
return x0, np.atleast_1d(params)
def solve(self, x0, params=(), internal_x0=None, solver=None, attached_solver=None, **kwargs):
""" Solve with user specified ``solver`` choice.
Parameters
----------
x0: 1D array of floats
Guess (subject to ``self.post_processors``)
params: 1D array_like of floats
Parameters (subject to ``self.post_processors``)
internal_x0: 1D array of floats
When given it overrides (processed) ``x0``. ``internal_x0`` is not
subject to ``self.post_processors``.
solver: str or callable or None or iterable of such
if str: uses _solve_``solver``(\*args, \*\*kwargs).
if ``None``: chooses from PYNEQSYS_SOLVER environment variable.
if iterable: chain solving.
attached_solver: callable factory
Invokes: solver = attached_solver(self).
Returns
-------
array:
solution vector (post-processed by self.post_processors)
dict:
info dictionary containing 'success', 'nfev', 'njev' etc.
Examples
--------
>>> neqsys = NeqSys(2, 2, lambda x, p: [
... (x[0] - x[1])**p[0]/2 + x[0] - 1,
... (x[1] - x[0])**p[0]/2 + x[1]
... ])
>>> x, sol = neqsys.solve([1, 0], [3], solver=(None, 'mpmath'))
>>> assert sol['success']
>>> print(x)
[0.841163901914009663684741869855]
[0.158836098085990336315258130144]
"""
if not isinstance(solver, (tuple, list)):
solver = [solver]
if not isinstance(attached_solver, (tuple, list)):
attached_solver = [attached_solver] + [None]*(len(solver) - 1)
_x0, self.internal_params = self.pre_process(x0, params)
for solv, attached_solv in zip(solver, attached_solver):
if internal_x0 is not None:
_x0 = internal_x0
elif self.internal_x0_cb is not None:
_x0 = self.internal_x0_cb(x0, params)
nfo = self._get_solver_cb(solv, attached_solv)(_x0, **kwargs)
_x0 = nfo['x'].copy()
self.internal_x = _x0
x0 = self.post_process(self.internal_x, self.internal_params)[0]
return x0, nfo
def _solve_scipy(self, intern_x0, tol=1e-8, method=None, **kwargs):
""" Uses ``scipy.optimize.root``
See: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html
Parameters
----------
intern_x0: array_like
initial guess
tol: float
Tolerance
method: str
What method to use. Defaults to ``'lm'`` if ``self.nf > self.nx`` otherwise ``'hybr'``.
"""
from scipy.optimize import root
if method is None:
if self.nf > self.nx:
method = 'lm'
elif self.nf == self.nx:
method = 'hybr'
else:
raise ValueError('Underdetermined problem')
if 'band' in kwargs:
raise ValueError("Set 'band' at initialization instead.")
if 'args' in kwargs:
raise ValueError("Set 'args' as params in initialization instead.")
new_kwargs = kwargs.copy()
if self.band is not None:
warnings.warn("Band argument ignored (see SciPy docs)")
new_kwargs['band'] = self.band
new_kwargs['args'] = self.internal_params
return root(self.f_cb, intern_x0, jac=self.j_cb, method=method, tol=tol, **new_kwargs)
def _solve_nleq2(self, intern_x0, tol=1e-8, method=None, **kwargs):
from pynleq2 import solve
def f_cb(x, ierr):
f_cb.nfev += 1
return self.f_cb(x, self.internal_params), ierr
f_cb.nfev = 0
def j_cb(x, ierr):
j_cb.njev += 1
return self.j_cb(x, self.internal_params), ierr
j_cb.njev = 0
x, ierr = solve(f_cb, j_cb, intern_x0, **kwargs)
return {
'x': x,
'fun': np.asarray(f_cb(x, 0)),
'success': ierr == 0,
'nfev': f_cb.nfev,
'njev': j_cb.njev,
'ierr': ierr,
}
def _solve_kinsol(self, intern_x0, **kwargs):
import pykinsol
def _f(x, fout):
res = self.f_cb(x, self.internal_params)
fout[:] = res
def _j(x, Jout, fx):
res = self.j_cb(x, self.internal_params)
Jout[:, :] = res[:, :]
return pykinsol.solve(_f, _j, intern_x0, **kwargs)
def _solve_mpmath(self, intern_x0, dps=30, tol=None,
maxsteps=None, **kwargs):
import mpmath
from mpmath.calculus.optimization import MDNewton
mp = mpmath.mp
mp.dps = dps
def _mpf(val):
try:
return mp.mpf(val)
except TypeError: # e.g. mpmath chokes on numpy's int64
return mp.mpf(float(val))
intern_p = tuple(_mpf(_p) for _p in self.internal_params)
maxsteps = maxsteps or MDNewton.maxsteps
tol = tol or mp.eps * 1024
def f_cb(*x):
f_cb.nfev += 1
return self.f_cb(x, intern_p)
f_cb.nfev = 0
if self.j_cb is not None:
def j_cb(*x):
j_cb.njev += 1
return self.j_cb(x, intern_p)
j_cb.njev = 0
kwargs['J'] = j_cb
intern_x0 = tuple(_mpf(_x) for _x in intern_x0)
iters = MDNewton(mp, f_cb, intern_x0, norm=mp.norm, verbose=False, **kwargs)
i = 0
success = False
for x, err in iters:
i += 1
lim = tol*max(mp.norm(x), 1)
if err < lim:
success = True
break
if i >= maxsteps:
break
result = {'x': x, 'success': success, 'nfev': f_cb.nfev, 'nit': i}
if self.j_cb is not None:
result['njev'] = j_cb.njev
return result
def _solve_ipopt(self, intern_x0, **kwargs):
import warnings
from ipopt import minimize_ipopt
warnings.warn("ipopt interface has not yet undergone thorough testing.")
def f_cb(x):
f_cb.nfev += 1
return np.sum(np.abs(self.f_cb(x, self.internal_params)))
f_cb.nfev = 0
if self.j_cb is not None:
def j_cb(x):
j_cb.njev += 1
return self.j_cb(x, self.internal_params)
j_cb.njev = 0
kwargs['jac'] = j_cb
return minimize_ipopt(f_cb, intern_x0, **kwargs)
def _solve_levmar(self, intern_x0, tol=1e-8, **kwargs):
import warnings
import levmar
if 'eps1' in kwargs or 'eps2' in kwargs or 'eps3' in kwargs:
pass
else:
kwargs['eps1'] = kwargs['eps2'] = kwargs['eps3'] = tol
def _f(*args):
return np.asarray(self.f_cb(*args))
def _j(*args):
return np.asarray(self.j_cb(*args))
_x0 = np.asarray(intern_x0)
_y0 = np.zeros(self.nf)
with warnings.catch_warnings(record=True) as wrns:
warnings.simplefilter("always")
p_opt, p_cov, info = levmar.levmar(_f, _x0, _y0, args=(self.internal_params,),
jacf=_j, **kwargs)
success = len(wrns) == 0 and np.all(np.abs(_f(p_opt, self.internal_params)) < tol)
for w in wrns:
raise w
e2p0, (e2, infJTe, Dp2, mu_maxJTJii), nit, reason, nfev, njev, nlinsolv = info
return {'x': p_opt, 'cov': p_cov, 'nfev': nfev, 'njev': njev, 'nit': nit,
'message': reason, 'nlinsolv': nlinsolv, 'success': success}
|
bjodah/pyneqsys | pyneqsys/core.py | NeqSys.solve | python | def solve(self, x0, params=(), internal_x0=None, solver=None, attached_solver=None, **kwargs):
if not isinstance(solver, (tuple, list)):
solver = [solver]
if not isinstance(attached_solver, (tuple, list)):
attached_solver = [attached_solver] + [None]*(len(solver) - 1)
_x0, self.internal_params = self.pre_process(x0, params)
for solv, attached_solv in zip(solver, attached_solver):
if internal_x0 is not None:
_x0 = internal_x0
elif self.internal_x0_cb is not None:
_x0 = self.internal_x0_cb(x0, params)
nfo = self._get_solver_cb(solv, attached_solv)(_x0, **kwargs)
_x0 = nfo['x'].copy()
self.internal_x = _x0
x0 = self.post_process(self.internal_x, self.internal_params)[0]
return x0, nfo | Solve with user specified ``solver`` choice.
Parameters
----------
x0: 1D array of floats
Guess (subject to ``self.post_processors``)
params: 1D array_like of floats
Parameters (subject to ``self.post_processors``)
internal_x0: 1D array of floats
When given it overrides (processed) ``x0``. ``internal_x0`` is not
subject to ``self.post_processors``.
solver: str or callable or None or iterable of such
if str: uses _solve_``solver``(\*args, \*\*kwargs).
if ``None``: chooses from PYNEQSYS_SOLVER environment variable.
if iterable: chain solving.
attached_solver: callable factory
Invokes: solver = attached_solver(self).
Returns
-------
array:
solution vector (post-processed by self.post_processors)
dict:
info dictionary containing 'success', 'nfev', 'njev' etc.
Examples
--------
>>> neqsys = NeqSys(2, 2, lambda x, p: [
... (x[0] - x[1])**p[0]/2 + x[0] - 1,
... (x[1] - x[0])**p[0]/2 + x[1]
... ])
>>> x, sol = neqsys.solve([1, 0], [3], solver=(None, 'mpmath'))
>>> assert sol['success']
>>> print(x)
[0.841163901914009663684741869855]
[0.158836098085990336315258130144] | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L336-L390 | [
"def _get_solver_cb(self, solver, attached_solver):\n if attached_solver is not None:\n if solver is not None:\n raise ValueError(\"solver must be None.\")\n solver = attached_solver(self)\n if callable(solver):\n return solver\n if solver is None:\n solver = os.environ.get('PYNEQSYS_SOLVER', 'scipy')\n return getattr(self, '_solve_' + solver)\n",
"def pre_process(self, x0, params=()):\n \"\"\" Used internally for transformation of variables. \"\"\"\n # Should be used by all methods matching \"solve_*\"\n if self.x_by_name and isinstance(x0, dict):\n x0 = [x0[k] for k in self.names]\n if self.par_by_name and isinstance(params, dict):\n params = [params[k] for k in self.param_names]\n for pre_processor in self.pre_processors:\n x0, params = pre_processor(x0, params)\n return x0, np.atleast_1d(params)\n",
"def post_process(self, xout, params_out):\n \"\"\" Used internally for transformation of variables. \"\"\"\n # Should be used by all methods matching \"solve_*\"\n for post_processor in self.post_processors:\n xout, params_out = post_processor(xout, params_out)\n return xout, params_out\n"
] | class NeqSys(_NeqSysBase):
"""Represents a system of non-linear equations.
This class provides a unified interface to:
- scipy.optimize.root
- NLEQ2
- KINSOL
- mpmath
- levmar
Parameters
----------
nf : int
Number of functions.
nx : int
Number of independent variables.
f : callback
Function to solve for. Signature ``f(x) -> y`` where ``len(x) == nx``
and ``len(y) == nf``.
jac : callback or None (default)
Jacobian matrix (dfdy).
band : tuple (default: None)
Number of sub- and super-diagonals in jacobian.
names : iterable of str (default: None)
Names of variables, used for plotting and for referencing by name.
param_names : iterable of strings (default: None)
Names of the parameters, used for referencing parameters by name.
x_by_name : bool, default: ``False``
Will values for *x* be referred to by name (in dictionaries)
instead of by index (in arrays)?
par_by_name : bool, default: ``False``
Will values for parameters be referred to by name (in dictionaries)
instead of by index (in arrays)?
latex_names : iterable of str, optional
Names of variables in LaTeX format.
latex_param_names : iterable of str, optional
Names of parameters in LaTeX format.
pre_processors : iterable of callables (optional)
(Forward) transformation of user-input to :py:meth:`solve`
signature: ``f(x1[:], params1[:]) -> x2[:], params2[:]``.
Insert at beginning.
post_processors : iterable of callables (optional)
(Backward) transformation of result from :py:meth:`solve`
signature: ``f(x2[:], params2[:]) -> x1[:], params1[:]``.
Insert at end.
internal_x0_cb : callback (optional)
callback with signature ``f(x[:], p[:]) -> x0[:]``
if not specified, ``x`` from ``self.pre_processors`` will be used.
Examples
--------
>>> neqsys = NeqSys(2, 2, lambda x, p: [(x[0] - x[1])**p[0]/2 + x[0] - 1,
... (x[1] - x[0])**p[0]/2 + x[1]])
>>> x, sol = neqsys.solve([1, 0], [3])
>>> assert sol['success']
>>> print(x)
[ 0.8411639 0.1588361]
See Also
--------
pyneqsys.symbolic.SymbolicSys : use a CAS (SymPy by default) to derive
the jacobian.
"""
def __init__(self, nf, nx=None, f=None, jac=None, band=None, pre_processors=None,
post_processors=None, internal_x0_cb=None, **kwargs):
super(NeqSys, self).__init__(**kwargs)
if nx is None:
nx = len(self.names)
if f is None:
raise ValueError("A callback for f must be provided")
if nf < nx:
raise ValueError("Under-determined system")
self.nf, self.nx = nf, nx
self.f_cb = _ensure_3args(f)
self.j_cb = _ensure_3args(jac)
self.band = band
self.pre_processors = pre_processors or []
self.post_processors = post_processors or []
self.internal_x0_cb = internal_x0_cb
def pre_process(self, x0, params=()):
""" Used internally for transformation of variables. """
# Should be used by all methods matching "solve_*"
if self.x_by_name and isinstance(x0, dict):
x0 = [x0[k] for k in self.names]
if self.par_by_name and isinstance(params, dict):
params = [params[k] for k in self.param_names]
for pre_processor in self.pre_processors:
x0, params = pre_processor(x0, params)
return x0, np.atleast_1d(params)
def post_process(self, xout, params_out):
""" Used internally for transformation of variables. """
# Should be used by all methods matching "solve_*"
for post_processor in self.post_processors:
xout, params_out = post_processor(xout, params_out)
return xout, params_out
def _solve_scipy(self, intern_x0, tol=1e-8, method=None, **kwargs):
""" Uses ``scipy.optimize.root``
See: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html
Parameters
----------
intern_x0: array_like
initial guess
tol: float
Tolerance
method: str
What method to use. Defaults to ``'lm'`` if ``self.nf > self.nx`` otherwise ``'hybr'``.
"""
from scipy.optimize import root
if method is None:
if self.nf > self.nx:
method = 'lm'
elif self.nf == self.nx:
method = 'hybr'
else:
raise ValueError('Underdetermined problem')
if 'band' in kwargs:
raise ValueError("Set 'band' at initialization instead.")
if 'args' in kwargs:
raise ValueError("Set 'args' as params in initialization instead.")
new_kwargs = kwargs.copy()
if self.band is not None:
warnings.warn("Band argument ignored (see SciPy docs)")
new_kwargs['band'] = self.band
new_kwargs['args'] = self.internal_params
return root(self.f_cb, intern_x0, jac=self.j_cb, method=method, tol=tol, **new_kwargs)
def _solve_nleq2(self, intern_x0, tol=1e-8, method=None, **kwargs):
from pynleq2 import solve
def f_cb(x, ierr):
f_cb.nfev += 1
return self.f_cb(x, self.internal_params), ierr
f_cb.nfev = 0
def j_cb(x, ierr):
j_cb.njev += 1
return self.j_cb(x, self.internal_params), ierr
j_cb.njev = 0
x, ierr = solve(f_cb, j_cb, intern_x0, **kwargs)
return {
'x': x,
'fun': np.asarray(f_cb(x, 0)),
'success': ierr == 0,
'nfev': f_cb.nfev,
'njev': j_cb.njev,
'ierr': ierr,
}
def _solve_kinsol(self, intern_x0, **kwargs):
import pykinsol
def _f(x, fout):
res = self.f_cb(x, self.internal_params)
fout[:] = res
def _j(x, Jout, fx):
res = self.j_cb(x, self.internal_params)
Jout[:, :] = res[:, :]
return pykinsol.solve(_f, _j, intern_x0, **kwargs)
def _solve_mpmath(self, intern_x0, dps=30, tol=None,
maxsteps=None, **kwargs):
import mpmath
from mpmath.calculus.optimization import MDNewton
mp = mpmath.mp
mp.dps = dps
def _mpf(val):
try:
return mp.mpf(val)
except TypeError: # e.g. mpmath chokes on numpy's int64
return mp.mpf(float(val))
intern_p = tuple(_mpf(_p) for _p in self.internal_params)
maxsteps = maxsteps or MDNewton.maxsteps
tol = tol or mp.eps * 1024
def f_cb(*x):
f_cb.nfev += 1
return self.f_cb(x, intern_p)
f_cb.nfev = 0
if self.j_cb is not None:
def j_cb(*x):
j_cb.njev += 1
return self.j_cb(x, intern_p)
j_cb.njev = 0
kwargs['J'] = j_cb
intern_x0 = tuple(_mpf(_x) for _x in intern_x0)
iters = MDNewton(mp, f_cb, intern_x0, norm=mp.norm, verbose=False, **kwargs)
i = 0
success = False
for x, err in iters:
i += 1
lim = tol*max(mp.norm(x), 1)
if err < lim:
success = True
break
if i >= maxsteps:
break
result = {'x': x, 'success': success, 'nfev': f_cb.nfev, 'nit': i}
if self.j_cb is not None:
result['njev'] = j_cb.njev
return result
def _solve_ipopt(self, intern_x0, **kwargs):
import warnings
from ipopt import minimize_ipopt
warnings.warn("ipopt interface has not yet undergone thorough testing.")
def f_cb(x):
f_cb.nfev += 1
return np.sum(np.abs(self.f_cb(x, self.internal_params)))
f_cb.nfev = 0
if self.j_cb is not None:
def j_cb(x):
j_cb.njev += 1
return self.j_cb(x, self.internal_params)
j_cb.njev = 0
kwargs['jac'] = j_cb
return minimize_ipopt(f_cb, intern_x0, **kwargs)
def _solve_levmar(self, intern_x0, tol=1e-8, **kwargs):
import warnings
import levmar
if 'eps1' in kwargs or 'eps2' in kwargs or 'eps3' in kwargs:
pass
else:
kwargs['eps1'] = kwargs['eps2'] = kwargs['eps3'] = tol
def _f(*args):
return np.asarray(self.f_cb(*args))
def _j(*args):
return np.asarray(self.j_cb(*args))
_x0 = np.asarray(intern_x0)
_y0 = np.zeros(self.nf)
with warnings.catch_warnings(record=True) as wrns:
warnings.simplefilter("always")
p_opt, p_cov, info = levmar.levmar(_f, _x0, _y0, args=(self.internal_params,),
jacf=_j, **kwargs)
success = len(wrns) == 0 and np.all(np.abs(_f(p_opt, self.internal_params)) < tol)
for w in wrns:
raise w
e2p0, (e2, infJTe, Dp2, mu_maxJTJii), nit, reason, nfev, njev, nlinsolv = info
return {'x': p_opt, 'cov': p_cov, 'nfev': nfev, 'njev': njev, 'nit': nit,
'message': reason, 'nlinsolv': nlinsolv, 'success': success}
|
bjodah/pyneqsys | pyneqsys/core.py | NeqSys._solve_scipy | python | def _solve_scipy(self, intern_x0, tol=1e-8, method=None, **kwargs):
from scipy.optimize import root
if method is None:
if self.nf > self.nx:
method = 'lm'
elif self.nf == self.nx:
method = 'hybr'
else:
raise ValueError('Underdetermined problem')
if 'band' in kwargs:
raise ValueError("Set 'band' at initialization instead.")
if 'args' in kwargs:
raise ValueError("Set 'args' as params in initialization instead.")
new_kwargs = kwargs.copy()
if self.band is not None:
warnings.warn("Band argument ignored (see SciPy docs)")
new_kwargs['band'] = self.band
new_kwargs['args'] = self.internal_params
return root(self.f_cb, intern_x0, jac=self.j_cb, method=method, tol=tol, **new_kwargs) | Uses ``scipy.optimize.root``
See: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html
Parameters
----------
intern_x0: array_like
initial guess
tol: float
Tolerance
method: str
What method to use. Defaults to ``'lm'`` if ``self.nf > self.nx`` otherwise ``'hybr'``. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L392-L426 | null | class NeqSys(_NeqSysBase):
"""Represents a system of non-linear equations.
This class provides a unified interface to:
- scipy.optimize.root
- NLEQ2
- KINSOL
- mpmath
- levmar
Parameters
----------
nf : int
Number of functions.
nx : int
Number of independent variables.
f : callback
Function to solve for. Signature ``f(x) -> y`` where ``len(x) == nx``
and ``len(y) == nf``.
jac : callback or None (default)
Jacobian matrix (dfdy).
band : tuple (default: None)
Number of sub- and super-diagonals in jacobian.
names : iterable of str (default: None)
Names of variables, used for plotting and for referencing by name.
param_names : iterable of strings (default: None)
Names of the parameters, used for referencing parameters by name.
x_by_name : bool, default: ``False``
Will values for *x* be referred to by name (in dictionaries)
instead of by index (in arrays)?
par_by_name : bool, default: ``False``
Will values for parameters be referred to by name (in dictionaries)
instead of by index (in arrays)?
latex_names : iterable of str, optional
Names of variables in LaTeX format.
latex_param_names : iterable of str, optional
Names of parameters in LaTeX format.
pre_processors : iterable of callables (optional)
(Forward) transformation of user-input to :py:meth:`solve`
signature: ``f(x1[:], params1[:]) -> x2[:], params2[:]``.
Insert at beginning.
post_processors : iterable of callables (optional)
(Backward) transformation of result from :py:meth:`solve`
signature: ``f(x2[:], params2[:]) -> x1[:], params1[:]``.
Insert at end.
internal_x0_cb : callback (optional)
callback with signature ``f(x[:], p[:]) -> x0[:]``
if not specified, ``x`` from ``self.pre_processors`` will be used.
Examples
--------
>>> neqsys = NeqSys(2, 2, lambda x, p: [(x[0] - x[1])**p[0]/2 + x[0] - 1,
... (x[1] - x[0])**p[0]/2 + x[1]])
>>> x, sol = neqsys.solve([1, 0], [3])
>>> assert sol['success']
>>> print(x)
[ 0.8411639 0.1588361]
See Also
--------
pyneqsys.symbolic.SymbolicSys : use a CAS (SymPy by default) to derive
the jacobian.
"""
def __init__(self, nf, nx=None, f=None, jac=None, band=None, pre_processors=None,
post_processors=None, internal_x0_cb=None, **kwargs):
super(NeqSys, self).__init__(**kwargs)
if nx is None:
nx = len(self.names)
if f is None:
raise ValueError("A callback for f must be provided")
if nf < nx:
raise ValueError("Under-determined system")
self.nf, self.nx = nf, nx
self.f_cb = _ensure_3args(f)
self.j_cb = _ensure_3args(jac)
self.band = band
self.pre_processors = pre_processors or []
self.post_processors = post_processors or []
self.internal_x0_cb = internal_x0_cb
def pre_process(self, x0, params=()):
""" Used internally for transformation of variables. """
# Should be used by all methods matching "solve_*"
if self.x_by_name and isinstance(x0, dict):
x0 = [x0[k] for k in self.names]
if self.par_by_name and isinstance(params, dict):
params = [params[k] for k in self.param_names]
for pre_processor in self.pre_processors:
x0, params = pre_processor(x0, params)
return x0, np.atleast_1d(params)
def post_process(self, xout, params_out):
""" Used internally for transformation of variables. """
# Should be used by all methods matching "solve_*"
for post_processor in self.post_processors:
xout, params_out = post_processor(xout, params_out)
return xout, params_out
def solve(self, x0, params=(), internal_x0=None, solver=None, attached_solver=None, **kwargs):
""" Solve with user specified ``solver`` choice.
Parameters
----------
x0: 1D array of floats
Guess (subject to ``self.post_processors``)
params: 1D array_like of floats
Parameters (subject to ``self.post_processors``)
internal_x0: 1D array of floats
When given it overrides (processed) ``x0``. ``internal_x0`` is not
subject to ``self.post_processors``.
solver: str or callable or None or iterable of such
if str: uses _solve_``solver``(\*args, \*\*kwargs).
if ``None``: chooses from PYNEQSYS_SOLVER environment variable.
if iterable: chain solving.
attached_solver: callable factory
Invokes: solver = attached_solver(self).
Returns
-------
array:
solution vector (post-processed by self.post_processors)
dict:
info dictionary containing 'success', 'nfev', 'njev' etc.
Examples
--------
>>> neqsys = NeqSys(2, 2, lambda x, p: [
... (x[0] - x[1])**p[0]/2 + x[0] - 1,
... (x[1] - x[0])**p[0]/2 + x[1]
... ])
>>> x, sol = neqsys.solve([1, 0], [3], solver=(None, 'mpmath'))
>>> assert sol['success']
>>> print(x)
[0.841163901914009663684741869855]
[0.158836098085990336315258130144]
"""
if not isinstance(solver, (tuple, list)):
solver = [solver]
if not isinstance(attached_solver, (tuple, list)):
attached_solver = [attached_solver] + [None]*(len(solver) - 1)
_x0, self.internal_params = self.pre_process(x0, params)
for solv, attached_solv in zip(solver, attached_solver):
if internal_x0 is not None:
_x0 = internal_x0
elif self.internal_x0_cb is not None:
_x0 = self.internal_x0_cb(x0, params)
nfo = self._get_solver_cb(solv, attached_solv)(_x0, **kwargs)
_x0 = nfo['x'].copy()
self.internal_x = _x0
x0 = self.post_process(self.internal_x, self.internal_params)[0]
return x0, nfo
def _solve_nleq2(self, intern_x0, tol=1e-8, method=None, **kwargs):
from pynleq2 import solve
def f_cb(x, ierr):
f_cb.nfev += 1
return self.f_cb(x, self.internal_params), ierr
f_cb.nfev = 0
def j_cb(x, ierr):
j_cb.njev += 1
return self.j_cb(x, self.internal_params), ierr
j_cb.njev = 0
x, ierr = solve(f_cb, j_cb, intern_x0, **kwargs)
return {
'x': x,
'fun': np.asarray(f_cb(x, 0)),
'success': ierr == 0,
'nfev': f_cb.nfev,
'njev': j_cb.njev,
'ierr': ierr,
}
def _solve_kinsol(self, intern_x0, **kwargs):
import pykinsol
def _f(x, fout):
res = self.f_cb(x, self.internal_params)
fout[:] = res
def _j(x, Jout, fx):
res = self.j_cb(x, self.internal_params)
Jout[:, :] = res[:, :]
return pykinsol.solve(_f, _j, intern_x0, **kwargs)
def _solve_mpmath(self, intern_x0, dps=30, tol=None,
maxsteps=None, **kwargs):
import mpmath
from mpmath.calculus.optimization import MDNewton
mp = mpmath.mp
mp.dps = dps
def _mpf(val):
try:
return mp.mpf(val)
except TypeError: # e.g. mpmath chokes on numpy's int64
return mp.mpf(float(val))
intern_p = tuple(_mpf(_p) for _p in self.internal_params)
maxsteps = maxsteps or MDNewton.maxsteps
tol = tol or mp.eps * 1024
def f_cb(*x):
f_cb.nfev += 1
return self.f_cb(x, intern_p)
f_cb.nfev = 0
if self.j_cb is not None:
def j_cb(*x):
j_cb.njev += 1
return self.j_cb(x, intern_p)
j_cb.njev = 0
kwargs['J'] = j_cb
intern_x0 = tuple(_mpf(_x) for _x in intern_x0)
iters = MDNewton(mp, f_cb, intern_x0, norm=mp.norm, verbose=False, **kwargs)
i = 0
success = False
for x, err in iters:
i += 1
lim = tol*max(mp.norm(x), 1)
if err < lim:
success = True
break
if i >= maxsteps:
break
result = {'x': x, 'success': success, 'nfev': f_cb.nfev, 'nit': i}
if self.j_cb is not None:
result['njev'] = j_cb.njev
return result
def _solve_ipopt(self, intern_x0, **kwargs):
import warnings
from ipopt import minimize_ipopt
warnings.warn("ipopt interface has not yet undergone thorough testing.")
def f_cb(x):
f_cb.nfev += 1
return np.sum(np.abs(self.f_cb(x, self.internal_params)))
f_cb.nfev = 0
if self.j_cb is not None:
def j_cb(x):
j_cb.njev += 1
return self.j_cb(x, self.internal_params)
j_cb.njev = 0
kwargs['jac'] = j_cb
return minimize_ipopt(f_cb, intern_x0, **kwargs)
def _solve_levmar(self, intern_x0, tol=1e-8, **kwargs):
import warnings
import levmar
if 'eps1' in kwargs or 'eps2' in kwargs or 'eps3' in kwargs:
pass
else:
kwargs['eps1'] = kwargs['eps2'] = kwargs['eps3'] = tol
def _f(*args):
return np.asarray(self.f_cb(*args))
def _j(*args):
return np.asarray(self.j_cb(*args))
_x0 = np.asarray(intern_x0)
_y0 = np.zeros(self.nf)
with warnings.catch_warnings(record=True) as wrns:
warnings.simplefilter("always")
p_opt, p_cov, info = levmar.levmar(_f, _x0, _y0, args=(self.internal_params,),
jacf=_j, **kwargs)
success = len(wrns) == 0 and np.all(np.abs(_f(p_opt, self.internal_params)) < tol)
for w in wrns:
raise w
e2p0, (e2, infJTe, Dp2, mu_maxJTJii), nit, reason, nfev, njev, nlinsolv = info
return {'x': p_opt, 'cov': p_cov, 'nfev': nfev, 'njev': njev, 'nit': nit,
'message': reason, 'nlinsolv': nlinsolv, 'success': success}
|
bjodah/pyneqsys | pyneqsys/core.py | ConditionalNeqSys.solve | python | def solve(self, x0, params=(), internal_x0=None, solver=None,
conditional_maxiter=20, initial_conditions=None, **kwargs):
if initial_conditions is not None:
conds = initial_conditions
else:
conds = self.get_conds(x0, params, initial_conditions)
idx, nfev, njev = 0, 0, 0
while idx < conditional_maxiter:
neqsys = self.neqsys_factory(conds)
x0, info = neqsys.solve(x0, params, internal_x0, solver, **kwargs)
if idx == 0:
internal_x0 = None
nfev += info['nfev']
njev += info.get('njev', 0)
new_conds = self.get_conds(x0, params, conds)
if new_conds == conds:
break
else:
conds = new_conds
idx += 1
if idx == conditional_maxiter:
raise Exception("Solving failed, conditional_maxiter reached")
self.internal_x = info['x']
self.internal_params = neqsys.internal_params
result = {
'x': info['x'],
'success': info['success'],
'conditions': conds,
'nfev': nfev,
'njev': njev,
}
if 'fun' in info:
result['fun'] = info['fun']
return x0, result | Solve the problem (systems of equations)
Parameters
----------
x0 : array
Guess.
params : array
See :meth:`NeqSys.solve`.
internal_x0 : array
See :meth:`NeqSys.solve`.
solver : str or callable or iterable of such.
See :meth:`NeqSys.solve`.
conditional_maxiter : int
Maximum number of switches between conditions.
initial_conditions : iterable of bools
Corresponding conditions to ``x0``
\\*\\*kwargs :
Keyword arguments passed on to :meth:`NeqSys.solve`. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L626-L679 | [
"def get_conds(self, x, params, prev_conds=None):\n if prev_conds is None:\n prev_conds = [False]*len(self.condition_cb_pairs)\n return tuple([bw(x, params) if prev else fw(x, params) for\n prev, (fw, bw) in zip(prev_conds, self.condition_cb_pairs)])\n"
] | class ConditionalNeqSys(_NeqSysBase):
""" Collect multiple systems of non-linear equations with different
conditionals.
If a problem in a fixed number of variables is described by different
systems of equations for different domains, then this class may be used
to describe that set of systems.
The user provides a set of conditions which governs what system of
equations to apply. The set of conditions then represent a vector
of booleans which is passed to a user provided factory function of
NeqSys instances. The conditions may be asymmetrical (each condition
consits of two callbacks, one for evaluating when the condition was
previously ``False``, and one when it was previously ``True``. The motivation
for this asymmetry is that a user may want to introduce a tolerance for
numerical noise in the solution (and avoid possibly endless loops).
If ``fastcache`` is available an LRU cache will be used for
``neqsys_factory``, it is therefore important that the factory function
is idempotent.
Parameters
----------
condition_cb_pairs : list of (callback, callback) tuples
Callbacks should have the signature: ``f(x, p) -> bool``.
neqsys_factory : callback
Should have the signature ``f(conds) -> NeqSys instance``,
where conds is a list of bools.
names : list of strings
Examples
--------
>>> from math import sin, pi
>>> f_a = lambda x, p: [sin(p[0]*x[0])] # when x <= 0
>>> f_b = lambda x, p: [x[0]*(p[1]-x[0])] # when x >= 0
>>> factory = lambda conds: NeqSys(1, 1, f_b) if conds[0] else NeqSys(
... 1, 1, f_a)
>>> cneqsys = ConditionalNeqSys([(lambda x, p: x[0] > 0, # no 0-switch
... lambda x, p: x[0] >= 0)], # no 0-switch
... factory)
>>> x, sol = cneqsys.solve([0], [pi, 3])
>>> assert sol['success']
>>> print(x)
[ 0.]
>>> x, sol = cneqsys.solve([-1.4], [pi, 3])
>>> assert sol['success']
>>> print(x)
[-1.]
>>> x, sol = cneqsys.solve([2], [pi, 3])
>>> assert sol['success']
>>> print(x)
[ 3.]
>>> x, sol = cneqsys.solve([7], [pi, 3])
>>> assert sol['success']
>>> print(x)
[ 3.]
"""
def __init__(self, condition_cb_pairs, neqsys_factory, **kwargs):
super(ConditionalNeqSys, self).__init__(**kwargs)
self.condition_cb_pairs = condition_cb_pairs
self.neqsys_factory = _cache_it(neqsys_factory)
def get_conds(self, x, params, prev_conds=None):
if prev_conds is None:
prev_conds = [False]*len(self.condition_cb_pairs)
return tuple([bw(x, params) if prev else fw(x, params) for
prev, (fw, bw) in zip(prev_conds, self.condition_cb_pairs)])
def post_process(self, x, params, conds=None):
if conds is None:
conds = self.get_conds(x, params)
return self.neqsys_factory(conds).post_process(x, params)
post_process.__doc__ = NeqSys.post_process.__doc__
def pre_process(self, x, params, conds=None):
if conds is None:
conds = self.get_conds(x, params)
return self.neqsys_factory(conds).pre_process(x, params)
pre_process.__doc__ = NeqSys.pre_process.__doc__
def f_cb(self, x, params, conds=None):
if conds is None:
conds = self.get_conds(x, params)
return self.neqsys_factory(conds).f_cb(x, params)
|
bjodah/pyneqsys | examples/bi_dimensional.py | solve | python | def solve(guess_a, guess_b, power, solver='scipy'):
# The problem is 2 dimensional so we need 2 symbols
x = sp.symbols('x:2', real=True)
# There is a user specified parameter ``p`` in this problem:
p = sp.Symbol('p', real=True, negative=False, integer=True)
# Our system consists of 2-non-linear equations:
f = [x[0] + (x[0] - x[1])**p/2 - 1,
(x[1] - x[0])**p/2 + x[1]]
# We construct our ``SymbolicSys`` instance by passing variables, equations and parameters:
neqsys = SymbolicSys(x, f, [p]) # (this will derive the Jacobian symbolically)
# Finally we solve the system using user-specified ``solver`` choice:
return neqsys.solve([guess_a, guess_b], [power], solver=solver) | Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/examples/bi_dimensional.py#L13-L26 | [
"def solve(self, x0, params=(), internal_x0=None, solver=None, attached_solver=None, **kwargs):\n \"\"\" Solve with user specified ``solver`` choice.\n\n Parameters\n ----------\n x0: 1D array of floats\n Guess (subject to ``self.post_processors``)\n params: 1D array_like of floats\n Parameters (subject to ``self.post_processors``)\n internal_x0: 1D array of floats\n When given it overrides (processed) ``x0``. ``internal_x0`` is not\n subject to ``self.post_processors``.\n solver: str or callable or None or iterable of such\n if str: uses _solve_``solver``(\\*args, \\*\\*kwargs).\n if ``None``: chooses from PYNEQSYS_SOLVER environment variable.\n if iterable: chain solving.\n attached_solver: callable factory\n Invokes: solver = attached_solver(self).\n\n Returns\n -------\n array:\n solution vector (post-processed by self.post_processors)\n dict:\n info dictionary containing 'success', 'nfev', 'njev' etc.\n\n Examples\n --------\n >>> neqsys = NeqSys(2, 2, lambda x, p: [\n ... (x[0] - x[1])**p[0]/2 + x[0] - 1,\n ... (x[1] - x[0])**p[0]/2 + x[1]\n ... ])\n >>> x, sol = neqsys.solve([1, 0], [3], solver=(None, 'mpmath'))\n >>> assert sol['success']\n >>> print(x)\n [0.841163901914009663684741869855]\n [0.158836098085990336315258130144]\n\n \"\"\"\n if not isinstance(solver, (tuple, list)):\n solver = [solver]\n if not isinstance(attached_solver, (tuple, list)):\n attached_solver = [attached_solver] + [None]*(len(solver) - 1)\n _x0, self.internal_params = self.pre_process(x0, params)\n for solv, attached_solv in zip(solver, attached_solver):\n if internal_x0 is not None:\n _x0 = internal_x0\n elif self.internal_x0_cb is not None:\n _x0 = self.internal_x0_cb(x0, params)\n\n nfo = self._get_solver_cb(solv, attached_solv)(_x0, **kwargs)\n _x0 = nfo['x'].copy()\n self.internal_x = _x0\n x0 = self.post_process(self.internal_x, self.internal_params)[0]\n return x0, nfo\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# PYTHON_ARGCOMPLETE_OK
# Pass --help flag for help on command-line interface
import sympy as sp
import numpy as np
from pyneqsys.symbolic import SymbolicSys
def main(guess_a=1., guess_b=0., power=3, savetxt='None', verbose=False):
    """
    Example demonstrating how to solve a system of non-linear equations defined as SymPy expressions.
    The example shows how a non-linear problem can be given a command-line interface which may be
    preferred by end-users who are not familiar with Python.

    Parameters
    ----------
    guess_a, guess_b : float
        Initial guesses for the two unknowns.
    power : int
        Exponent parameter of the system (forwarded to :func:`solve`).
    savetxt : str
        Path to write the solution to; the sentinel string 'None' (default)
        prints to stdout instead.
    verbose : bool
        Print the full solver info object rather than just the solution.
    """
    x, sol = solve(guess_a, guess_b, power)  # see function definition above
    # Subscript access works for scipy's OptimizeResult as well as the plain
    # dicts returned by other solvers (consistent with the rest of pyneqsys).
    assert sol['success']
    if savetxt != 'None':
        # np.savetxt takes the filename first, then the array
        # (the original call had the arguments reversed).
        np.savetxt(savetxt, x)
    else:
        if verbose:
            print(sol)
        else:
            print(x)
if __name__ == '__main__':  # are we running from the command line (or are we being imported from)?
    try:
        # argh builds an automatic CLI (flags, --help) from main's signature.
        import argh
        argh.dispatch_command(main, output_file=None)
    except ImportError:
        # argh missing: run with defaults, warning if args would be dropped.
        import sys
        if len(sys.argv) > 1:
            import warnings
            warnings.warn("Ignoring parameters run "
                          "'pip install --user argh' to fix.")
        main()
|
bjodah/pyneqsys | examples/bi_dimensional.py | main | python | def main(guess_a=1., guess_b=0., power=3, savetxt='None', verbose=False):
x, sol = solve(guess_a, guess_b, power) # see function definition above
assert sol.success
if savetxt != 'None':
np.savetxt(x, savetxt)
else:
if verbose:
print(sol)
else:
print(x) | Example demonstrating how to solve a system of non-linear equations defined as SymPy expressions.
The example shows how a non-linear problem can be given a command-line interface which may be
preferred by end-users who are not familiar with Python. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/examples/bi_dimensional.py#L29-L44 | [
"def solve(guess_a, guess_b, power, solver='scipy'):\n \"\"\" Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method. \"\"\"\n # The problem is 2 dimensional so we need 2 symbols\n x = sp.symbols('x:2', real=True)\n # There is a user specified parameter ``p`` in this problem:\n p = sp.Symbol('p', real=True, negative=False, integer=True)\n # Our system consists of 2-non-linear equations:\n f = [x[0] + (x[0] - x[1])**p/2 - 1,\n (x[1] - x[0])**p/2 + x[1]]\n # We construct our ``SymbolicSys`` instance by passing variables, equations and parameters:\n neqsys = SymbolicSys(x, f, [p]) # (this will derive the Jacobian symbolically)\n\n # Finally we solve the system using user-specified ``solver`` choice:\n return neqsys.solve([guess_a, guess_b], [power], solver=solver)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# PYTHON_ARGCOMPLETE_OK
# Pass --help flag for help on command-line interface
import sympy as sp
import numpy as np
from pyneqsys.symbolic import SymbolicSys
def solve(guess_a, guess_b, power, solver='scipy'):
    """ Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method. """
    # Two real-valued unknowns for this 2-dimensional problem:
    x0, x1 = sp.symbols('x:2', real=True)
    # One user-supplied (non-negative integer) parameter:
    p = sp.Symbol('p', real=True, negative=False, integer=True)
    # The two coupled non-linear equations of the system:
    exprs = [
        x0 + (x0 - x1)**p/2 - 1,
        (x1 - x0)**p/2 + x1,
    ]
    # SymbolicSys derives the Jacobian symbolically from the expressions.
    neq_sys = SymbolicSys((x0, x1), exprs, [p])
    # Delegate to the user-chosen solver backend:
    return neq_sys.solve([guess_a, guess_b], [power], solver=solver)
if __name__ == '__main__': # are we running from the command line (or are we being imported from)?
try:
import argh
argh.dispatch_command(main, output_file=None)
except ImportError:
import sys
if len(sys.argv) > 1:
import warnings
warnings.warn("Ignoring parameters run "
"'pip install --user argh' to fix.")
main()
|
bjodah/pyneqsys | pyneqsys/plotting.py | plot_series | python | def plot_series(xres, varied_data, indices=None, info=None,
fail_vline=None, plot_kwargs_cb=None,
ls=('-', '--', ':', '-.'),
c=('k', 'r', 'g', 'b', 'c', 'm', 'y'), labels=None,
ax=None, names=None, latex_names=None):
import matplotlib.pyplot as plt
if indices is None:
indices = range(xres.shape[1])
if fail_vline is None:
if info is None:
fail_vline = False
else:
fail_vline = True
if ax is None:
ax = plt.subplot(1, 1, 1)
if labels is None:
labels = names if latex_names is None else ['$%s$' % ln.strip('$') for ln in latex_names]
if plot_kwargs_cb is None:
def plot_kwargs_cb(idx, labels=None):
kwargs = {'ls': ls[idx % len(ls)],
'c': c[idx % len(c)]}
if labels:
kwargs['label'] = labels[idx]
return kwargs
else:
plot_kwargs_cb = plot_kwargs_cb or (lambda idx: {})
for idx in indices:
ax.plot(varied_data, xres[:, idx], **plot_kwargs_cb(idx, labels=labels))
if fail_vline:
for i, nfo in enumerate(info):
if not nfo['success']:
ax.axvline(varied_data[i], c='k', ls='--')
return ax | Plot the values of the solution vector vs the varied parameter.
Parameters
----------
xres : array
Solution vector of shape ``(varied_data.size, x0.size)``.
varied_data : array
Numerical values of the varied parameter.
indices : iterable of integers, optional
Indices of variables to be plotted. default: all
fail_vline : bool
Show vertical lines where the solver failed.
plot_kwargs_cb : callable
Takes the index as single argument, returns a dict passed to
the plotting function
ls : iterable of str
Linestyles.
c : iterable of str
Colors.
labels : iterable of str
ax : matplotlib Axes instance
names : iterable of str
latex_names : iterable of str | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/plotting.py#L4-L68 | [
"def plot_kwargs_cb(idx, labels=None):\n kwargs = {'ls': ls[idx % len(ls)],\n 'c': c[idx % len(c)]}\n if labels:\n kwargs['label'] = labels[idx]\n return kwargs\n"
] | # -*- coding: utf-8 -*-
def mpl_outside_legend(ax, **kwargs):
    """ Places a legend box outside a matplotlib Axes instance. """
    pos = ax.get_position()
    # Shrink the axes horizontally to leave room for the legend on the right.
    ax.set_position([pos.x0, pos.y0, pos.width * 0.75, pos.height])
    # Anchor the legend just outside the axes' upper-right corner.
    ax.legend(loc='upper left', bbox_to_anchor=(1, 1), **kwargs)
|
bjodah/pyneqsys | pyneqsys/plotting.py | mpl_outside_legend | python | def mpl_outside_legend(ax, **kwargs):
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='upper left', bbox_to_anchor=(1, 1), **kwargs) | Places a legend box outside a matplotlib Axes instance. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/plotting.py#L71-L76 | null | # -*- coding: utf-8 -*-
def plot_series(xres, varied_data, indices=None, info=None,
                fail_vline=None, plot_kwargs_cb=None,
                ls=('-', '--', ':', '-.'),
                c=('k', 'r', 'g', 'b', 'c', 'm', 'y'), labels=None,
                ax=None, names=None, latex_names=None):
    """ Plot the values of the solution vector vs the varied parameter.

    Parameters
    ----------
    xres : array
        Solution vector of shape ``(varied_data.size, x0.size)``.
    varied_data : array
        Numerical values of the varied parameter.
    indices : iterable of integers, optional
        Indices of variables to be plotted. default: all
    info : iterable of dicts, optional
        Per-point solver info dicts (with a 'success' key).
    fail_vline : bool
        Show vertical lines where the solver failed
        (default: ``True`` when ``info`` is given).
    plot_kwargs_cb : callable
        Called as ``plot_kwargs_cb(idx, labels=labels)``; returns a dict
        passed to the plotting function. A user-supplied callback must
        accept the ``labels`` keyword argument.
    ls : iterable of str
        Linestyles.
    c : iterable of str
        Colors.
    labels : iterable of str
    ax : matplotlib Axes instance
    names : iterable of str
    latex_names : iterable of str
    """
    import matplotlib.pyplot as plt
    if indices is None:
        indices = range(xres.shape[1])
    if fail_vline is None:
        # Only mark failures when solver info is actually available.
        fail_vline = info is not None
    if ax is None:
        ax = plt.subplot(1, 1, 1)
    if labels is None:
        labels = names if latex_names is None else ['$%s$' % ln.strip('$') for ln in latex_names]
    if plot_kwargs_cb is None:
        # Default: cycle through linestyles/colors, attach labels when given.
        # (Removed the dead `plot_kwargs_cb or (lambda idx: {})` fallback —
        # it could never take effect, and its lambda lacked the `labels`
        # keyword required by the call below.)
        def plot_kwargs_cb(idx, labels=None):
            kwargs = {'ls': ls[idx % len(ls)],
                      'c': c[idx % len(c)]}
            if labels:
                kwargs['label'] = labels[idx]
            return kwargs
    for idx in indices:
        ax.plot(varied_data, xres[:, idx], **plot_kwargs_cb(idx, labels=labels))
    if fail_vline:
        for i, nfo in enumerate(info):
            if not nfo['success']:
                ax.axvline(varied_data[i], c='k', ls='--')
    return ax
|
bjodah/pyneqsys | pyneqsys/symbolic.py | linear_rref | python | def linear_rref(A, b, Matrix=None, S=None):
if Matrix is None:
from sympy import Matrix
if S is None:
from sympy import S
mat_rows = [_map2l(S, list(row) + [v]) for row, v in zip(A, b)]
aug = Matrix(mat_rows)
raug, pivot = aug.rref()
nindep = len(pivot)
return raug[:nindep, :-1], raug[:nindep, -1] | Transform a linear system to reduced row-echelon form
Transforms both the matrix and right-hand side of a linear
system of equations to reduced row echelon form
Parameters
----------
A : Matrix-like
Iterable of rows.
b : iterable
Returns
-------
A', b' - transformed versions | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/symbolic.py#L284-L309 | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from itertools import chain
import numpy as np
from sym import Backend
from sym.util import banded_jacobian, check_transforms
from .core import NeqSys, _ensure_3args
def _map2(cb, iterable):
if cb is None: # identity function is assumed
return iterable
else:
return map(cb, iterable)
def _map2l(cb, iterable):  # Py2 type of map in Py3
    """Like :func:`_map2` but always returns a list (eager evaluation)."""
    return list(_map2(cb, iterable))
class SymbolicSys(NeqSys):
""" Symbolically defined system of non-linear equations.
This object is analogous to :class:`pyneqsys.NeqSys` but instead of
providing a callable, the user provides symbolic expressions.
Parameters
----------
x : iterable of Symbols
exprs : iterable of expressions for ``f``
params : iterable of Symbols (optional)
list of symbols appearing in exprs which are parameters
jac : ImmutableMatrix or bool
If ``True``:
- Calculate Jacobian from ``exprs``.
If ``False``:
- Do not compute Jacobian (numeric approximation).
If ImmutableMatrix:
- User provided expressions for the Jacobian.
backend : str or sym.Backend
See documentation of `sym.Backend \
<https://pythonhosted.org/sym/sym.html#sym.backend.Backend>`_.
module : str
``module`` keyword argument passed to ``backend.Lambdify``.
\\*\\*kwargs:
See :py:class:`pyneqsys.core.NeqSys`.
Examples
--------
>>> import sympy as sp
>>> e = sp.exp
>>> x = x0, x1 = sp.symbols('x:2')
>>> params = a, b = sp.symbols('a b')
>>> neqsys = SymbolicSys(x, [a*(1 - x0), b*(x1 - x0**2)], params)
>>> xout, sol = neqsys.solve('scipy', [-10, -5], [1, 10])
>>> print(xout)
[ 1. 1.]
>>> print(neqsys.get_jac()[0, 0])
-a
Notes
-----
When using SymPy as the backend, a limited number of unknowns is supported.
The reason is that (currently) ``sympy.lambdify`` has an upper limit on
number of arguments.
"""
def __init__(self, x, exprs, params=(), jac=True, backend=None, **kwargs):
self.x = x
self.exprs = exprs
self.params = params
self._jac = jac
self.be = Backend(backend)
self.nf, self.nx = len(exprs), len(x) # needed by get_*_cb
self.band = kwargs.get('band', None) # needed by get_*_cb
self.module = kwargs.pop('module', 'numpy')
super(SymbolicSys, self).__init__(self.nf, self.nx,
self._get_f_cb(),
self._get_j_cb(),
**kwargs)
@classmethod
def from_callback(cls, cb, nx=None, nparams=None, **kwargs):
""" Generate a SymbolicSys instance from a callback.
Parameters
----------
cb : callable
Should have the signature ``cb(x, p, backend) -> list of exprs``.
nx : int
Number of unknowns, when not given it is deduced from ``kwargs['names']``.
nparams : int
Number of parameters, when not given it is deduced from ``kwargs['param_names']``.
\\*\\*kwargs :
Keyword arguments passed on to :class:`SymbolicSys`. See also :class:`pyneqsys.NeqSys`.
Examples
--------
>>> symbolicsys = SymbolicSys.from_callback(lambda x, p, be: [
... x[0]*x[1] - p[0],
... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2
... ], 2, 1)
...
"""
if kwargs.get('x_by_name', False):
if 'names' not in kwargs:
raise ValueError("Need ``names`` in kwargs.")
if nx is None:
nx = len(kwargs['names'])
elif nx != len(kwargs['names']):
raise ValueError("Inconsistency between nx and length of ``names``.")
if kwargs.get('par_by_name', False):
if 'param_names' not in kwargs:
raise ValueError("Need ``param_names`` in kwargs.")
if nparams is None:
nparams = len(kwargs['param_names'])
elif nparams != len(kwargs['param_names']):
raise ValueError("Inconsistency between ``nparam`` and length of ``param_names``.")
if nparams is None:
nparams = 0
if nx is None:
raise ValueError("Need ``nx`` of ``names`` together with ``x_by_name==True``.")
be = Backend(kwargs.pop('backend', None))
x = be.real_symarray('x', nx)
p = be.real_symarray('p', nparams)
_x = dict(zip(kwargs['names'], x)) if kwargs.get('x_by_name', False) else x
_p = dict(zip(kwargs['param_names'], p)) if kwargs.get('par_by_name', False) else p
try:
exprs = cb(_x, _p, be)
except TypeError:
exprs = _ensure_3args(cb)(_x, _p, be)
return cls(x, exprs, p, backend=be, **kwargs)
def get_jac(self):
""" Return the jacobian of the expressions """
if self._jac is True:
if self.band is None:
f = self.be.Matrix(self.nf, 1, self.exprs)
_x = self.be.Matrix(self.nx, 1, self.x)
return f.jacobian(_x)
else:
# Banded
return self.be.Matrix(banded_jacobian(
self.exprs, self.x, *self.band))
elif self._jac is False:
return False
else:
return self._jac
def _get_f_cb(self):
args = list(chain(self.x, self.params))
kw = dict(module=self.module, dtype=object if self.module == 'mpmath' else None)
try:
cb = self.be.Lambdify(args, self.exprs, **kw)
except TypeError:
cb = self.be.Lambdify(args, self.exprs)
def f(x, params):
return cb(np.concatenate((x, params), axis=-1))
return f
def _get_j_cb(self):
args = list(chain(self.x, self.params))
kw = dict(module=self.module, dtype=object if self.module == 'mpmath' else None)
try:
cb = self.be.Lambdify(args, self.get_jac(), **kw)
except TypeError:
cb = self.be.Lambdify(args, self.get_jac())
def j(x, params):
return cb(np.concatenate((x, params), axis=-1))
return j
_use_symbol_latex_names = True
def _repr_latex_(self): # pretty printing in Jupyter notebook
from ._sympy import NeqSysTexPrinter
if self.latex_names and (self.latex_param_names if len(self.params) else True):
pretty = {s: n for s, n in chain(
zip(self.x, self.latex_names) if self._use_symbol_latex_names else [],
zip(self.params, self.latex_param_names)
)}
else:
pretty = {}
return '$%s$' % NeqSysTexPrinter(dict(symbol_names=pretty)).doprint(self.exprs)
class TransformedSys(SymbolicSys):
""" A system which transforms the equations and variables internally
Can be used to reformulate a problem in a numerically more stable form.
Parameters
----------
x : iterable of variables
exprs : iterable of expressions
Expressions to find root for (untransformed).
transf : iterable of pairs of expressions
Forward, backward transformed instances of x.
params : iterable of symbols
post_adj : callable (default: None)
To tweak expression after transformation.
\\*\\*kwargs :
Keyword arguments passed onto :class:`SymbolicSys`.
"""
_use_symbol_latex_names = False # symbols have been transformed
def __init__(self, x, exprs, transf, params=(), post_adj=None, **kwargs):
self.fw, self.bw = zip(*transf)
check_transforms(self.fw, self.bw, x)
exprs = [e.subs(zip(x, self.fw)) for e in exprs]
super(TransformedSys, self).__init__(
x, _map2l(post_adj, exprs), params,
pre_processors=[lambda xarr, params: (self.bw_cb(xarr), params)],
post_processors=[lambda xarr, params: (self.fw_cb(xarr), params)],
**kwargs)
self.fw_cb = self.be.Lambdify(x, self.fw)
self.bw_cb = self.be.Lambdify(x, self.bw)
@classmethod
def from_callback(cls, cb, transf_cbs, nx, nparams=0, pre_adj=None,
**kwargs):
""" Generate a TransformedSys instance from a callback
Parameters
----------
cb : callable
Should have the signature ``cb(x, p, backend) -> list of exprs``.
The callback ``cb`` should return *untransformed* expressions.
transf_cbs : pair or iterable of pairs of callables
Callables for forward- and backward-transformations. Each
callable should take a single parameter (expression) and
return a single expression.
nx : int
Number of unkowns.
nparams : int
Number of parameters.
pre_adj : callable, optional
To tweak expression prior to transformation. Takes a
sinlge argument (expression) and return a single argument
rewritten expression.
\\*\\*kwargs :
Keyword arguments passed on to :class:`TransformedSys`. See also
:class:`SymbolicSys` and :class:`pyneqsys.NeqSys`.
Examples
--------
>>> import sympy as sp
>>> transformed = TransformedSys.from_callback(lambda x, p, be: [
... x[0]*x[1] - p[0],
... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2
... ], (sp.log, sp.exp), 2, 1)
...
"""
be = Backend(kwargs.pop('backend', None))
x = be.real_symarray('x', nx)
p = be.real_symarray('p', nparams)
try:
transf = [(transf_cbs[idx][0](xi),
transf_cbs[idx][1](xi))
for idx, xi in enumerate(x)]
except TypeError:
transf = zip(_map2(transf_cbs[0], x), _map2(transf_cbs[1], x))
try:
exprs = cb(x, p, be)
except TypeError:
exprs = _ensure_3args(cb)(x, p, be)
return cls(x, _map2l(pre_adj, exprs), transf, p, backend=be, **kwargs)
def linear_exprs(A, x, b=None, rref=False, Matrix=None):
""" Returns Ax - b
Parameters
----------
A : matrix_like of numbers
Of shape (len(b), len(x)).
x : iterable of symbols
b : array_like of numbers (default: None)
When ``None``, assume zeros of length ``len(x)``.
Matrix : class
When ``rref == True``: A matrix class which supports slicing,
and methods ``__mul__`` and ``rref``. Defaults to ``sympy.Matrix``.
rref : bool
Calculate the reduced row echelon form of (A | -b).
Returns
-------
A list of the elements in the resulting column vector.
"""
if b is None:
b = [0]*len(x)
if rref:
rA, rb = linear_rref(A, b, Matrix)
if Matrix is None:
from sympy import Matrix
return [lhs - rhs for lhs, rhs in zip(rA * Matrix(len(x), 1, x), rb)]
else:
return [sum([x0*x1 for x0, x1 in zip(row, x)]) - v
for row, v in zip(A, b)]
|
bjodah/pyneqsys | pyneqsys/symbolic.py | linear_exprs | python | def linear_exprs(A, x, b=None, rref=False, Matrix=None):
if b is None:
b = [0]*len(x)
if rref:
rA, rb = linear_rref(A, b, Matrix)
if Matrix is None:
from sympy import Matrix
return [lhs - rhs for lhs, rhs in zip(rA * Matrix(len(x), 1, x), rb)]
else:
return [sum([x0*x1 for x0, x1 in zip(row, x)]) - v
for row, v in zip(A, b)] | Returns Ax - b
Parameters
----------
A : matrix_like of numbers
Of shape (len(b), len(x)).
x : iterable of symbols
b : array_like of numbers (default: None)
When ``None``, assume zeros of length ``len(x)``.
Matrix : class
When ``rref == True``: A matrix class which supports slicing,
and methods ``__mul__`` and ``rref``. Defaults to ``sympy.Matrix``.
rref : bool
Calculate the reduced row echelon form of (A | -b).
Returns
-------
A list of the elements in the resulting column vector. | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/symbolic.py#L312-L342 | [
"def linear_rref(A, b, Matrix=None, S=None):\n \"\"\" Transform a linear system to reduced row-echelon form\n\n Transforms both the matrix and right-hand side of a linear\n system of equations to reduced row echelon form\n\n Parameters\n ----------\n A : Matrix-like\n Iterable of rows.\n b : iterable\n\n Returns\n -------\n A', b' - transformed versions\n\n \"\"\"\n if Matrix is None:\n from sympy import Matrix\n if S is None:\n from sympy import S\n mat_rows = [_map2l(S, list(row) + [v]) for row, v in zip(A, b)]\n aug = Matrix(mat_rows)\n raug, pivot = aug.rref()\n nindep = len(pivot)\n return raug[:nindep, :-1], raug[:nindep, -1]\n"
] | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from itertools import chain
import numpy as np
from sym import Backend
from sym.util import banded_jacobian, check_transforms
from .core import NeqSys, _ensure_3args
def _map2(cb, iterable):
if cb is None: # identity function is assumed
return iterable
else:
return map(cb, iterable)
def _map2l(cb, iterable): # Py2 type of map in Py3
return list(_map2(cb, iterable))
class SymbolicSys(NeqSys):
""" Symbolically defined system of non-linear equations.
This object is analogous to :class:`pyneqsys.NeqSys` but instead of
providing a callable, the user provides symbolic expressions.
Parameters
----------
x : iterable of Symbols
exprs : iterable of expressions for ``f``
params : iterable of Symbols (optional)
list of symbols appearing in exprs which are parameters
jac : ImmutableMatrix or bool
If ``True``:
- Calculate Jacobian from ``exprs``.
If ``False``:
- Do not compute Jacobian (numeric approximation).
If ImmutableMatrix:
- User provided expressions for the Jacobian.
backend : str or sym.Backend
See documentation of `sym.Backend \
<https://pythonhosted.org/sym/sym.html#sym.backend.Backend>`_.
module : str
``module`` keyword argument passed to ``backend.Lambdify``.
\\*\\*kwargs:
See :py:class:`pyneqsys.core.NeqSys`.
Examples
--------
>>> import sympy as sp
>>> e = sp.exp
>>> x = x0, x1 = sp.symbols('x:2')
>>> params = a, b = sp.symbols('a b')
>>> neqsys = SymbolicSys(x, [a*(1 - x0), b*(x1 - x0**2)], params)
>>> xout, sol = neqsys.solve('scipy', [-10, -5], [1, 10])
>>> print(xout)
[ 1. 1.]
>>> print(neqsys.get_jac()[0, 0])
-a
Notes
-----
When using SymPy as the backend, a limited number of unknowns is supported.
The reason is that (currently) ``sympy.lambdify`` has an upper limit on
number of arguments.
"""
def __init__(self, x, exprs, params=(), jac=True, backend=None, **kwargs):
self.x = x
self.exprs = exprs
self.params = params
self._jac = jac
self.be = Backend(backend)
self.nf, self.nx = len(exprs), len(x) # needed by get_*_cb
self.band = kwargs.get('band', None) # needed by get_*_cb
self.module = kwargs.pop('module', 'numpy')
super(SymbolicSys, self).__init__(self.nf, self.nx,
self._get_f_cb(),
self._get_j_cb(),
**kwargs)
@classmethod
def from_callback(cls, cb, nx=None, nparams=None, **kwargs):
""" Generate a SymbolicSys instance from a callback.
Parameters
----------
cb : callable
Should have the signature ``cb(x, p, backend) -> list of exprs``.
nx : int
Number of unknowns, when not given it is deduced from ``kwargs['names']``.
nparams : int
Number of parameters, when not given it is deduced from ``kwargs['param_names']``.
\\*\\*kwargs :
Keyword arguments passed on to :class:`SymbolicSys`. See also :class:`pyneqsys.NeqSys`.
Examples
--------
>>> symbolicsys = SymbolicSys.from_callback(lambda x, p, be: [
... x[0]*x[1] - p[0],
... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2
... ], 2, 1)
...
"""
if kwargs.get('x_by_name', False):
if 'names' not in kwargs:
raise ValueError("Need ``names`` in kwargs.")
if nx is None:
nx = len(kwargs['names'])
elif nx != len(kwargs['names']):
raise ValueError("Inconsistency between nx and length of ``names``.")
if kwargs.get('par_by_name', False):
if 'param_names' not in kwargs:
raise ValueError("Need ``param_names`` in kwargs.")
if nparams is None:
nparams = len(kwargs['param_names'])
elif nparams != len(kwargs['param_names']):
raise ValueError("Inconsistency between ``nparam`` and length of ``param_names``.")
if nparams is None:
nparams = 0
if nx is None:
raise ValueError("Need ``nx`` of ``names`` together with ``x_by_name==True``.")
be = Backend(kwargs.pop('backend', None))
x = be.real_symarray('x', nx)
p = be.real_symarray('p', nparams)
_x = dict(zip(kwargs['names'], x)) if kwargs.get('x_by_name', False) else x
_p = dict(zip(kwargs['param_names'], p)) if kwargs.get('par_by_name', False) else p
try:
exprs = cb(_x, _p, be)
except TypeError:
exprs = _ensure_3args(cb)(_x, _p, be)
return cls(x, exprs, p, backend=be, **kwargs)
def get_jac(self):
""" Return the jacobian of the expressions """
if self._jac is True:
if self.band is None:
f = self.be.Matrix(self.nf, 1, self.exprs)
_x = self.be.Matrix(self.nx, 1, self.x)
return f.jacobian(_x)
else:
# Banded
return self.be.Matrix(banded_jacobian(
self.exprs, self.x, *self.band))
elif self._jac is False:
return False
else:
return self._jac
def _get_f_cb(self):
args = list(chain(self.x, self.params))
kw = dict(module=self.module, dtype=object if self.module == 'mpmath' else None)
try:
cb = self.be.Lambdify(args, self.exprs, **kw)
except TypeError:
cb = self.be.Lambdify(args, self.exprs)
def f(x, params):
return cb(np.concatenate((x, params), axis=-1))
return f
def _get_j_cb(self):
args = list(chain(self.x, self.params))
kw = dict(module=self.module, dtype=object if self.module == 'mpmath' else None)
try:
cb = self.be.Lambdify(args, self.get_jac(), **kw)
except TypeError:
cb = self.be.Lambdify(args, self.get_jac())
def j(x, params):
return cb(np.concatenate((x, params), axis=-1))
return j
_use_symbol_latex_names = True
def _repr_latex_(self): # pretty printing in Jupyter notebook
from ._sympy import NeqSysTexPrinter
if self.latex_names and (self.latex_param_names if len(self.params) else True):
pretty = {s: n for s, n in chain(
zip(self.x, self.latex_names) if self._use_symbol_latex_names else [],
zip(self.params, self.latex_param_names)
)}
else:
pretty = {}
return '$%s$' % NeqSysTexPrinter(dict(symbol_names=pretty)).doprint(self.exprs)
class TransformedSys(SymbolicSys):
""" A system which transforms the equations and variables internally
Can be used to reformulate a problem in a numerically more stable form.
Parameters
----------
x : iterable of variables
exprs : iterable of expressions
Expressions to find root for (untransformed).
transf : iterable of pairs of expressions
Forward, backward transformed instances of x.
params : iterable of symbols
post_adj : callable (default: None)
To tweak expression after transformation.
\\*\\*kwargs :
Keyword arguments passed onto :class:`SymbolicSys`.
"""
_use_symbol_latex_names = False # symbols have been transformed
def __init__(self, x, exprs, transf, params=(), post_adj=None, **kwargs):
self.fw, self.bw = zip(*transf)
check_transforms(self.fw, self.bw, x)
exprs = [e.subs(zip(x, self.fw)) for e in exprs]
super(TransformedSys, self).__init__(
x, _map2l(post_adj, exprs), params,
pre_processors=[lambda xarr, params: (self.bw_cb(xarr), params)],
post_processors=[lambda xarr, params: (self.fw_cb(xarr), params)],
**kwargs)
self.fw_cb = self.be.Lambdify(x, self.fw)
self.bw_cb = self.be.Lambdify(x, self.bw)
@classmethod
def from_callback(cls, cb, transf_cbs, nx, nparams=0, pre_adj=None,
**kwargs):
""" Generate a TransformedSys instance from a callback
Parameters
----------
cb : callable
Should have the signature ``cb(x, p, backend) -> list of exprs``.
The callback ``cb`` should return *untransformed* expressions.
transf_cbs : pair or iterable of pairs of callables
Callables for forward- and backward-transformations. Each
callable should take a single parameter (expression) and
return a single expression.
nx : int
Number of unkowns.
nparams : int
Number of parameters.
pre_adj : callable, optional
To tweak expression prior to transformation. Takes a
sinlge argument (expression) and return a single argument
rewritten expression.
\\*\\*kwargs :
Keyword arguments passed on to :class:`TransformedSys`. See also
:class:`SymbolicSys` and :class:`pyneqsys.NeqSys`.
Examples
--------
>>> import sympy as sp
>>> transformed = TransformedSys.from_callback(lambda x, p, be: [
... x[0]*x[1] - p[0],
... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2
... ], (sp.log, sp.exp), 2, 1)
...
"""
be = Backend(kwargs.pop('backend', None))
x = be.real_symarray('x', nx)
p = be.real_symarray('p', nparams)
try:
transf = [(transf_cbs[idx][0](xi),
transf_cbs[idx][1](xi))
for idx, xi in enumerate(x)]
except TypeError:
transf = zip(_map2(transf_cbs[0], x), _map2(transf_cbs[1], x))
try:
exprs = cb(x, p, be)
except TypeError:
exprs = _ensure_3args(cb)(x, p, be)
return cls(x, _map2l(pre_adj, exprs), transf, p, backend=be, **kwargs)
def linear_rref(A, b, Matrix=None, S=None):
""" Transform a linear system to reduced row-echelon form
Transforms both the matrix and right-hand side of a linear
system of equations to reduced row echelon form
Parameters
----------
A : Matrix-like
Iterable of rows.
b : iterable
Returns
-------
A', b' - transformed versions
"""
if Matrix is None:
from sympy import Matrix
if S is None:
from sympy import S
mat_rows = [_map2l(S, list(row) + [v]) for row, v in zip(A, b)]
aug = Matrix(mat_rows)
raug, pivot = aug.rref()
nindep = len(pivot)
return raug[:nindep, :-1], raug[:nindep, -1]
|
bjodah/pyneqsys | pyneqsys/symbolic.py | SymbolicSys.from_callback | python | def from_callback(cls, cb, nx=None, nparams=None, **kwargs):
if kwargs.get('x_by_name', False):
if 'names' not in kwargs:
raise ValueError("Need ``names`` in kwargs.")
if nx is None:
nx = len(kwargs['names'])
elif nx != len(kwargs['names']):
raise ValueError("Inconsistency between nx and length of ``names``.")
if kwargs.get('par_by_name', False):
if 'param_names' not in kwargs:
raise ValueError("Need ``param_names`` in kwargs.")
if nparams is None:
nparams = len(kwargs['param_names'])
elif nparams != len(kwargs['param_names']):
raise ValueError("Inconsistency between ``nparam`` and length of ``param_names``.")
if nparams is None:
nparams = 0
if nx is None:
raise ValueError("Need ``nx`` of ``names`` together with ``x_by_name==True``.")
be = Backend(kwargs.pop('backend', None))
x = be.real_symarray('x', nx)
p = be.real_symarray('p', nparams)
_x = dict(zip(kwargs['names'], x)) if kwargs.get('x_by_name', False) else x
_p = dict(zip(kwargs['param_names'], p)) if kwargs.get('par_by_name', False) else p
try:
exprs = cb(_x, _p, be)
except TypeError:
exprs = _ensure_3args(cb)(_x, _p, be)
return cls(x, exprs, p, backend=be, **kwargs) | Generate a SymbolicSys instance from a callback.
Parameters
----------
cb : callable
Should have the signature ``cb(x, p, backend) -> list of exprs``.
nx : int
Number of unknowns, when not given it is deduced from ``kwargs['names']``.
nparams : int
Number of parameters, when not given it is deduced from ``kwargs['param_names']``.
\\*\\*kwargs :
Keyword arguments passed on to :class:`SymbolicSys`. See also :class:`pyneqsys.NeqSys`.
Examples
--------
>>> symbolicsys = SymbolicSys.from_callback(lambda x, p, be: [
... x[0]*x[1] - p[0],
... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2
... ], 2, 1)
... | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/symbolic.py#L88-L141 | null | class SymbolicSys(NeqSys):
""" Symbolically defined system of non-linear equations.
This object is analogous to :class:`pyneqsys.NeqSys` but instead of
providing a callable, the user provides symbolic expressions.
Parameters
----------
x : iterable of Symbols
exprs : iterable of expressions for ``f``
params : iterable of Symbols (optional)
list of symbols appearing in exprs which are parameters
jac : ImmutableMatrix or bool
If ``True``:
- Calculate Jacobian from ``exprs``.
If ``False``:
- Do not compute Jacobian (numeric approximation).
If ImmutableMatrix:
- User provided expressions for the Jacobian.
backend : str or sym.Backend
See documentation of `sym.Backend \
<https://pythonhosted.org/sym/sym.html#sym.backend.Backend>`_.
module : str
``module`` keyword argument passed to ``backend.Lambdify``.
\\*\\*kwargs:
See :py:class:`pyneqsys.core.NeqSys`.
Examples
--------
>>> import sympy as sp
>>> e = sp.exp
>>> x = x0, x1 = sp.symbols('x:2')
>>> params = a, b = sp.symbols('a b')
>>> neqsys = SymbolicSys(x, [a*(1 - x0), b*(x1 - x0**2)], params)
>>> xout, sol = neqsys.solve('scipy', [-10, -5], [1, 10])
>>> print(xout)
[ 1. 1.]
>>> print(neqsys.get_jac()[0, 0])
-a
Notes
-----
When using SymPy as the backend, a limited number of unknowns is supported.
The reason is that (currently) ``sympy.lambdify`` has an upper limit on
number of arguments.
"""
def __init__(self, x, exprs, params=(), jac=True, backend=None, **kwargs):
self.x = x
self.exprs = exprs
self.params = params
self._jac = jac
self.be = Backend(backend)
self.nf, self.nx = len(exprs), len(x) # needed by get_*_cb
self.band = kwargs.get('band', None) # needed by get_*_cb
self.module = kwargs.pop('module', 'numpy')
super(SymbolicSys, self).__init__(self.nf, self.nx,
self._get_f_cb(),
self._get_j_cb(),
**kwargs)
@classmethod
def get_jac(self):
""" Return the jacobian of the expressions """
if self._jac is True:
if self.band is None:
f = self.be.Matrix(self.nf, 1, self.exprs)
_x = self.be.Matrix(self.nx, 1, self.x)
return f.jacobian(_x)
else:
# Banded
return self.be.Matrix(banded_jacobian(
self.exprs, self.x, *self.band))
elif self._jac is False:
return False
else:
return self._jac
def _get_f_cb(self):
args = list(chain(self.x, self.params))
kw = dict(module=self.module, dtype=object if self.module == 'mpmath' else None)
try:
cb = self.be.Lambdify(args, self.exprs, **kw)
except TypeError:
cb = self.be.Lambdify(args, self.exprs)
def f(x, params):
return cb(np.concatenate((x, params), axis=-1))
return f
def _get_j_cb(self):
args = list(chain(self.x, self.params))
kw = dict(module=self.module, dtype=object if self.module == 'mpmath' else None)
try:
cb = self.be.Lambdify(args, self.get_jac(), **kw)
except TypeError:
cb = self.be.Lambdify(args, self.get_jac())
def j(x, params):
return cb(np.concatenate((x, params), axis=-1))
return j
_use_symbol_latex_names = True
def _repr_latex_(self): # pretty printing in Jupyter notebook
from ._sympy import NeqSysTexPrinter
if self.latex_names and (self.latex_param_names if len(self.params) else True):
pretty = {s: n for s, n in chain(
zip(self.x, self.latex_names) if self._use_symbol_latex_names else [],
zip(self.params, self.latex_param_names)
)}
else:
pretty = {}
return '$%s$' % NeqSysTexPrinter(dict(symbol_names=pretty)).doprint(self.exprs)
|
bjodah/pyneqsys | pyneqsys/symbolic.py | SymbolicSys.get_jac | python | def get_jac(self):
if self._jac is True:
if self.band is None:
f = self.be.Matrix(self.nf, 1, self.exprs)
_x = self.be.Matrix(self.nx, 1, self.x)
return f.jacobian(_x)
else:
# Banded
return self.be.Matrix(banded_jacobian(
self.exprs, self.x, *self.band))
elif self._jac is False:
return False
else:
return self._jac | Return the jacobian of the expressions | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/symbolic.py#L143-L157 | null | class SymbolicSys(NeqSys):
""" Symbolically defined system of non-linear equations.
This object is analogous to :class:`pyneqsys.NeqSys` but instead of
providing a callable, the user provides symbolic expressions.
Parameters
----------
x : iterable of Symbols
exprs : iterable of expressions for ``f``
params : iterable of Symbols (optional)
list of symbols appearing in exprs which are parameters
jac : ImmutableMatrix or bool
If ``True``:
- Calculate Jacobian from ``exprs``.
If ``False``:
- Do not compute Jacobian (numeric approximation).
If ImmutableMatrix:
- User provided expressions for the Jacobian.
backend : str or sym.Backend
See documentation of `sym.Backend \
<https://pythonhosted.org/sym/sym.html#sym.backend.Backend>`_.
module : str
``module`` keyword argument passed to ``backend.Lambdify``.
\\*\\*kwargs:
See :py:class:`pyneqsys.core.NeqSys`.
Examples
--------
>>> import sympy as sp
>>> e = sp.exp
>>> x = x0, x1 = sp.symbols('x:2')
>>> params = a, b = sp.symbols('a b')
>>> neqsys = SymbolicSys(x, [a*(1 - x0), b*(x1 - x0**2)], params)
>>> xout, sol = neqsys.solve('scipy', [-10, -5], [1, 10])
>>> print(xout)
[ 1. 1.]
>>> print(neqsys.get_jac()[0, 0])
-a
Notes
-----
When using SymPy as the backend, a limited number of unknowns is supported.
The reason is that (currently) ``sympy.lambdify`` has an upper limit on
number of arguments.
"""
def __init__(self, x, exprs, params=(), jac=True, backend=None, **kwargs):
self.x = x
self.exprs = exprs
self.params = params
self._jac = jac
self.be = Backend(backend)
self.nf, self.nx = len(exprs), len(x) # needed by get_*_cb
self.band = kwargs.get('band', None) # needed by get_*_cb
self.module = kwargs.pop('module', 'numpy')
super(SymbolicSys, self).__init__(self.nf, self.nx,
self._get_f_cb(),
self._get_j_cb(),
**kwargs)
@classmethod
def from_callback(cls, cb, nx=None, nparams=None, **kwargs):
""" Generate a SymbolicSys instance from a callback.
Parameters
----------
cb : callable
Should have the signature ``cb(x, p, backend) -> list of exprs``.
nx : int
Number of unknowns, when not given it is deduced from ``kwargs['names']``.
nparams : int
Number of parameters, when not given it is deduced from ``kwargs['param_names']``.
\\*\\*kwargs :
Keyword arguments passed on to :class:`SymbolicSys`. See also :class:`pyneqsys.NeqSys`.
Examples
--------
>>> symbolicsys = SymbolicSys.from_callback(lambda x, p, be: [
... x[0]*x[1] - p[0],
... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2
... ], 2, 1)
...
"""
if kwargs.get('x_by_name', False):
if 'names' not in kwargs:
raise ValueError("Need ``names`` in kwargs.")
if nx is None:
nx = len(kwargs['names'])
elif nx != len(kwargs['names']):
raise ValueError("Inconsistency between nx and length of ``names``.")
if kwargs.get('par_by_name', False):
if 'param_names' not in kwargs:
raise ValueError("Need ``param_names`` in kwargs.")
if nparams is None:
nparams = len(kwargs['param_names'])
elif nparams != len(kwargs['param_names']):
raise ValueError("Inconsistency between ``nparam`` and length of ``param_names``.")
if nparams is None:
nparams = 0
if nx is None:
raise ValueError("Need ``nx`` of ``names`` together with ``x_by_name==True``.")
be = Backend(kwargs.pop('backend', None))
x = be.real_symarray('x', nx)
p = be.real_symarray('p', nparams)
_x = dict(zip(kwargs['names'], x)) if kwargs.get('x_by_name', False) else x
_p = dict(zip(kwargs['param_names'], p)) if kwargs.get('par_by_name', False) else p
try:
exprs = cb(_x, _p, be)
except TypeError:
exprs = _ensure_3args(cb)(_x, _p, be)
return cls(x, exprs, p, backend=be, **kwargs)
def _get_f_cb(self):
args = list(chain(self.x, self.params))
kw = dict(module=self.module, dtype=object if self.module == 'mpmath' else None)
try:
cb = self.be.Lambdify(args, self.exprs, **kw)
except TypeError:
cb = self.be.Lambdify(args, self.exprs)
def f(x, params):
return cb(np.concatenate((x, params), axis=-1))
return f
def _get_j_cb(self):
args = list(chain(self.x, self.params))
kw = dict(module=self.module, dtype=object if self.module == 'mpmath' else None)
try:
cb = self.be.Lambdify(args, self.get_jac(), **kw)
except TypeError:
cb = self.be.Lambdify(args, self.get_jac())
def j(x, params):
return cb(np.concatenate((x, params), axis=-1))
return j
_use_symbol_latex_names = True
def _repr_latex_(self): # pretty printing in Jupyter notebook
from ._sympy import NeqSysTexPrinter
if self.latex_names and (self.latex_param_names if len(self.params) else True):
pretty = {s: n for s, n in chain(
zip(self.x, self.latex_names) if self._use_symbol_latex_names else [],
zip(self.params, self.latex_param_names)
)}
else:
pretty = {}
return '$%s$' % NeqSysTexPrinter(dict(symbol_names=pretty)).doprint(self.exprs)
|
bjodah/pyneqsys | pyneqsys/symbolic.py | TransformedSys.from_callback | python | def from_callback(cls, cb, transf_cbs, nx, nparams=0, pre_adj=None,
**kwargs):
be = Backend(kwargs.pop('backend', None))
x = be.real_symarray('x', nx)
p = be.real_symarray('p', nparams)
try:
transf = [(transf_cbs[idx][0](xi),
transf_cbs[idx][1](xi))
for idx, xi in enumerate(x)]
except TypeError:
transf = zip(_map2(transf_cbs[0], x), _map2(transf_cbs[1], x))
try:
exprs = cb(x, p, be)
except TypeError:
exprs = _ensure_3args(cb)(x, p, be)
return cls(x, _map2l(pre_adj, exprs), transf, p, backend=be, **kwargs) | Generate a TransformedSys instance from a callback
Parameters
----------
cb : callable
Should have the signature ``cb(x, p, backend) -> list of exprs``.
The callback ``cb`` should return *untransformed* expressions.
transf_cbs : pair or iterable of pairs of callables
Callables for forward- and backward-transformations. Each
callable should take a single parameter (expression) and
return a single expression.
nx : int
Number of unkowns.
nparams : int
Number of parameters.
pre_adj : callable, optional
To tweak expression prior to transformation. Takes a
sinlge argument (expression) and return a single argument
rewritten expression.
\\*\\*kwargs :
Keyword arguments passed on to :class:`TransformedSys`. See also
:class:`SymbolicSys` and :class:`pyneqsys.NeqSys`.
Examples
--------
>>> import sympy as sp
>>> transformed = TransformedSys.from_callback(lambda x, p, be: [
... x[0]*x[1] - p[0],
... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2
... ], (sp.log, sp.exp), 2, 1)
... | train | https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/symbolic.py#L232-L281 | [
"def f(x, params):\n # docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html\n return [x[0] + (x[0] - x[1])**params[0]/2 - 1,\n (x[1] - x[0])**params[0]/2 + x[1]]\n",
"def _ensure_3args(func):\n if func is None:\n return None\n self_arg = 1 if inspect.ismethod(func) else 0\n if hasattr(inspect, 'getfullargspec'):\n args = inspect.getfullargspec(func)[0]\n else: # Python 2:\n args = inspect.getargspec(func)[0]\n\n if len(args) == 3 + self_arg:\n return func\n if len(args) == 2 + self_arg:\n return lambda x, params=(), backend=math: func(x, params)\n elif len(args) == 1 + self_arg:\n return lambda x, params=(), backend=math: func(x)\n else:\n raise ValueError(\"Incorrect numer of arguments\")\n",
"def _map2(cb, iterable):\n if cb is None: # identity function is assumed\n return iterable\n else:\n return map(cb, iterable)\n",
"def _map2l(cb, iterable): # Py2 type of map in Py3\n return list(_map2(cb, iterable))\n",
"return lambda x, params=(), backend=math: func(x, params)\n",
"return lambda x, params=(), backend=math: func(x)\n"
] | class TransformedSys(SymbolicSys):
""" A system which transforms the equations and variables internally
Can be used to reformulate a problem in a numerically more stable form.
Parameters
----------
x : iterable of variables
exprs : iterable of expressions
Expressions to find root for (untransformed).
transf : iterable of pairs of expressions
Forward, backward transformed instances of x.
params : iterable of symbols
post_adj : callable (default: None)
To tweak expression after transformation.
\\*\\*kwargs :
Keyword arguments passed onto :class:`SymbolicSys`.
"""
_use_symbol_latex_names = False # symbols have been transformed
def __init__(self, x, exprs, transf, params=(), post_adj=None, **kwargs):
self.fw, self.bw = zip(*transf)
check_transforms(self.fw, self.bw, x)
exprs = [e.subs(zip(x, self.fw)) for e in exprs]
super(TransformedSys, self).__init__(
x, _map2l(post_adj, exprs), params,
pre_processors=[lambda xarr, params: (self.bw_cb(xarr), params)],
post_processors=[lambda xarr, params: (self.fw_cb(xarr), params)],
**kwargs)
self.fw_cb = self.be.Lambdify(x, self.fw)
self.bw_cb = self.be.Lambdify(x, self.bw)
@classmethod
|
EmbodiedCognition/py-c3d | c3d.py | Header.write | python | def write(self, handle):
'''Write binary header data to a file handle.
This method writes exactly 512 bytes to the beginning of the given file
handle.
Parameters
----------
handle : file handle
The given handle will be reset to 0 using `seek` and then 512 bytes
will be written to describe the parameters in this Header. The
handle must be writeable.
'''
handle.seek(0)
handle.write(struct.pack(self.BINARY_FORMAT,
self.parameter_block,
0x50,
self.point_count,
self.analog_count,
self.first_frame,
self.last_frame,
self.max_gap,
self.scale_factor,
self.data_block,
self.analog_per_frame,
self.frame_rate,
b'',
self.long_event_labels and 0x3039 or 0x0,
self.label_block,
b'')) | Write binary header data to a file handle.
This method writes exactly 512 bytes to the beginning of the given file
handle.
Parameters
----------
handle : file handle
The given handle will be reset to 0 using `seek` and then 512 bytes
will be written to describe the parameters in this Header. The
handle must be writeable. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L89-L118 | null | class Header(object):
'''Header information from a C3D file.
Attributes
----------
label_block : int
Index of the 512-byte block where labels (metadata) are found.
parameter_block : int
Index of the 512-byte block where parameters (metadata) are found.
data_block : int
Index of the 512-byte block where data starts.
point_count : int
Number of motion capture channels recorded in this file.
analog_count : int
Number of analog values recorded per frame of 3D point data.
first_frame : int
Index of the first frame of data.
last_frame : int
Index of the last frame of data.
analog_per_frame : int
Number of analog frames per frame of 3D point data. The analog frame
rate (ANALOG:RATE) apparently equals the point frame rate (POINT:RATE)
times this value.
frame_rate : float
The frame rate of the recording, in frames per second.
scale_factor : float
Multiply values in the file by this scale parameter.
long_event_labels : bool
max_gap : int
.. note::
The ``scale_factor`` attribute is not used in Phasespace C3D files;
instead, use the POINT.SCALE parameter.
.. note::
The ``first_frame`` and ``last_frame`` header attributes are not used in
C3D files generated by Phasespace. Instead, the first and last
frame numbers are stored in the POINTS:ACTUAL_START_FIELD and
POINTS:ACTUAL_END_FIELD parameters.
'''
BINARY_FORMAT = '<BBHHHHHfHHf270sHH214s'
def __init__(self, handle=None):
'''Create a new Header object.
Parameters
----------
handle : file handle, optional
If given, initialize attributes for the Header from this file
handle. The handle must be seek-able and readable. If `handle` is
not given, Header attributes are initialized with default values.
'''
self.label_block = 0
self.parameter_block = 2
self.data_block = 3
self.point_count = 50
self.analog_count = 0
self.first_frame = 1
self.last_frame = 1
self.analog_per_frame = 0
self.frame_rate = 60.0
self.max_gap = 0
self.scale_factor = -1.0
self.long_event_labels = False
if handle:
self.read(handle)
def __str__(self):
'''Return a string representation of this Header's attributes.'''
return '''\
parameter_block: {0.parameter_block}
point_count: {0.point_count}
analog_count: {0.analog_count}
first_frame: {0.first_frame}
last_frame: {0.last_frame}
max_gap: {0.max_gap}
scale_factor: {0.scale_factor}
data_block: {0.data_block}
analog_per_frame: {0.analog_per_frame}
frame_rate: {0.frame_rate}
long_event_labels: {0.long_event_labels}
label_block: {0.label_block}'''.format(self)
def read(self, handle):
'''Read and parse binary header data from a file handle.
This method reads exactly 512 bytes from the beginning of the given file
handle.
Parameters
----------
handle : file handle
The given handle will be reset to 0 using `seek` and then 512 bytes
will be read to initialize the attributes in this Header. The handle
must be readable.
Raises
------
AssertionError
If the magic byte from the header is not 80 (the C3D magic value).
'''
handle.seek(0)
(self.parameter_block,
magic,
self.point_count,
self.analog_count,
self.first_frame,
self.last_frame,
self.max_gap,
self.scale_factor,
self.data_block,
self.analog_per_frame,
self.frame_rate,
_,
self.long_event_labels,
self.label_block,
_) = struct.unpack(self.BINARY_FORMAT, handle.read(512))
assert magic == 80, 'C3D magic {} != 80 !'.format(magic)
|
EmbodiedCognition/py-c3d | c3d.py | Header.read | python | def read(self, handle):
'''Read and parse binary header data from a file handle.
This method reads exactly 512 bytes from the beginning of the given file
handle.
Parameters
----------
handle : file handle
The given handle will be reset to 0 using `seek` and then 512 bytes
will be read to initialize the attributes in this Header. The handle
must be readable.
Raises
------
AssertionError
If the magic byte from the header is not 80 (the C3D magic value).
'''
handle.seek(0)
(self.parameter_block,
magic,
self.point_count,
self.analog_count,
self.first_frame,
self.last_frame,
self.max_gap,
self.scale_factor,
self.data_block,
self.analog_per_frame,
self.frame_rate,
_,
self.long_event_labels,
self.label_block,
_) = struct.unpack(self.BINARY_FORMAT, handle.read(512))
assert magic == 80, 'C3D magic {} != 80 !'.format(magic) | Read and parse binary header data from a file handle.
This method reads exactly 512 bytes from the beginning of the given file
handle.
Parameters
----------
handle : file handle
The given handle will be reset to 0 using `seek` and then 512 bytes
will be read to initialize the attributes in this Header. The handle
must be readable.
Raises
------
AssertionError
If the magic byte from the header is not 80 (the C3D magic value). | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L136-L171 | null | class Header(object):
'''Header information from a C3D file.
Attributes
----------
label_block : int
Index of the 512-byte block where labels (metadata) are found.
parameter_block : int
Index of the 512-byte block where parameters (metadata) are found.
data_block : int
Index of the 512-byte block where data starts.
point_count : int
Number of motion capture channels recorded in this file.
analog_count : int
Number of analog values recorded per frame of 3D point data.
first_frame : int
Index of the first frame of data.
last_frame : int
Index of the last frame of data.
analog_per_frame : int
Number of analog frames per frame of 3D point data. The analog frame
rate (ANALOG:RATE) apparently equals the point frame rate (POINT:RATE)
times this value.
frame_rate : float
The frame rate of the recording, in frames per second.
scale_factor : float
Multiply values in the file by this scale parameter.
long_event_labels : bool
max_gap : int
.. note::
The ``scale_factor`` attribute is not used in Phasespace C3D files;
instead, use the POINT.SCALE parameter.
.. note::
The ``first_frame`` and ``last_frame`` header attributes are not used in
C3D files generated by Phasespace. Instead, the first and last
frame numbers are stored in the POINTS:ACTUAL_START_FIELD and
POINTS:ACTUAL_END_FIELD parameters.
'''
BINARY_FORMAT = '<BBHHHHHfHHf270sHH214s'
def __init__(self, handle=None):
'''Create a new Header object.
Parameters
----------
handle : file handle, optional
If given, initialize attributes for the Header from this file
handle. The handle must be seek-able and readable. If `handle` is
not given, Header attributes are initialized with default values.
'''
self.label_block = 0
self.parameter_block = 2
self.data_block = 3
self.point_count = 50
self.analog_count = 0
self.first_frame = 1
self.last_frame = 1
self.analog_per_frame = 0
self.frame_rate = 60.0
self.max_gap = 0
self.scale_factor = -1.0
self.long_event_labels = False
if handle:
self.read(handle)
def write(self, handle):
'''Write binary header data to a file handle.
This method writes exactly 512 bytes to the beginning of the given file
handle.
Parameters
----------
handle : file handle
The given handle will be reset to 0 using `seek` and then 512 bytes
will be written to describe the parameters in this Header. The
handle must be writeable.
'''
handle.seek(0)
handle.write(struct.pack(self.BINARY_FORMAT,
self.parameter_block,
0x50,
self.point_count,
self.analog_count,
self.first_frame,
self.last_frame,
self.max_gap,
self.scale_factor,
self.data_block,
self.analog_per_frame,
self.frame_rate,
b'',
self.long_event_labels and 0x3039 or 0x0,
self.label_block,
b''))
def __str__(self):
'''Return a string representation of this Header's attributes.'''
return '''\
parameter_block: {0.parameter_block}
point_count: {0.point_count}
analog_count: {0.analog_count}
first_frame: {0.first_frame}
last_frame: {0.last_frame}
max_gap: {0.max_gap}
scale_factor: {0.scale_factor}
data_block: {0.data_block}
analog_per_frame: {0.analog_per_frame}
frame_rate: {0.frame_rate}
long_event_labels: {0.long_event_labels}
label_block: {0.label_block}'''.format(self)
|
EmbodiedCognition/py-c3d | c3d.py | Param.binary_size | python | def binary_size(self):
'''Return the number of bytes needed to store this parameter.'''
return (
1 + # group_id
2 + # next offset marker
1 + len(self.name.encode('utf-8')) + # size of name and name bytes
1 + # data size
1 + len(self.dimensions) + # size of dimensions and dimension bytes
self.total_bytes + # data
1 + len(self.desc.encode('utf-8')) # size of desc and desc bytes
) | Return the number of bytes needed to store this parameter. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L227-L237 | null | class Param(object):
'''A class representing a single named parameter from a C3D file.
Attributes
----------
name : str
Name of this parameter.
desc : str
Brief description of this parameter.
bytes_per_element : int, optional
For array data, this describes the size of each element of data. For
string data (including arrays of strings), this should be -1.
dimensions : list of int
For array data, this describes the dimensions of the array, stored in
column-major order. For arrays of strings, the dimensions here will be
the number of columns (length of each string) followed by the number of
rows (number of strings).
bytes : str
Raw data for this parameter.
'''
def __init__(self,
name,
desc='',
bytes_per_element=1,
dimensions=None,
bytes=b'',
handle=None):
'''Set up a new parameter, only the name is required.'''
self.name = name
self.desc = desc
self.bytes_per_element = bytes_per_element
self.dimensions = dimensions or []
self.bytes = bytes
if handle:
self.read(handle)
def __repr__(self):
return '<Param: {}>'.format(self.desc)
@property
def num_elements(self):
'''Return the number of elements in this parameter's array value.'''
e = 1
for d in self.dimensions:
e *= d
return e
@property
def total_bytes(self):
'''Return the number of bytes used for storing this parameter's data.'''
return self.num_elements * abs(self.bytes_per_element)
def write(self, group_id, handle):
'''Write binary data for this parameter to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group that holds this parameter.
handle : file handle
An open, writable, binary file handle.
'''
name = self.name.encode('utf-8')
handle.write(struct.pack('bb', len(name), group_id))
handle.write(name)
handle.write(struct.pack('<h', self.binary_size() - 2 - len(name)))
handle.write(struct.pack('b', self.bytes_per_element))
handle.write(struct.pack('B', len(self.dimensions)))
handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions))
if self.bytes:
handle.write(self.bytes)
desc = self.desc.encode('utf-8')
handle.write(struct.pack('B', len(desc)))
handle.write(desc)
def read(self, handle):
'''Read binary data for this parameter from a file handle.
This reads exactly enough data from the current position in the file to
initialize the parameter.
'''
self.bytes_per_element, = struct.unpack('b', handle.read(1))
dims, = struct.unpack('B', handle.read(1))
self.dimensions = [struct.unpack('B', handle.read(1))[0] for _ in range(dims)]
self.bytes = b''
if self.total_bytes:
self.bytes = handle.read(self.total_bytes)
size, = struct.unpack('B', handle.read(1))
self.desc = size and handle.read(size).decode('utf-8') or ''
def _as(self, fmt):
'''Unpack the raw bytes of this param using the given struct format.'''
return struct.unpack('<' + fmt, self.bytes)[0]
@property
def int8_value(self):
'''Get the param as an 8-bit signed integer.'''
return self._as('b')
@property
def uint8_value(self):
'''Get the param as an 8-bit unsigned integer.'''
return self._as('B')
@property
def int16_value(self):
'''Get the param as a 16-bit signed integer.'''
return self._as('h')
@property
def uint16_value(self):
'''Get the param as a 16-bit unsigned integer.'''
return self._as('H')
@property
def int32_value(self):
'''Get the param as a 32-bit signed integer.'''
return self._as('i')
@property
def uint32_value(self):
'''Get the param as a 32-bit unsigned integer.'''
return self._as('I')
@property
def float_value(self):
'''Get the param as a 32-bit float.'''
return self._as('f')
@property
def bytes_value(self):
'''Get the param as a raw byte string.'''
return self.bytes
@property
def string_value(self):
'''Get the param as a unicode string.'''
return self.bytes.decode('utf-8')
def _as_array(self, fmt):
'''Unpack the raw bytes of this param using the given data format.'''
assert self.dimensions, \
'{}: cannot get value as {} array!'.format(self.name, fmt)
elems = array.array(fmt)
elems.fromstring(self.bytes)
return np.array(elems).reshape(self.dimensions)
@property
def int8_array(self):
'''Get the param as an array of 8-bit signed integers.'''
return self._as_array('b')
@property
def uint8_array(self):
'''Get the param as an array of 8-bit unsigned integers.'''
return self._as_array('B')
@property
def int16_array(self):
'''Get the param as an array of 16-bit signed integers.'''
return self._as_array('h')
@property
def uint16_array(self):
'''Get the param as an array of 16-bit unsigned integers.'''
return self._as_array('H')
@property
def int32_array(self):
'''Get the param as an array of 32-bit signed integers.'''
return self._as_array('i')
@property
def uint32_array(self):
'''Get the param as an array of 32-bit unsigned integers.'''
return self._as_array('I')
@property
def float_array(self):
'''Get the param as an array of 32-bit floats.'''
return self._as_array('f')
@property
def bytes_array(self):
'''Get the param as an array of raw byte strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as bytes array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l] for i in range(n)]
@property
def string_array(self):
'''Get the param as a array of unicode strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as string array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l].decode('utf-8') for i in range(n)]
|
EmbodiedCognition/py-c3d | c3d.py | Param.write | python | def write(self, group_id, handle):
'''Write binary data for this parameter to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group that holds this parameter.
handle : file handle
An open, writable, binary file handle.
'''
name = self.name.encode('utf-8')
handle.write(struct.pack('bb', len(name), group_id))
handle.write(name)
handle.write(struct.pack('<h', self.binary_size() - 2 - len(name)))
handle.write(struct.pack('b', self.bytes_per_element))
handle.write(struct.pack('B', len(self.dimensions)))
handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions))
if self.bytes:
handle.write(self.bytes)
desc = self.desc.encode('utf-8')
handle.write(struct.pack('B', len(desc)))
handle.write(desc) | Write binary data for this parameter to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group that holds this parameter.
handle : file handle
An open, writable, binary file handle. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L239-L260 | [
"def binary_size(self):\n '''Return the number of bytes needed to store this parameter.'''\n return (\n 1 + # group_id\n 2 + # next offset marker\n 1 + len(self.name.encode('utf-8')) + # size of name and name bytes\n 1 + # data size\n 1 + len(self.dimensions) + # size of dimensions and dimension bytes\n self.total_bytes + # data\n 1 + len(self.desc.encode('utf-8')) # size of desc and desc bytes\n )\n"
] | class Param(object):
'''A class representing a single named parameter from a C3D file.
Attributes
----------
name : str
Name of this parameter.
desc : str
Brief description of this parameter.
bytes_per_element : int, optional
For array data, this describes the size of each element of data. For
string data (including arrays of strings), this should be -1.
dimensions : list of int
For array data, this describes the dimensions of the array, stored in
column-major order. For arrays of strings, the dimensions here will be
the number of columns (length of each string) followed by the number of
rows (number of strings).
bytes : str
Raw data for this parameter.
'''
def __init__(self,
name,
desc='',
bytes_per_element=1,
dimensions=None,
bytes=b'',
handle=None):
'''Set up a new parameter, only the name is required.'''
self.name = name
self.desc = desc
self.bytes_per_element = bytes_per_element
self.dimensions = dimensions or []
self.bytes = bytes
if handle:
self.read(handle)
def __repr__(self):
return '<Param: {}>'.format(self.desc)
@property
def num_elements(self):
'''Return the number of elements in this parameter's array value.'''
e = 1
for d in self.dimensions:
e *= d
return e
@property
def total_bytes(self):
'''Return the number of bytes used for storing this parameter's data.'''
return self.num_elements * abs(self.bytes_per_element)
def binary_size(self):
'''Return the number of bytes needed to store this parameter.'''
return (
1 + # group_id
2 + # next offset marker
1 + len(self.name.encode('utf-8')) + # size of name and name bytes
1 + # data size
1 + len(self.dimensions) + # size of dimensions and dimension bytes
self.total_bytes + # data
1 + len(self.desc.encode('utf-8')) # size of desc and desc bytes
)
def read(self, handle):
'''Read binary data for this parameter from a file handle.
This reads exactly enough data from the current position in the file to
initialize the parameter.
'''
self.bytes_per_element, = struct.unpack('b', handle.read(1))
dims, = struct.unpack('B', handle.read(1))
self.dimensions = [struct.unpack('B', handle.read(1))[0] for _ in range(dims)]
self.bytes = b''
if self.total_bytes:
self.bytes = handle.read(self.total_bytes)
size, = struct.unpack('B', handle.read(1))
self.desc = size and handle.read(size).decode('utf-8') or ''
def _as(self, fmt):
'''Unpack the raw bytes of this param using the given struct format.'''
return struct.unpack('<' + fmt, self.bytes)[0]
@property
def int8_value(self):
'''Get the param as an 8-bit signed integer.'''
return self._as('b')
@property
def uint8_value(self):
'''Get the param as an 8-bit unsigned integer.'''
return self._as('B')
@property
def int16_value(self):
'''Get the param as a 16-bit signed integer.'''
return self._as('h')
@property
def uint16_value(self):
'''Get the param as a 16-bit unsigned integer.'''
return self._as('H')
@property
def int32_value(self):
'''Get the param as a 32-bit signed integer.'''
return self._as('i')
@property
def uint32_value(self):
'''Get the param as a 32-bit unsigned integer.'''
return self._as('I')
@property
def float_value(self):
'''Get the param as a 32-bit float.'''
return self._as('f')
@property
def bytes_value(self):
'''Get the param as a raw byte string.'''
return self.bytes
@property
def string_value(self):
'''Get the param as a unicode string.'''
return self.bytes.decode('utf-8')
def _as_array(self, fmt):
'''Unpack the raw bytes of this param using the given data format.'''
assert self.dimensions, \
'{}: cannot get value as {} array!'.format(self.name, fmt)
elems = array.array(fmt)
elems.fromstring(self.bytes)
return np.array(elems).reshape(self.dimensions)
@property
def int8_array(self):
'''Get the param as an array of 8-bit signed integers.'''
return self._as_array('b')
@property
def uint8_array(self):
'''Get the param as an array of 8-bit unsigned integers.'''
return self._as_array('B')
@property
def int16_array(self):
'''Get the param as an array of 16-bit signed integers.'''
return self._as_array('h')
@property
def uint16_array(self):
'''Get the param as an array of 16-bit unsigned integers.'''
return self._as_array('H')
@property
def int32_array(self):
'''Get the param as an array of 32-bit signed integers.'''
return self._as_array('i')
@property
def uint32_array(self):
'''Get the param as an array of 32-bit unsigned integers.'''
return self._as_array('I')
@property
def float_array(self):
'''Get the param as an array of 32-bit floats.'''
return self._as_array('f')
@property
def bytes_array(self):
'''Get the param as an array of raw byte strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as bytes array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l] for i in range(n)]
@property
def string_array(self):
'''Get the param as a array of unicode strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as string array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l].decode('utf-8') for i in range(n)]
|
EmbodiedCognition/py-c3d | c3d.py | Param.read | python | def read(self, handle):
'''Read binary data for this parameter from a file handle.
This reads exactly enough data from the current position in the file to
initialize the parameter.
'''
self.bytes_per_element, = struct.unpack('b', handle.read(1))
dims, = struct.unpack('B', handle.read(1))
self.dimensions = [struct.unpack('B', handle.read(1))[0] for _ in range(dims)]
self.bytes = b''
if self.total_bytes:
self.bytes = handle.read(self.total_bytes)
size, = struct.unpack('B', handle.read(1))
self.desc = size and handle.read(size).decode('utf-8') or '' | Read binary data for this parameter from a file handle.
This reads exactly enough data from the current position in the file to
initialize the parameter. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L262-L275 | null | class Param(object):
'''A class representing a single named parameter from a C3D file.
Attributes
----------
name : str
Name of this parameter.
desc : str
Brief description of this parameter.
bytes_per_element : int, optional
For array data, this describes the size of each element of data. For
string data (including arrays of strings), this should be -1.
dimensions : list of int
For array data, this describes the dimensions of the array, stored in
column-major order. For arrays of strings, the dimensions here will be
the number of columns (length of each string) followed by the number of
rows (number of strings).
bytes : str
Raw data for this parameter.
'''
def __init__(self,
name,
desc='',
bytes_per_element=1,
dimensions=None,
bytes=b'',
handle=None):
'''Set up a new parameter, only the name is required.'''
self.name = name
self.desc = desc
self.bytes_per_element = bytes_per_element
self.dimensions = dimensions or []
self.bytes = bytes
if handle:
self.read(handle)
def __repr__(self):
return '<Param: {}>'.format(self.desc)
@property
def num_elements(self):
'''Return the number of elements in this parameter's array value.'''
e = 1
for d in self.dimensions:
e *= d
return e
@property
def total_bytes(self):
'''Return the number of bytes used for storing this parameter's data.'''
return self.num_elements * abs(self.bytes_per_element)
def binary_size(self):
'''Return the number of bytes needed to store this parameter.'''
return (
1 + # group_id
2 + # next offset marker
1 + len(self.name.encode('utf-8')) + # size of name and name bytes
1 + # data size
1 + len(self.dimensions) + # size of dimensions and dimension bytes
self.total_bytes + # data
1 + len(self.desc.encode('utf-8')) # size of desc and desc bytes
)
def write(self, group_id, handle):
'''Write binary data for this parameter to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group that holds this parameter.
handle : file handle
An open, writable, binary file handle.
'''
name = self.name.encode('utf-8')
handle.write(struct.pack('bb', len(name), group_id))
handle.write(name)
handle.write(struct.pack('<h', self.binary_size() - 2 - len(name)))
handle.write(struct.pack('b', self.bytes_per_element))
handle.write(struct.pack('B', len(self.dimensions)))
handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions))
if self.bytes:
handle.write(self.bytes)
desc = self.desc.encode('utf-8')
handle.write(struct.pack('B', len(desc)))
handle.write(desc)
def _as(self, fmt):
'''Unpack the raw bytes of this param using the given struct format.'''
return struct.unpack('<' + fmt, self.bytes)[0]
@property
def int8_value(self):
'''Get the param as an 8-bit signed integer.'''
return self._as('b')
@property
def uint8_value(self):
'''Get the param as an 8-bit unsigned integer.'''
return self._as('B')
@property
def int16_value(self):
'''Get the param as a 16-bit signed integer.'''
return self._as('h')
@property
def uint16_value(self):
'''Get the param as a 16-bit unsigned integer.'''
return self._as('H')
@property
def int32_value(self):
'''Get the param as a 32-bit signed integer.'''
return self._as('i')
@property
def uint32_value(self):
'''Get the param as a 32-bit unsigned integer.'''
return self._as('I')
@property
def float_value(self):
'''Get the param as a 32-bit float.'''
return self._as('f')
@property
def bytes_value(self):
'''Get the param as a raw byte string.'''
return self.bytes
@property
def string_value(self):
'''Get the param as a unicode string.'''
return self.bytes.decode('utf-8')
def _as_array(self, fmt):
'''Unpack the raw bytes of this param using the given data format.'''
assert self.dimensions, \
'{}: cannot get value as {} array!'.format(self.name, fmt)
elems = array.array(fmt)
elems.fromstring(self.bytes)
return np.array(elems).reshape(self.dimensions)
@property
def int8_array(self):
'''Get the param as an array of 8-bit signed integers.'''
return self._as_array('b')
@property
def uint8_array(self):
'''Get the param as an array of 8-bit unsigned integers.'''
return self._as_array('B')
@property
def int16_array(self):
'''Get the param as an array of 16-bit signed integers.'''
return self._as_array('h')
@property
def uint16_array(self):
'''Get the param as an array of 16-bit unsigned integers.'''
return self._as_array('H')
@property
def int32_array(self):
'''Get the param as an array of 32-bit signed integers.'''
return self._as_array('i')
@property
def uint32_array(self):
'''Get the param as an array of 32-bit unsigned integers.'''
return self._as_array('I')
@property
def float_array(self):
'''Get the param as an array of 32-bit floats.'''
return self._as_array('f')
@property
def bytes_array(self):
'''Get the param as an array of raw byte strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as bytes array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l] for i in range(n)]
@property
def string_array(self):
'''Get the param as a array of unicode strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as string array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l].decode('utf-8') for i in range(n)]
|
EmbodiedCognition/py-c3d | c3d.py | Param._as_array | python | def _as_array(self, fmt):
'''Unpack the raw bytes of this param using the given data format.'''
assert self.dimensions, \
'{}: cannot get value as {} array!'.format(self.name, fmt)
elems = array.array(fmt)
elems.fromstring(self.bytes)
return np.array(elems).reshape(self.dimensions) | Unpack the raw bytes of this param using the given data format. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L326-L332 | null | class Param(object):
'''A class representing a single named parameter from a C3D file.
Attributes
----------
name : str
Name of this parameter.
desc : str
Brief description of this parameter.
bytes_per_element : int, optional
For array data, this describes the size of each element of data. For
string data (including arrays of strings), this should be -1.
dimensions : list of int
For array data, this describes the dimensions of the array, stored in
column-major order. For arrays of strings, the dimensions here will be
the number of columns (length of each string) followed by the number of
rows (number of strings).
bytes : str
Raw data for this parameter.
'''
def __init__(self,
name,
desc='',
bytes_per_element=1,
dimensions=None,
bytes=b'',
handle=None):
'''Set up a new parameter, only the name is required.'''
self.name = name
self.desc = desc
self.bytes_per_element = bytes_per_element
self.dimensions = dimensions or []
self.bytes = bytes
if handle:
self.read(handle)
def __repr__(self):
return '<Param: {}>'.format(self.desc)
@property
def num_elements(self):
'''Return the number of elements in this parameter's array value.'''
e = 1
for d in self.dimensions:
e *= d
return e
@property
def total_bytes(self):
'''Return the number of bytes used for storing this parameter's data.'''
return self.num_elements * abs(self.bytes_per_element)
def binary_size(self):
'''Return the number of bytes needed to store this parameter.'''
return (
1 + # group_id
2 + # next offset marker
1 + len(self.name.encode('utf-8')) + # size of name and name bytes
1 + # data size
1 + len(self.dimensions) + # size of dimensions and dimension bytes
self.total_bytes + # data
1 + len(self.desc.encode('utf-8')) # size of desc and desc bytes
)
def write(self, group_id, handle):
'''Write binary data for this parameter to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group that holds this parameter.
handle : file handle
An open, writable, binary file handle.
'''
name = self.name.encode('utf-8')
handle.write(struct.pack('bb', len(name), group_id))
handle.write(name)
handle.write(struct.pack('<h', self.binary_size() - 2 - len(name)))
handle.write(struct.pack('b', self.bytes_per_element))
handle.write(struct.pack('B', len(self.dimensions)))
handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions))
if self.bytes:
handle.write(self.bytes)
desc = self.desc.encode('utf-8')
handle.write(struct.pack('B', len(desc)))
handle.write(desc)
def read(self, handle):
'''Read binary data for this parameter from a file handle.
This reads exactly enough data from the current position in the file to
initialize the parameter.
'''
self.bytes_per_element, = struct.unpack('b', handle.read(1))
dims, = struct.unpack('B', handle.read(1))
self.dimensions = [struct.unpack('B', handle.read(1))[0] for _ in range(dims)]
self.bytes = b''
if self.total_bytes:
self.bytes = handle.read(self.total_bytes)
size, = struct.unpack('B', handle.read(1))
self.desc = size and handle.read(size).decode('utf-8') or ''
def _as(self, fmt):
'''Unpack the raw bytes of this param using the given struct format.'''
return struct.unpack('<' + fmt, self.bytes)[0]
@property
def int8_value(self):
'''Get the param as an 8-bit signed integer.'''
return self._as('b')
@property
def uint8_value(self):
'''Get the param as an 8-bit unsigned integer.'''
return self._as('B')
@property
def int16_value(self):
'''Get the param as a 16-bit signed integer.'''
return self._as('h')
@property
def uint16_value(self):
'''Get the param as a 16-bit unsigned integer.'''
return self._as('H')
@property
def int32_value(self):
'''Get the param as a 32-bit signed integer.'''
return self._as('i')
@property
def uint32_value(self):
'''Get the param as a 32-bit unsigned integer.'''
return self._as('I')
@property
def float_value(self):
'''Get the param as a 32-bit float.'''
return self._as('f')
@property
def bytes_value(self):
'''Get the param as a raw byte string.'''
return self.bytes
@property
def string_value(self):
'''Get the param as a unicode string.'''
return self.bytes.decode('utf-8')
@property
def int8_array(self):
'''Get the param as an array of 8-bit signed integers.'''
return self._as_array('b')
@property
def uint8_array(self):
'''Get the param as an array of 8-bit unsigned integers.'''
return self._as_array('B')
@property
def int16_array(self):
'''Get the param as an array of 16-bit signed integers.'''
return self._as_array('h')
@property
def uint16_array(self):
'''Get the param as an array of 16-bit unsigned integers.'''
return self._as_array('H')
@property
def int32_array(self):
'''Get the param as an array of 32-bit signed integers.'''
return self._as_array('i')
@property
def uint32_array(self):
'''Get the param as an array of 32-bit unsigned integers.'''
return self._as_array('I')
@property
def float_array(self):
'''Get the param as an array of 32-bit floats.'''
return self._as_array('f')
@property
def bytes_array(self):
'''Get the param as an array of raw byte strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as bytes array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l] for i in range(n)]
@property
def string_array(self):
'''Get the param as a array of unicode strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as string array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l].decode('utf-8') for i in range(n)]
|
EmbodiedCognition/py-c3d | c3d.py | Param.bytes_array | python | def bytes_array(self):
'''Get the param as an array of raw byte strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as bytes array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l] for i in range(n)] | Get the param as an array of raw byte strings. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L370-L375 | null | class Param(object):
'''A class representing a single named parameter from a C3D file.
Attributes
----------
name : str
Name of this parameter.
desc : str
Brief description of this parameter.
bytes_per_element : int, optional
For array data, this describes the size of each element of data. For
string data (including arrays of strings), this should be -1.
dimensions : list of int
For array data, this describes the dimensions of the array, stored in
column-major order. For arrays of strings, the dimensions here will be
the number of columns (length of each string) followed by the number of
rows (number of strings).
bytes : str
Raw data for this parameter.
'''
def __init__(self,
name,
desc='',
bytes_per_element=1,
dimensions=None,
bytes=b'',
handle=None):
'''Set up a new parameter, only the name is required.'''
self.name = name
self.desc = desc
self.bytes_per_element = bytes_per_element
self.dimensions = dimensions or []
self.bytes = bytes
if handle:
self.read(handle)
def __repr__(self):
return '<Param: {}>'.format(self.desc)
@property
def num_elements(self):
'''Return the number of elements in this parameter's array value.'''
e = 1
for d in self.dimensions:
e *= d
return e
@property
def total_bytes(self):
'''Return the number of bytes used for storing this parameter's data.'''
return self.num_elements * abs(self.bytes_per_element)
def binary_size(self):
'''Return the number of bytes needed to store this parameter.'''
return (
1 + # group_id
2 + # next offset marker
1 + len(self.name.encode('utf-8')) + # size of name and name bytes
1 + # data size
1 + len(self.dimensions) + # size of dimensions and dimension bytes
self.total_bytes + # data
1 + len(self.desc.encode('utf-8')) # size of desc and desc bytes
)
def write(self, group_id, handle):
'''Write binary data for this parameter to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group that holds this parameter.
handle : file handle
An open, writable, binary file handle.
'''
name = self.name.encode('utf-8')
handle.write(struct.pack('bb', len(name), group_id))
handle.write(name)
handle.write(struct.pack('<h', self.binary_size() - 2 - len(name)))
handle.write(struct.pack('b', self.bytes_per_element))
handle.write(struct.pack('B', len(self.dimensions)))
handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions))
if self.bytes:
handle.write(self.bytes)
desc = self.desc.encode('utf-8')
handle.write(struct.pack('B', len(desc)))
handle.write(desc)
def read(self, handle):
'''Read binary data for this parameter from a file handle.
This reads exactly enough data from the current position in the file to
initialize the parameter.
'''
self.bytes_per_element, = struct.unpack('b', handle.read(1))
dims, = struct.unpack('B', handle.read(1))
self.dimensions = [struct.unpack('B', handle.read(1))[0] for _ in range(dims)]
self.bytes = b''
if self.total_bytes:
self.bytes = handle.read(self.total_bytes)
size, = struct.unpack('B', handle.read(1))
self.desc = size and handle.read(size).decode('utf-8') or ''
def _as(self, fmt):
'''Unpack the raw bytes of this param using the given struct format.'''
return struct.unpack('<' + fmt, self.bytes)[0]
@property
def int8_value(self):
'''Get the param as an 8-bit signed integer.'''
return self._as('b')
@property
def uint8_value(self):
'''Get the param as an 8-bit unsigned integer.'''
return self._as('B')
@property
def int16_value(self):
'''Get the param as a 16-bit signed integer.'''
return self._as('h')
@property
def uint16_value(self):
'''Get the param as a 16-bit unsigned integer.'''
return self._as('H')
@property
def int32_value(self):
'''Get the param as a 32-bit signed integer.'''
return self._as('i')
@property
def uint32_value(self):
'''Get the param as a 32-bit unsigned integer.'''
return self._as('I')
@property
def float_value(self):
'''Get the param as a 32-bit float.'''
return self._as('f')
@property
def bytes_value(self):
'''Get the param as a raw byte string.'''
return self.bytes
@property
def string_value(self):
'''Get the param as a unicode string.'''
return self.bytes.decode('utf-8')
def _as_array(self, fmt):
'''Unpack the raw bytes of this param using the given data format.'''
assert self.dimensions, \
'{}: cannot get value as {} array!'.format(self.name, fmt)
elems = array.array(fmt)
elems.fromstring(self.bytes)
return np.array(elems).reshape(self.dimensions)
@property
def int8_array(self):
'''Get the param as an array of 8-bit signed integers.'''
return self._as_array('b')
@property
def uint8_array(self):
'''Get the param as an array of 8-bit unsigned integers.'''
return self._as_array('B')
@property
def int16_array(self):
'''Get the param as an array of 16-bit signed integers.'''
return self._as_array('h')
@property
def uint16_array(self):
'''Get the param as an array of 16-bit unsigned integers.'''
return self._as_array('H')
@property
def int32_array(self):
'''Get the param as an array of 32-bit signed integers.'''
return self._as_array('i')
@property
def uint32_array(self):
'''Get the param as an array of 32-bit unsigned integers.'''
return self._as_array('I')
@property
def float_array(self):
'''Get the param as an array of 32-bit floats.'''
return self._as_array('f')
@property
@property
def string_array(self):
'''Get the param as a array of unicode strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as string array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l].decode('utf-8') for i in range(n)]
|
EmbodiedCognition/py-c3d | c3d.py | Param.string_array | python | def string_array(self):
'''Get the param as a array of unicode strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as string array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l].decode('utf-8') for i in range(n)] | Get the param as a array of unicode strings. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L378-L383 | null | class Param(object):
'''A class representing a single named parameter from a C3D file.
Attributes
----------
name : str
Name of this parameter.
desc : str
Brief description of this parameter.
bytes_per_element : int, optional
For array data, this describes the size of each element of data. For
string data (including arrays of strings), this should be -1.
dimensions : list of int
For array data, this describes the dimensions of the array, stored in
column-major order. For arrays of strings, the dimensions here will be
the number of columns (length of each string) followed by the number of
rows (number of strings).
bytes : str
Raw data for this parameter.
'''
def __init__(self,
name,
desc='',
bytes_per_element=1,
dimensions=None,
bytes=b'',
handle=None):
'''Set up a new parameter, only the name is required.'''
self.name = name
self.desc = desc
self.bytes_per_element = bytes_per_element
self.dimensions = dimensions or []
self.bytes = bytes
if handle:
self.read(handle)
def __repr__(self):
return '<Param: {}>'.format(self.desc)
@property
def num_elements(self):
'''Return the number of elements in this parameter's array value.'''
e = 1
for d in self.dimensions:
e *= d
return e
@property
def total_bytes(self):
'''Return the number of bytes used for storing this parameter's data.'''
return self.num_elements * abs(self.bytes_per_element)
def binary_size(self):
'''Return the number of bytes needed to store this parameter.'''
return (
1 + # group_id
2 + # next offset marker
1 + len(self.name.encode('utf-8')) + # size of name and name bytes
1 + # data size
1 + len(self.dimensions) + # size of dimensions and dimension bytes
self.total_bytes + # data
1 + len(self.desc.encode('utf-8')) # size of desc and desc bytes
)
def write(self, group_id, handle):
'''Write binary data for this parameter to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group that holds this parameter.
handle : file handle
An open, writable, binary file handle.
'''
name = self.name.encode('utf-8')
handle.write(struct.pack('bb', len(name), group_id))
handle.write(name)
handle.write(struct.pack('<h', self.binary_size() - 2 - len(name)))
handle.write(struct.pack('b', self.bytes_per_element))
handle.write(struct.pack('B', len(self.dimensions)))
handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions))
if self.bytes:
handle.write(self.bytes)
desc = self.desc.encode('utf-8')
handle.write(struct.pack('B', len(desc)))
handle.write(desc)
def read(self, handle):
'''Read binary data for this parameter from a file handle.
This reads exactly enough data from the current position in the file to
initialize the parameter.
'''
self.bytes_per_element, = struct.unpack('b', handle.read(1))
dims, = struct.unpack('B', handle.read(1))
self.dimensions = [struct.unpack('B', handle.read(1))[0] for _ in range(dims)]
self.bytes = b''
if self.total_bytes:
self.bytes = handle.read(self.total_bytes)
size, = struct.unpack('B', handle.read(1))
self.desc = size and handle.read(size).decode('utf-8') or ''
def _as(self, fmt):
'''Unpack the raw bytes of this param using the given struct format.'''
return struct.unpack('<' + fmt, self.bytes)[0]
@property
def int8_value(self):
'''Get the param as an 8-bit signed integer.'''
return self._as('b')
@property
def uint8_value(self):
'''Get the param as an 8-bit unsigned integer.'''
return self._as('B')
@property
def int16_value(self):
'''Get the param as a 16-bit signed integer.'''
return self._as('h')
@property
def uint16_value(self):
'''Get the param as a 16-bit unsigned integer.'''
return self._as('H')
@property
def int32_value(self):
'''Get the param as a 32-bit signed integer.'''
return self._as('i')
@property
def uint32_value(self):
'''Get the param as a 32-bit unsigned integer.'''
return self._as('I')
@property
def float_value(self):
'''Get the param as a 32-bit float.'''
return self._as('f')
@property
def bytes_value(self):
'''Get the param as a raw byte string.'''
return self.bytes
@property
def string_value(self):
'''Get the param as a unicode string.'''
return self.bytes.decode('utf-8')
def _as_array(self, fmt):
'''Unpack the raw bytes of this param using the given data format.'''
assert self.dimensions, \
'{}: cannot get value as {} array!'.format(self.name, fmt)
elems = array.array(fmt)
elems.fromstring(self.bytes)
return np.array(elems).reshape(self.dimensions)
@property
def int8_array(self):
'''Get the param as an array of 8-bit signed integers.'''
return self._as_array('b')
@property
def uint8_array(self):
'''Get the param as an array of 8-bit unsigned integers.'''
return self._as_array('B')
@property
def int16_array(self):
'''Get the param as an array of 16-bit signed integers.'''
return self._as_array('h')
@property
def uint16_array(self):
'''Get the param as an array of 16-bit unsigned integers.'''
return self._as_array('H')
@property
def int32_array(self):
'''Get the param as an array of 32-bit signed integers.'''
return self._as_array('i')
@property
def uint32_array(self):
'''Get the param as an array of 32-bit unsigned integers.'''
return self._as_array('I')
@property
def float_array(self):
'''Get the param as an array of 32-bit floats.'''
return self._as_array('f')
@property
def bytes_array(self):
'''Get the param as an array of raw byte strings.'''
assert len(self.dimensions) == 2, \
'{}: cannot get value as bytes array!'.format(self.name)
l, n = self.dimensions
return [self.bytes[i*l:(i+1)*l] for i in range(n)]
@property
|
EmbodiedCognition/py-c3d | c3d.py | Group.add_param | python | def add_param(self, name, **kwargs):
'''Add a parameter to this group.
Parameters
----------
name : str
Name of the parameter to add to this group. The name will
automatically be case-normalized.
Additional keyword arguments will be passed to the `Param` constructor.
'''
self.params[name.upper()] = Param(name.upper(), **kwargs) | Add a parameter to this group.
Parameters
----------
name : str
Name of the parameter to add to this group. The name will
automatically be case-normalized.
Additional keyword arguments will be passed to the `Param` constructor. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L425-L436 | null | class Group(object):
'''A group of parameters from a C3D file.
In C3D files, parameters are organized in groups. Each group has a name, a
description, and a set of named parameters.
Attributes
----------
name : str
Name of this parameter group.
desc : str
Description for this parameter group.
'''
def __init__(self, name=None, desc=None):
self.name = name
self.desc = desc
self.params = {}
def __repr__(self):
return '<Group: {}>'.format(self.desc)
def get(self, key, default=None):
'''Get a parameter by key.
Parameters
----------
key : any
Parameter key to look up in this group.
default : any, optional
Value to return if the key is not found. Defaults to None.
Returns
-------
param : :class:`Param`
A parameter from the current group.
'''
return self.params.get(key, default)
def binary_size(self):
'''Return the number of bytes to store this group and its parameters.'''
return (
1 + # group_id
1 + len(self.name.encode('utf-8')) + # size of name and name bytes
2 + # next offset marker
1 + len(self.desc.encode('utf-8')) + # size of desc and desc bytes
sum(p.binary_size() for p in self.params.values()))
def write(self, group_id, handle):
'''Write this parameter group, with parameters, to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group.
handle : file handle
An open, writable, binary file handle.
'''
name = self.name.encode('utf-8')
desc = self.desc.encode('utf-8')
handle.write(struct.pack('bb', len(name), -group_id))
handle.write(name)
handle.write(struct.pack('<h', 3 + len(desc)))
handle.write(struct.pack('B', len(desc)))
handle.write(desc)
for param in self.params.values():
param.write(group_id, handle)
def get_int8(self, key):
'''Get the value of the given parameter as an 8-bit signed integer.'''
return self.params[key.upper()].int8_value
def get_uint8(self, key):
'''Get the value of the given parameter as an 8-bit unsigned integer.'''
return self.params[key.upper()].uint8_value
def get_int16(self, key):
'''Get the value of the given parameter as a 16-bit signed integer.'''
return self.params[key.upper()].int16_value
def get_uint16(self, key):
'''Get the value of the given parameter as a 16-bit unsigned integer.'''
return self.params[key.upper()].uint16_value
def get_int32(self, key):
'''Get the value of the given parameter as a 32-bit signed integer.'''
return self.params[key.upper()].int32_value
def get_uint32(self, key):
'''Get the value of the given parameter as a 32-bit unsigned integer.'''
return self.params[key.upper()].uint32_value
def get_float(self, key):
'''Get the value of the given parameter as a 32-bit float.'''
return self.params[key.upper()].float_value
def get_bytes(self, key):
'''Get the value of the given parameter as a byte array.'''
return self.params[key.upper()].bytes_value
def get_string(self, key):
'''Get the value of the given parameter as a string.'''
return self.params[key.upper()].string_value
|
EmbodiedCognition/py-c3d | c3d.py | Group.binary_size | python | def binary_size(self):
'''Return the number of bytes to store this group and its parameters.'''
return (
1 + # group_id
1 + len(self.name.encode('utf-8')) + # size of name and name bytes
2 + # next offset marker
1 + len(self.desc.encode('utf-8')) + # size of desc and desc bytes
sum(p.binary_size() for p in self.params.values())) | Return the number of bytes to store this group and its parameters. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L438-L445 | null | class Group(object):
'''A group of parameters from a C3D file.
In C3D files, parameters are organized in groups. Each group has a name, a
description, and a set of named parameters.
Attributes
----------
name : str
Name of this parameter group.
desc : str
Description for this parameter group.
'''
def __init__(self, name=None, desc=None):
self.name = name
self.desc = desc
self.params = {}
def __repr__(self):
return '<Group: {}>'.format(self.desc)
def get(self, key, default=None):
'''Get a parameter by key.
Parameters
----------
key : any
Parameter key to look up in this group.
default : any, optional
Value to return if the key is not found. Defaults to None.
Returns
-------
param : :class:`Param`
A parameter from the current group.
'''
return self.params.get(key, default)
def add_param(self, name, **kwargs):
'''Add a parameter to this group.
Parameters
----------
name : str
Name of the parameter to add to this group. The name will
automatically be case-normalized.
Additional keyword arguments will be passed to the `Param` constructor.
'''
self.params[name.upper()] = Param(name.upper(), **kwargs)
def write(self, group_id, handle):
'''Write this parameter group, with parameters, to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group.
handle : file handle
An open, writable, binary file handle.
'''
name = self.name.encode('utf-8')
desc = self.desc.encode('utf-8')
handle.write(struct.pack('bb', len(name), -group_id))
handle.write(name)
handle.write(struct.pack('<h', 3 + len(desc)))
handle.write(struct.pack('B', len(desc)))
handle.write(desc)
for param in self.params.values():
param.write(group_id, handle)
def get_int8(self, key):
'''Get the value of the given parameter as an 8-bit signed integer.'''
return self.params[key.upper()].int8_value
def get_uint8(self, key):
'''Get the value of the given parameter as an 8-bit unsigned integer.'''
return self.params[key.upper()].uint8_value
def get_int16(self, key):
'''Get the value of the given parameter as a 16-bit signed integer.'''
return self.params[key.upper()].int16_value
def get_uint16(self, key):
'''Get the value of the given parameter as a 16-bit unsigned integer.'''
return self.params[key.upper()].uint16_value
def get_int32(self, key):
'''Get the value of the given parameter as a 32-bit signed integer.'''
return self.params[key.upper()].int32_value
def get_uint32(self, key):
'''Get the value of the given parameter as a 32-bit unsigned integer.'''
return self.params[key.upper()].uint32_value
def get_float(self, key):
'''Get the value of the given parameter as a 32-bit float.'''
return self.params[key.upper()].float_value
def get_bytes(self, key):
'''Get the value of the given parameter as a byte array.'''
return self.params[key.upper()].bytes_value
def get_string(self, key):
'''Get the value of the given parameter as a string.'''
return self.params[key.upper()].string_value
|
EmbodiedCognition/py-c3d | c3d.py | Group.write | python | def write(self, group_id, handle):
'''Write this parameter group, with parameters, to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group.
handle : file handle
An open, writable, binary file handle.
'''
name = self.name.encode('utf-8')
desc = self.desc.encode('utf-8')
handle.write(struct.pack('bb', len(name), -group_id))
handle.write(name)
handle.write(struct.pack('<h', 3 + len(desc)))
handle.write(struct.pack('B', len(desc)))
handle.write(desc)
for param in self.params.values():
param.write(group_id, handle) | Write this parameter group, with parameters, to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group.
handle : file handle
An open, writable, binary file handle. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L447-L465 | null | class Group(object):
'''A group of parameters from a C3D file.
In C3D files, parameters are organized in groups. Each group has a name, a
description, and a set of named parameters.
Attributes
----------
name : str
Name of this parameter group.
desc : str
Description for this parameter group.
'''
def __init__(self, name=None, desc=None):
self.name = name
self.desc = desc
self.params = {}
def __repr__(self):
return '<Group: {}>'.format(self.desc)
def get(self, key, default=None):
'''Get a parameter by key.
Parameters
----------
key : any
Parameter key to look up in this group.
default : any, optional
Value to return if the key is not found. Defaults to None.
Returns
-------
param : :class:`Param`
A parameter from the current group.
'''
return self.params.get(key, default)
def add_param(self, name, **kwargs):
'''Add a parameter to this group.
Parameters
----------
name : str
Name of the parameter to add to this group. The name will
automatically be case-normalized.
Additional keyword arguments will be passed to the `Param` constructor.
'''
self.params[name.upper()] = Param(name.upper(), **kwargs)
def binary_size(self):
'''Return the number of bytes to store this group and its parameters.'''
return (
1 + # group_id
1 + len(self.name.encode('utf-8')) + # size of name and name bytes
2 + # next offset marker
1 + len(self.desc.encode('utf-8')) + # size of desc and desc bytes
sum(p.binary_size() for p in self.params.values()))
def get_int8(self, key):
'''Get the value of the given parameter as an 8-bit signed integer.'''
return self.params[key.upper()].int8_value
def get_uint8(self, key):
'''Get the value of the given parameter as an 8-bit unsigned integer.'''
return self.params[key.upper()].uint8_value
def get_int16(self, key):
'''Get the value of the given parameter as a 16-bit signed integer.'''
return self.params[key.upper()].int16_value
def get_uint16(self, key):
'''Get the value of the given parameter as a 16-bit unsigned integer.'''
return self.params[key.upper()].uint16_value
def get_int32(self, key):
'''Get the value of the given parameter as a 32-bit signed integer.'''
return self.params[key.upper()].int32_value
def get_uint32(self, key):
'''Get the value of the given parameter as a 32-bit unsigned integer.'''
return self.params[key.upper()].uint32_value
def get_float(self, key):
'''Get the value of the given parameter as a 32-bit float.'''
return self.params[key.upper()].float_value
def get_bytes(self, key):
'''Get the value of the given parameter as a byte array.'''
return self.params[key.upper()].bytes_value
def get_string(self, key):
'''Get the value of the given parameter as a string.'''
return self.params[key.upper()].string_value
|
EmbodiedCognition/py-c3d | c3d.py | Manager.check_metadata | python | def check_metadata(self):
'''Ensure that the metadata in our file is self-consistent.'''
assert self.header.point_count == self.point_used, (
'inconsistent point count! {} header != {} POINT:USED'.format(
self.header.point_count,
self.point_used,
))
assert self.header.scale_factor == self.point_scale, (
'inconsistent scale factor! {} header != {} POINT:SCALE'.format(
self.header.scale_factor,
self.point_scale,
))
assert self.header.frame_rate == self.point_rate, (
'inconsistent frame rate! {} header != {} POINT:RATE'.format(
self.header.frame_rate,
self.point_rate,
))
ratio = self.analog_rate / self.point_rate
assert True or self.header.analog_per_frame == ratio, (
'inconsistent analog rate! {} header != {} analog-fps / {} point-fps'.format(
self.header.analog_per_frame,
self.analog_rate,
self.point_rate,
))
count = self.analog_used * self.header.analog_per_frame
assert True or self.header.analog_count == count, (
'inconsistent analog count! {} header != {} analog used * {} per-frame'.format(
self.header.analog_count,
self.analog_used,
self.header.analog_per_frame,
))
start = self.get_uint16('POINT:DATA_START')
assert self.header.data_block == start, (
'inconsistent data block! {} header != {} POINT:DATA_START'.format(
self.header.data_block, start))
for name in ('POINT:LABELS', 'POINT:DESCRIPTIONS',
'ANALOG:LABELS', 'ANALOG:DESCRIPTIONS'):
if self.get(name) is None:
warnings.warn('missing parameter {}'.format(name)) | Ensure that the metadata in our file is self-consistent. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L522-L566 | [
"def get(self, group, default=None):\n '''Get a group or parameter.\n\n Parameters\n ----------\n group : str\n If this string contains a period (.), then the part before the\n period will be used to retrieve a group, and the part after the\n period will be used to retrieve a parameter from that group. If this\n string does not contain a period, then just a group will be\n returned.\n default : any\n Return this value if the named group and parameter are not found.\n\n Returns\n -------\n value : :class:`Group` or :class:`Param`\n Either a group or parameter with the specified name(s). If neither\n is found, returns the default value.\n '''\n if isinstance(group, int):\n return self.groups.get(group, default)\n group = group.upper()\n param = None\n if '.' in group:\n group, param = group.split('.', 1)\n if ':' in group:\n group, param = group.split(':', 1)\n if group not in self.groups:\n return default\n group = self.groups[group]\n if param is not None:\n return group.get(param, default)\n return group\n",
"def get_uint16(self, key):\n '''Get a parameter value as a 16-bit unsigned integer.'''\n return self.get(key).uint16_value\n"
] | class Manager(object):
'''A base class for managing C3D file metadata.
This class manages a C3D header (which contains some stock metadata fields)
as well as a set of parameter groups. Each group is accessible using its
name.
Attributes
----------
header : `Header`
Header information for the C3D file.
'''
def __init__(self, header=None):
'''Set up a new Manager with a Header.'''
self.header = header or Header()
self.groups = {}
def add_group(self, group_id, name, desc):
'''Add a new parameter group.
Parameters
----------
group_id : int
The numeric ID for a group to check or create.
name : str, optional
If a group is created, assign this name to the group.
desc : str, optional
If a group is created, assign this description to the group.
Returns
-------
group : :class:`Group`
A group with the given ID, name, and description.
Raises
------
KeyError
If a group with a duplicate ID or name already exists.
'''
if group_id in self.groups:
raise KeyError(group_id)
name = name.upper()
if name in self.groups:
raise KeyError(name)
group = self.groups[name] = self.groups[group_id] = Group(name, desc)
return group
def get(self, group, default=None):
'''Get a group or parameter.
Parameters
----------
group : str
If this string contains a period (.), then the part before the
period will be used to retrieve a group, and the part after the
period will be used to retrieve a parameter from that group. If this
string does not contain a period, then just a group will be
returned.
default : any
Return this value if the named group and parameter are not found.
Returns
-------
value : :class:`Group` or :class:`Param`
Either a group or parameter with the specified name(s). If neither
is found, returns the default value.
'''
if isinstance(group, int):
return self.groups.get(group, default)
group = group.upper()
param = None
if '.' in group:
group, param = group.split('.', 1)
if ':' in group:
group, param = group.split(':', 1)
if group not in self.groups:
return default
group = self.groups[group]
if param is not None:
return group.get(param, default)
return group
def get_int8(self, key):
'''Get a parameter value as an 8-bit signed integer.'''
return self.get(key).int8_value
def get_uint8(self, key):
'''Get a parameter value as an 8-bit unsigned integer.'''
return self.get(key).uint8_value
def get_int16(self, key):
'''Get a parameter value as a 16-bit signed integer.'''
return self.get(key).int16_value
def get_uint16(self, key):
'''Get a parameter value as a 16-bit unsigned integer.'''
return self.get(key).uint16_value
def get_int32(self, key):
'''Get a parameter value as a 32-bit signed integer.'''
return self.get(key).int32_value
def get_uint32(self, key):
'''Get a parameter value as a 32-bit unsigned integer.'''
return self.get(key).uint32_value
def get_float(self, key):
'''Get a parameter value as a 32-bit float.'''
return self.get(key).float_value
def get_bytes(self, key):
'''Get a parameter value as a byte string.'''
return self.get(key).bytes_value
def get_string(self, key):
'''Get a parameter value as a string.'''
return self.get(key).string_value
def parameter_blocks(self):
'''Compute the size (in 512B blocks) of the parameter section.'''
bytes = 4. + sum(g.binary_size() for g in self.groups.values())
return int(np.ceil(bytes / 512))
@property
def point_rate(self):
return self.get_float('POINT:RATE')
@property
def point_scale(self):
return self.get_float('POINT:SCALE')
@property
def point_used(self):
return self.get_uint16('POINT:USED')
@property
def analog_used(self):
try:
return self.get_uint16('ANALOG:USED')
except AttributeError:
return 0
@property
def analog_rate(self):
try:
return self.get_float('ANALOG:RATE')
except AttributeError:
return 0
@property
def point_labels(self):
return self.get('POINT:LABELS').string_array
@property
def analog_labels(self):
return self.get('ANALOG:LABELS').string_array
def first_frame(self):
# this is a hack for phasespace files ... should put it in a subclass.
param = self.get('TRIAL:ACTUAL_START_FIELD')
if param is not None:
return param.int32_value
return self.header.first_frame
def last_frame(self):
# this is a hack for phasespace files ... should put it in a subclass.
param = self.get('TRIAL:ACTUAL_END_FIELD')
if param is not None:
return param.int32_value
return self.header.last_frame
|
EmbodiedCognition/py-c3d | c3d.py | Manager.add_group | python | def add_group(self, group_id, name, desc):
'''Add a new parameter group.
Parameters
----------
group_id : int
The numeric ID for a group to check or create.
name : str, optional
If a group is created, assign this name to the group.
desc : str, optional
If a group is created, assign this description to the group.
Returns
-------
group : :class:`Group`
A group with the given ID, name, and description.
Raises
------
KeyError
If a group with a duplicate ID or name already exists.
'''
if group_id in self.groups:
raise KeyError(group_id)
name = name.upper()
if name in self.groups:
raise KeyError(name)
group = self.groups[name] = self.groups[group_id] = Group(name, desc)
return group | Add a new parameter group.
Parameters
----------
group_id : int
The numeric ID for a group to check or create.
name : str, optional
If a group is created, assign this name to the group.
desc : str, optional
If a group is created, assign this description to the group.
Returns
-------
group : :class:`Group`
A group with the given ID, name, and description.
Raises
------
KeyError
If a group with a duplicate ID or name already exists. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L568-L596 | null | class Manager(object):
'''A base class for managing C3D file metadata.
This class manages a C3D header (which contains some stock metadata fields)
as well as a set of parameter groups. Each group is accessible using its
name.
Attributes
----------
header : `Header`
Header information for the C3D file.
'''
def __init__(self, header=None):
'''Set up a new Manager with a Header.'''
self.header = header or Header()
self.groups = {}
def check_metadata(self):
'''Ensure that the metadata in our file is self-consistent.'''
assert self.header.point_count == self.point_used, (
'inconsistent point count! {} header != {} POINT:USED'.format(
self.header.point_count,
self.point_used,
))
assert self.header.scale_factor == self.point_scale, (
'inconsistent scale factor! {} header != {} POINT:SCALE'.format(
self.header.scale_factor,
self.point_scale,
))
assert self.header.frame_rate == self.point_rate, (
'inconsistent frame rate! {} header != {} POINT:RATE'.format(
self.header.frame_rate,
self.point_rate,
))
ratio = self.analog_rate / self.point_rate
assert True or self.header.analog_per_frame == ratio, (
'inconsistent analog rate! {} header != {} analog-fps / {} point-fps'.format(
self.header.analog_per_frame,
self.analog_rate,
self.point_rate,
))
count = self.analog_used * self.header.analog_per_frame
assert True or self.header.analog_count == count, (
'inconsistent analog count! {} header != {} analog used * {} per-frame'.format(
self.header.analog_count,
self.analog_used,
self.header.analog_per_frame,
))
start = self.get_uint16('POINT:DATA_START')
assert self.header.data_block == start, (
'inconsistent data block! {} header != {} POINT:DATA_START'.format(
self.header.data_block, start))
for name in ('POINT:LABELS', 'POINT:DESCRIPTIONS',
'ANALOG:LABELS', 'ANALOG:DESCRIPTIONS'):
if self.get(name) is None:
warnings.warn('missing parameter {}'.format(name))
def get(self, group, default=None):
'''Get a group or parameter.
Parameters
----------
group : str
If this string contains a period (.), then the part before the
period will be used to retrieve a group, and the part after the
period will be used to retrieve a parameter from that group. If this
string does not contain a period, then just a group will be
returned.
default : any
Return this value if the named group and parameter are not found.
Returns
-------
value : :class:`Group` or :class:`Param`
Either a group or parameter with the specified name(s). If neither
is found, returns the default value.
'''
if isinstance(group, int):
return self.groups.get(group, default)
group = group.upper()
param = None
if '.' in group:
group, param = group.split('.', 1)
if ':' in group:
group, param = group.split(':', 1)
if group not in self.groups:
return default
group = self.groups[group]
if param is not None:
return group.get(param, default)
return group
def get_int8(self, key):
'''Get a parameter value as an 8-bit signed integer.'''
return self.get(key).int8_value
def get_uint8(self, key):
'''Get a parameter value as an 8-bit unsigned integer.'''
return self.get(key).uint8_value
def get_int16(self, key):
'''Get a parameter value as a 16-bit signed integer.'''
return self.get(key).int16_value
def get_uint16(self, key):
'''Get a parameter value as a 16-bit unsigned integer.'''
return self.get(key).uint16_value
def get_int32(self, key):
'''Get a parameter value as a 32-bit signed integer.'''
return self.get(key).int32_value
def get_uint32(self, key):
'''Get a parameter value as a 32-bit unsigned integer.'''
return self.get(key).uint32_value
def get_float(self, key):
'''Get a parameter value as a 32-bit float.'''
return self.get(key).float_value
def get_bytes(self, key):
'''Get a parameter value as a byte string.'''
return self.get(key).bytes_value
def get_string(self, key):
'''Get a parameter value as a string.'''
return self.get(key).string_value
def parameter_blocks(self):
'''Compute the size (in 512B blocks) of the parameter section.'''
bytes = 4. + sum(g.binary_size() for g in self.groups.values())
return int(np.ceil(bytes / 512))
@property
def point_rate(self):
return self.get_float('POINT:RATE')
@property
def point_scale(self):
return self.get_float('POINT:SCALE')
@property
def point_used(self):
return self.get_uint16('POINT:USED')
@property
def analog_used(self):
try:
return self.get_uint16('ANALOG:USED')
except AttributeError:
return 0
@property
def analog_rate(self):
try:
return self.get_float('ANALOG:RATE')
except AttributeError:
return 0
@property
def point_labels(self):
return self.get('POINT:LABELS').string_array
@property
def analog_labels(self):
return self.get('ANALOG:LABELS').string_array
def first_frame(self):
# this is a hack for phasespace files ... should put it in a subclass.
param = self.get('TRIAL:ACTUAL_START_FIELD')
if param is not None:
return param.int32_value
return self.header.first_frame
def last_frame(self):
# this is a hack for phasespace files ... should put it in a subclass.
param = self.get('TRIAL:ACTUAL_END_FIELD')
if param is not None:
return param.int32_value
return self.header.last_frame
|
EmbodiedCognition/py-c3d | c3d.py | Manager.get | python | def get(self, group, default=None):
'''Get a group or parameter.
Parameters
----------
group : str
If this string contains a period (.), then the part before the
period will be used to retrieve a group, and the part after the
period will be used to retrieve a parameter from that group. If this
string does not contain a period, then just a group will be
returned.
default : any
Return this value if the named group and parameter are not found.
Returns
-------
value : :class:`Group` or :class:`Param`
Either a group or parameter with the specified name(s). If neither
is found, returns the default value.
'''
if isinstance(group, int):
return self.groups.get(group, default)
group = group.upper()
param = None
if '.' in group:
group, param = group.split('.', 1)
if ':' in group:
group, param = group.split(':', 1)
if group not in self.groups:
return default
group = self.groups[group]
if param is not None:
return group.get(param, default)
return group | Get a group or parameter.
Parameters
----------
group : str
If this string contains a period (.), then the part before the
period will be used to retrieve a group, and the part after the
period will be used to retrieve a parameter from that group. If this
string does not contain a period, then just a group will be
returned.
default : any
Return this value if the named group and parameter are not found.
Returns
-------
value : :class:`Group` or :class:`Param`
Either a group or parameter with the specified name(s). If neither
is found, returns the default value. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L598-L631 | null | class Manager(object):
'''A base class for managing C3D file metadata.
This class manages a C3D header (which contains some stock metadata fields)
as well as a set of parameter groups. Each group is accessible using its
name.
Attributes
----------
header : `Header`
Header information for the C3D file.
'''
def __init__(self, header=None):
'''Set up a new Manager with a Header.'''
self.header = header or Header()
self.groups = {}
def check_metadata(self):
'''Ensure that the metadata in our file is self-consistent.'''
assert self.header.point_count == self.point_used, (
'inconsistent point count! {} header != {} POINT:USED'.format(
self.header.point_count,
self.point_used,
))
assert self.header.scale_factor == self.point_scale, (
'inconsistent scale factor! {} header != {} POINT:SCALE'.format(
self.header.scale_factor,
self.point_scale,
))
assert self.header.frame_rate == self.point_rate, (
'inconsistent frame rate! {} header != {} POINT:RATE'.format(
self.header.frame_rate,
self.point_rate,
))
ratio = self.analog_rate / self.point_rate
assert True or self.header.analog_per_frame == ratio, (
'inconsistent analog rate! {} header != {} analog-fps / {} point-fps'.format(
self.header.analog_per_frame,
self.analog_rate,
self.point_rate,
))
count = self.analog_used * self.header.analog_per_frame
assert True or self.header.analog_count == count, (
'inconsistent analog count! {} header != {} analog used * {} per-frame'.format(
self.header.analog_count,
self.analog_used,
self.header.analog_per_frame,
))
start = self.get_uint16('POINT:DATA_START')
assert self.header.data_block == start, (
'inconsistent data block! {} header != {} POINT:DATA_START'.format(
self.header.data_block, start))
for name in ('POINT:LABELS', 'POINT:DESCRIPTIONS',
'ANALOG:LABELS', 'ANALOG:DESCRIPTIONS'):
if self.get(name) is None:
warnings.warn('missing parameter {}'.format(name))
def add_group(self, group_id, name, desc):
'''Add a new parameter group.
Parameters
----------
group_id : int
The numeric ID for a group to check or create.
name : str, optional
If a group is created, assign this name to the group.
desc : str, optional
If a group is created, assign this description to the group.
Returns
-------
group : :class:`Group`
A group with the given ID, name, and description.
Raises
------
KeyError
If a group with a duplicate ID or name already exists.
'''
if group_id in self.groups:
raise KeyError(group_id)
name = name.upper()
if name in self.groups:
raise KeyError(name)
group = self.groups[name] = self.groups[group_id] = Group(name, desc)
return group
def get_int8(self, key):
'''Get a parameter value as an 8-bit signed integer.'''
return self.get(key).int8_value
def get_uint8(self, key):
'''Get a parameter value as an 8-bit unsigned integer.'''
return self.get(key).uint8_value
def get_int16(self, key):
'''Get a parameter value as a 16-bit signed integer.'''
return self.get(key).int16_value
def get_uint16(self, key):
'''Get a parameter value as a 16-bit unsigned integer.'''
return self.get(key).uint16_value
def get_int32(self, key):
'''Get a parameter value as a 32-bit signed integer.'''
return self.get(key).int32_value
def get_uint32(self, key):
'''Get a parameter value as a 32-bit unsigned integer.'''
return self.get(key).uint32_value
def get_float(self, key):
'''Get a parameter value as a 32-bit float.'''
return self.get(key).float_value
def get_bytes(self, key):
'''Get a parameter value as a byte string.'''
return self.get(key).bytes_value
def get_string(self, key):
'''Get a parameter value as a string.'''
return self.get(key).string_value
def parameter_blocks(self):
'''Compute the size (in 512B blocks) of the parameter section.'''
bytes = 4. + sum(g.binary_size() for g in self.groups.values())
return int(np.ceil(bytes / 512))
@property
def point_rate(self):
return self.get_float('POINT:RATE')
@property
def point_scale(self):
return self.get_float('POINT:SCALE')
@property
def point_used(self):
return self.get_uint16('POINT:USED')
@property
def analog_used(self):
try:
return self.get_uint16('ANALOG:USED')
except AttributeError:
return 0
@property
def analog_rate(self):
try:
return self.get_float('ANALOG:RATE')
except AttributeError:
return 0
@property
def point_labels(self):
return self.get('POINT:LABELS').string_array
@property
def analog_labels(self):
return self.get('ANALOG:LABELS').string_array
def first_frame(self):
# this is a hack for phasespace files ... should put it in a subclass.
param = self.get('TRIAL:ACTUAL_START_FIELD')
if param is not None:
return param.int32_value
return self.header.first_frame
def last_frame(self):
# this is a hack for phasespace files ... should put it in a subclass.
param = self.get('TRIAL:ACTUAL_END_FIELD')
if param is not None:
return param.int32_value
return self.header.last_frame
|
EmbodiedCognition/py-c3d | c3d.py | Manager.parameter_blocks | python | def parameter_blocks(self):
'''Compute the size (in 512B blocks) of the parameter section.'''
bytes = 4. + sum(g.binary_size() for g in self.groups.values())
return int(np.ceil(bytes / 512)) | Compute the size (in 512B blocks) of the parameter section. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L669-L672 | null | class Manager(object):
'''A base class for managing C3D file metadata.
This class manages a C3D header (which contains some stock metadata fields)
as well as a set of parameter groups. Each group is accessible using its
name.
Attributes
----------
header : `Header`
Header information for the C3D file.
'''
def __init__(self, header=None):
'''Set up a new Manager with a Header.'''
self.header = header or Header()
self.groups = {}
def check_metadata(self):
'''Ensure that the metadata in our file is self-consistent.'''
assert self.header.point_count == self.point_used, (
'inconsistent point count! {} header != {} POINT:USED'.format(
self.header.point_count,
self.point_used,
))
assert self.header.scale_factor == self.point_scale, (
'inconsistent scale factor! {} header != {} POINT:SCALE'.format(
self.header.scale_factor,
self.point_scale,
))
assert self.header.frame_rate == self.point_rate, (
'inconsistent frame rate! {} header != {} POINT:RATE'.format(
self.header.frame_rate,
self.point_rate,
))
ratio = self.analog_rate / self.point_rate
assert True or self.header.analog_per_frame == ratio, (
'inconsistent analog rate! {} header != {} analog-fps / {} point-fps'.format(
self.header.analog_per_frame,
self.analog_rate,
self.point_rate,
))
count = self.analog_used * self.header.analog_per_frame
assert True or self.header.analog_count == count, (
'inconsistent analog count! {} header != {} analog used * {} per-frame'.format(
self.header.analog_count,
self.analog_used,
self.header.analog_per_frame,
))
start = self.get_uint16('POINT:DATA_START')
assert self.header.data_block == start, (
'inconsistent data block! {} header != {} POINT:DATA_START'.format(
self.header.data_block, start))
for name in ('POINT:LABELS', 'POINT:DESCRIPTIONS',
'ANALOG:LABELS', 'ANALOG:DESCRIPTIONS'):
if self.get(name) is None:
warnings.warn('missing parameter {}'.format(name))
def add_group(self, group_id, name, desc):
'''Add a new parameter group.
Parameters
----------
group_id : int
The numeric ID for a group to check or create.
name : str, optional
If a group is created, assign this name to the group.
desc : str, optional
If a group is created, assign this description to the group.
Returns
-------
group : :class:`Group`
A group with the given ID, name, and description.
Raises
------
KeyError
If a group with a duplicate ID or name already exists.
'''
if group_id in self.groups:
raise KeyError(group_id)
name = name.upper()
if name in self.groups:
raise KeyError(name)
group = self.groups[name] = self.groups[group_id] = Group(name, desc)
return group
def get(self, group, default=None):
'''Get a group or parameter.
Parameters
----------
group : str
If this string contains a period (.), then the part before the
period will be used to retrieve a group, and the part after the
period will be used to retrieve a parameter from that group. If this
string does not contain a period, then just a group will be
returned.
default : any
Return this value if the named group and parameter are not found.
Returns
-------
value : :class:`Group` or :class:`Param`
Either a group or parameter with the specified name(s). If neither
is found, returns the default value.
'''
if isinstance(group, int):
return self.groups.get(group, default)
group = group.upper()
param = None
if '.' in group:
group, param = group.split('.', 1)
if ':' in group:
group, param = group.split(':', 1)
if group not in self.groups:
return default
group = self.groups[group]
if param is not None:
return group.get(param, default)
return group
def get_int8(self, key):
'''Get a parameter value as an 8-bit signed integer.'''
return self.get(key).int8_value
def get_uint8(self, key):
'''Get a parameter value as an 8-bit unsigned integer.'''
return self.get(key).uint8_value
def get_int16(self, key):
'''Get a parameter value as a 16-bit signed integer.'''
return self.get(key).int16_value
def get_uint16(self, key):
'''Get a parameter value as a 16-bit unsigned integer.'''
return self.get(key).uint16_value
def get_int32(self, key):
'''Get a parameter value as a 32-bit signed integer.'''
return self.get(key).int32_value
def get_uint32(self, key):
'''Get a parameter value as a 32-bit unsigned integer.'''
return self.get(key).uint32_value
def get_float(self, key):
'''Get a parameter value as a 32-bit float.'''
return self.get(key).float_value
def get_bytes(self, key):
'''Get a parameter value as a byte string.'''
return self.get(key).bytes_value
def get_string(self, key):
'''Get a parameter value as a string.'''
return self.get(key).string_value
@property
def point_rate(self):
return self.get_float('POINT:RATE')
@property
def point_scale(self):
return self.get_float('POINT:SCALE')
@property
def point_used(self):
return self.get_uint16('POINT:USED')
@property
def analog_used(self):
try:
return self.get_uint16('ANALOG:USED')
except AttributeError:
return 0
@property
def analog_rate(self):
try:
return self.get_float('ANALOG:RATE')
except AttributeError:
return 0
@property
def point_labels(self):
return self.get('POINT:LABELS').string_array
@property
def analog_labels(self):
return self.get('ANALOG:LABELS').string_array
def first_frame(self):
# this is a hack for phasespace files ... should put it in a subclass.
param = self.get('TRIAL:ACTUAL_START_FIELD')
if param is not None:
return param.int32_value
return self.header.first_frame
def last_frame(self):
# this is a hack for phasespace files ... should put it in a subclass.
param = self.get('TRIAL:ACTUAL_END_FIELD')
if param is not None:
return param.int32_value
return self.header.last_frame
|
EmbodiedCognition/py-c3d | c3d.py | Reader.read_frames | python | def read_frames(self, copy=True):
'''Iterate over the data frames from our C3D file handle.
Parameters
----------
copy : bool
If False, the reader returns a reference to the same data buffers
for every frame. The default is True, which causes the reader to
return a unique data buffer for each frame. Set this to False if you
consume frames as you iterate over them, or True if you store them
for later.
Returns
-------
frames : sequence of (frame number, points, analog)
This method generates a sequence of (frame number, points, analog)
tuples, one tuple per frame. The first element of each tuple is the
frame number. The second is a numpy array of parsed, 5D point data
and the third element of each tuple is a numpy array of analog
values that were recorded during the frame. (Often the analog data
are sampled at a higher frequency than the 3D point data, resulting
in multiple analog frames per frame of point data.)
The first three columns in the returned point data are the (x, y, z)
coordinates of the observed motion capture point. The fourth column
is an estimate of the error for this particular point, and the fifth
column is the number of cameras that observed the point in question.
Both the fourth and fifth values are -1 if the point is considered
to be invalid.
'''
scale = abs(self.point_scale)
is_float = self.point_scale < 0
point_bytes = [2, 4][is_float]
point_dtype = [np.int16, np.float32][is_float]
point_scale = [scale, 1][is_float]
points = np.zeros((self.point_used, 5), float)
# TODO: handle ANALOG:BITS parameter here!
p = self.get('ANALOG:FORMAT')
analog_unsigned = p and p.string_value.strip().upper() == 'UNSIGNED'
analog_dtype = np.int16
analog_bytes = 2
if is_float:
analog_dtype = np.float32
analog_bytes = 4
elif analog_unsigned:
analog_dtype = np.uint16
analog_bytes = 2
analog = np.array([], float)
offsets = np.zeros((self.analog_used, 1), int)
param = self.get('ANALOG:OFFSET')
if param is not None:
offsets = param.int16_array[:self.analog_used, None]
scales = np.ones((self.analog_used, 1), float)
param = self.get('ANALOG:SCALE')
if param is not None:
scales = param.float_array[:self.analog_used, None]
gen_scale = 1.
param = self.get('ANALOG:GEN_SCALE')
if param is not None:
gen_scale = param.float_value
self._handle.seek((self.header.data_block - 1) * 512)
for frame_no in range(self.first_frame(), self.last_frame() + 1):
n = 4 * self.header.point_count
raw = np.fromstring(self._handle.read(n * point_bytes),
dtype=point_dtype,
count=n).reshape((self.point_used, 4))
points[:, :3] = raw[:, :3] * point_scale
valid = raw[:, 3] > -1
points[~valid, 3:5] = -1
c = raw[valid, 3].astype(np.uint16)
# fourth value is floating-point (scaled) error estimate
points[valid, 3] = (c & 0xff).astype(float) * scale
# fifth value is number of bits set in camera-observation byte
points[valid, 4] = sum((c & (1 << k)) >> k for k in range(8, 17))
if self.header.analog_count > 0:
n = self.header.analog_count
raw = np.fromstring(self._handle.read(n * analog_bytes),
dtype=analog_dtype,
count=n).reshape((-1, self.analog_used)).T
analog = (raw.astype(float) - offsets) * scales * gen_scale
if copy:
yield frame_no, points.copy(), analog.copy()
else:
yield frame_no, points, analog | Iterate over the data frames from our C3D file handle.
Parameters
----------
copy : bool
If False, the reader returns a reference to the same data buffers
for every frame. The default is True, which causes the reader to
return a unique data buffer for each frame. Set this to False if you
consume frames as you iterate over them, or True if you store them
for later.
Returns
-------
frames : sequence of (frame number, points, analog)
This method generates a sequence of (frame number, points, analog)
tuples, one tuple per frame. The first element of each tuple is the
frame number. The second is a numpy array of parsed, 5D point data
and the third element of each tuple is a numpy array of analog
values that were recorded during the frame. (Often the analog data
are sampled at a higher frequency than the 3D point data, resulting
in multiple analog frames per frame of point data.)
The first three columns in the returned point data are the (x, y, z)
coordinates of the observed motion capture point. The fourth column
is an estimate of the error for this particular point, and the fifth
column is the number of cameras that observed the point in question.
Both the fourth and fifth values are -1 if the point is considered
to be invalid. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L804-L899 | [
"def get(self, group, default=None):\n '''Get a group or parameter.\n\n Parameters\n ----------\n group : str\n If this string contains a period (.), then the part before the\n period will be used to retrieve a group, and the part after the\n period will be used to retrieve a parameter from that group. If this\n string does not contain a period, then just a group will be\n returned.\n default : any\n Return this value if the named group and parameter are not found.\n\n Returns\n -------\n value : :class:`Group` or :class:`Param`\n Either a group or parameter with the specified name(s). If neither\n is found, returns the default value.\n '''\n if isinstance(group, int):\n return self.groups.get(group, default)\n group = group.upper()\n param = None\n if '.' in group:\n group, param = group.split('.', 1)\n if ':' in group:\n group, param = group.split(':', 1)\n if group not in self.groups:\n return default\n group = self.groups[group]\n if param is not None:\n return group.get(param, default)\n return group\n",
"def first_frame(self):\n # this is a hack for phasespace files ... should put it in a subclass.\n param = self.get('TRIAL:ACTUAL_START_FIELD')\n if param is not None:\n return param.int32_value\n return self.header.first_frame\n",
"def last_frame(self):\n # this is a hack for phasespace files ... should put it in a subclass.\n param = self.get('TRIAL:ACTUAL_END_FIELD')\n if param is not None:\n return param.int32_value\n return self.header.last_frame\n"
] | class Reader(Manager):
'''This class provides methods for reading the data in a C3D file.
A C3D file contains metadata and frame-based data describing 3D motion.
You can iterate over the frames in the file by calling `read_frames()` after
construction:
>>> r = c3d.Reader(open('capture.c3d', 'rb'))
>>> for frame_no, points, analog in r.read_frames():
... print('{0.shape} points in this frame'.format(points))
'''
def __init__(self, handle):
'''Initialize this C3D file by reading header and parameter data.
Parameters
----------
handle : file handle
Read metadata and C3D motion frames from the given file handle. This
handle is assumed to be `seek`-able and `read`-able. The handle must
remain open for the life of the `Reader` instance. The `Reader` does
not `close` the handle.
Raises
------
ValueError
If the processor metadata in the C3D file is anything other than 84
(Intel format).
'''
super(Reader, self).__init__(Header(handle))
self._handle = handle
self._handle.seek((self.header.parameter_block - 1) * 512)
# metadata header
buf = self._handle.read(4)
_, _, parameter_blocks, processor = struct.unpack('BBBB', buf)
if processor != PROCESSOR_INTEL:
raise ValueError(
'we only read Intel C3D files (got processor {})'.
format(processor))
# read all parameter blocks as a single chunk to avoid block
# boundary issues.
bytes = self._handle.read(512 * parameter_blocks - 4)
while bytes:
buf = io.BytesIO(bytes)
chars_in_name, group_id = struct.unpack('bb', buf.read(2))
if group_id == 0 or chars_in_name == 0:
# we've reached the end of the parameter section.
break
name = buf.read(abs(chars_in_name)).decode('utf-8').upper()
offset_to_next, = struct.unpack('<h', buf.read(2))
if group_id > 0:
# we've just started reading a parameter. if its group doesn't
# exist, create a blank one. add the parameter to the group.
self.groups.setdefault(group_id, Group()).add_param(name, handle=buf)
else:
# we've just started reading a group. if a group with the
# appropriate id exists already (because we've already created
# it for a parameter), just set the name of the group.
# otherwise, add a new group.
group_id = abs(group_id)
size, = struct.unpack('B', buf.read(1))
desc = size and buf.read(size) or ''
group = self.get(group_id)
if group is not None:
group.name = name
group.desc = desc
self.groups[name] = group
else:
self.add_group(group_id, name, desc)
bytes = bytes[2 + abs(chars_in_name) + offset_to_next:]
self.check_metadata()
|
EmbodiedCognition/py-c3d | c3d.py | Writer._pad_block | python | def _pad_block(self, handle):
'''Pad the file with 0s to the end of the next block boundary.'''
extra = handle.tell() % 512
if extra:
handle.write(b'\x00' * (512 - extra)) | Pad the file with 0s to the end of the next block boundary. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L956-L960 | null | class Writer(Manager):
'''This class writes metadata and frames to a C3D file.
For example, to read an existing C3D file, apply some sort of data
processing to the frames, and write out another C3D file::
>>> r = c3d.Reader(open('data.c3d', 'rb'))
>>> w = c3d.Writer()
>>> w.add_frames(process_frames_somehow(r.read_frames()))
>>> with open('smoothed.c3d', 'wb') as handle:
>>> w.write(handle)
Parameters
----------
point_rate : float, optional
The frame rate of the data. Defaults to 480.
analog_rate : float, optional
The number of analog samples per frame. Defaults to 0.
point_scale : float, optional
The scale factor for point data. Defaults to -1 (i.e., "check the
POINT:SCALE parameter").
point_units : str, optional
The units that the point numbers represent. Defaults to ``'mm '``.
gen_scale : float, optional
General scaling factor for data. Defaults to 1.
'''
def __init__(self,
point_rate=480.,
analog_rate=0.,
point_scale=-1.,
point_units='mm ',
gen_scale=1.):
'''Set metadata for this writer.
'''
super(Writer, self).__init__()
self._point_rate = point_rate
self._analog_rate = analog_rate
self._point_scale = point_scale
self._point_units = point_units
self._gen_scale = gen_scale
self._frames = []
def add_frames(self, frames):
'''Add frames to this writer instance.
Parameters
----------
frames : sequence of (point, analog) tuples
A sequence of frame data to add to the writer.
'''
self._frames.extend(frames)
def _write_metadata(self, handle):
'''Write metadata to a file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
self.check_metadata()
# header
self.header.write(handle)
self._pad_block(handle)
assert handle.tell() == 512
# groups
handle.write(struct.pack(
'BBBB', 0, 0, self.parameter_blocks(), PROCESSOR_INTEL))
id_groups = sorted(
(i, g) for i, g in self.groups.items() if isinstance(i, int))
for group_id, group in id_groups:
group.write(group_id, handle)
# padding
self._pad_block(handle)
while handle.tell() != 512 * (self.header.data_block - 1):
handle.write(b'\x00' * 512)
def _write_frames(self, handle):
'''Write our frame data to the given file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
assert handle.tell() == 512 * (self.header.data_block - 1)
scale = abs(self.point_scale)
is_float = self.point_scale < 0
point_dtype = [np.int16, np.float32][is_float]
point_scale = [scale, 1][is_float]
point_format = 'if'[is_float]
raw = np.empty((self.point_used, 4), point_dtype)
for points, analog in self._frames:
valid = points[:, 3] > -1
raw[~valid, 3] = -1
raw[valid, :3] = points[valid, :3] / self._point_scale
raw[valid, 3] = (
((points[valid, 4]).astype(np.uint8) << 8) |
(points[valid, 3] / scale).astype(np.uint16)
)
point = array.array(point_format)
point.extend(raw.flatten())
point.tofile(handle)
analog = array.array(point_format)
analog.extend(analog)
analog.tofile(handle)
self._pad_block(handle)
def write(self, handle):
'''Write metadata and point + analog frames to a file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
if not self._frames:
return
def add(name, desc, bpe, format, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=bpe,
bytes=struct.pack(format, bytes),
dimensions=list(dimensions))
def add_str(name, desc, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=-1,
bytes=bytes.encode('utf-8'),
dimensions=list(dimensions))
def add_empty_array(name, desc, bpe):
group.add_param(name, desc=desc, bytes_per_element=bpe, dimensions=[0])
points, analog = self._frames[0]
ppf = len(points)
# POINT group
group = self.add_group(1, 'POINT', 'POINT group')
add('USED', 'Number of 3d markers', 2, '<H', ppf)
add('FRAMES', 'frame count', 2, '<H', min(65535, len(self._frames)))
add('DATA_START', 'data block number', 2, '<H', 0)
add('SCALE', '3d scale factor', 4, '<f', self._point_scale)
add('RATE', '3d data capture rate', 4, '<f', self._point_rate)
add_str('X_SCREEN', 'X_SCREEN parameter', '+X', 2)
add_str('Y_SCREEN', 'Y_SCREEN parameter', '+Y', 2)
add_str('UNITS', '3d data units', self._point_units, len(self._point_units))
add_str('LABELS', 'labels', ''.join('M%03d ' % i for i in range(ppf)), 5, ppf)
add_str('DESCRIPTIONS', 'descriptions', ' ' * 16 * ppf, 16, ppf)
# ANALOG group
group = self.add_group(2, 'ANALOG', 'ANALOG group')
add('USED', 'analog channel count', 2, '<H', analog.shape[0])
add('RATE', 'analog samples per 3d frame', 4, '<f', analog.shape[1])
add('GEN_SCALE', 'analog general scale factor', 4, '<f', self._gen_scale)
add_empty_array('SCALE', 'analog channel scale factors', 4)
add_empty_array('OFFSET', 'analog channel offsets', 2)
# TRIAL group
group = self.add_group(3, 'TRIAL', 'TRIAL group')
add('ACTUAL_START_FIELD', 'actual start frame', 2, '<I', 1, 2)
add('ACTUAL_END_FIELD', 'actual end frame', 2, '<I', len(self._frames), 2)
# sync parameter information to header.
blocks = self.parameter_blocks()
self.get('POINT:DATA_START').bytes = struct.pack('<H', 2 + blocks)
self.header.data_block = 2 + blocks
self.header.frame_rate = self._point_rate
self.header.last_frame = min(len(self._frames), 65535)
self.header.point_count = ppf
self.header.analog_count = np.prod(analog.shape)
self.header.analog_per_frame = analog.shape[0]
self.header.scale_factor = self._point_scale
self._write_metadata(handle)
self._write_frames(handle)
|
def _write_metadata(self, handle):
    '''Write the C3D header and parameter section to a file handle.

    Parameters
    ----------
    handle : file
        Destination file handle; the header is written first, so the
        handle is expected to be at offset 0. The writer does not close
        the handle.
    '''
    self.check_metadata()

    # Header occupies the first 512-byte block.
    self.header.write(handle)
    self._pad_block(handle)
    assert handle.tell() == 512

    # Parameter section: a 4-byte preamble carrying the parameter block
    # count and processor type, then each group in numeric-id order.
    handle.write(struct.pack(
        'BBBB', 0, 0, self.parameter_blocks(), PROCESSOR_INTEL))
    id_groups = sorted(
        (i, g) for i, g in self.groups.items() if isinstance(i, int))
    for group_id, group in id_groups:
        group.write(group_id, handle)

    # Zero-fill whole blocks until the block where frame data starts.
    self._pad_block(handle)
    while handle.tell() != 512 * (self.header.data_block - 1):
        handle.write(b'\x00' * 512)
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L962-L989 | null | class Writer(Manager):
'''This class writes metadata and frames to a C3D file.
For example, to read an existing C3D file, apply some sort of data
processing to the frames, and write out another C3D file::
>>> r = c3d.Reader(open('data.c3d', 'rb'))
>>> w = c3d.Writer()
>>> w.add_frames(process_frames_somehow(r.read_frames()))
>>> with open('smoothed.c3d', 'wb') as handle:
>>> w.write(handle)
Parameters
----------
point_rate : float, optional
The frame rate of the data. Defaults to 480.
analog_rate : float, optional
The number of analog samples per frame. Defaults to 0.
point_scale : float, optional
The scale factor for point data. Defaults to -1 (i.e., "check the
POINT:SCALE parameter").
point_units : str, optional
The units that the point numbers represent. Defaults to ``'mm '``.
gen_scale : float, optional
General scaling factor for data. Defaults to 1.
'''
def __init__(self,
point_rate=480.,
analog_rate=0.,
point_scale=-1.,
point_units='mm ',
gen_scale=1.):
'''Set metadata for this writer.
'''
super(Writer, self).__init__()
self._point_rate = point_rate
self._analog_rate = analog_rate
self._point_scale = point_scale
self._point_units = point_units
self._gen_scale = gen_scale
self._frames = []
def add_frames(self, frames):
'''Add frames to this writer instance.
Parameters
----------
frames : sequence of (point, analog) tuples
A sequence of frame data to add to the writer.
'''
self._frames.extend(frames)
def _pad_block(self, handle):
'''Pad the file with 0s to the end of the next block boundary.'''
extra = handle.tell() % 512
if extra:
handle.write(b'\x00' * (512 - extra))
def _write_frames(self, handle):
'''Write our frame data to the given file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
assert handle.tell() == 512 * (self.header.data_block - 1)
scale = abs(self.point_scale)
is_float = self.point_scale < 0
point_dtype = [np.int16, np.float32][is_float]
point_scale = [scale, 1][is_float]
point_format = 'if'[is_float]
raw = np.empty((self.point_used, 4), point_dtype)
for points, analog in self._frames:
valid = points[:, 3] > -1
raw[~valid, 3] = -1
raw[valid, :3] = points[valid, :3] / self._point_scale
raw[valid, 3] = (
((points[valid, 4]).astype(np.uint8) << 8) |
(points[valid, 3] / scale).astype(np.uint16)
)
point = array.array(point_format)
point.extend(raw.flatten())
point.tofile(handle)
analog = array.array(point_format)
analog.extend(analog)
analog.tofile(handle)
self._pad_block(handle)
def write(self, handle):
'''Write metadata and point + analog frames to a file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
if not self._frames:
return
def add(name, desc, bpe, format, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=bpe,
bytes=struct.pack(format, bytes),
dimensions=list(dimensions))
def add_str(name, desc, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=-1,
bytes=bytes.encode('utf-8'),
dimensions=list(dimensions))
def add_empty_array(name, desc, bpe):
group.add_param(name, desc=desc, bytes_per_element=bpe, dimensions=[0])
points, analog = self._frames[0]
ppf = len(points)
# POINT group
group = self.add_group(1, 'POINT', 'POINT group')
add('USED', 'Number of 3d markers', 2, '<H', ppf)
add('FRAMES', 'frame count', 2, '<H', min(65535, len(self._frames)))
add('DATA_START', 'data block number', 2, '<H', 0)
add('SCALE', '3d scale factor', 4, '<f', self._point_scale)
add('RATE', '3d data capture rate', 4, '<f', self._point_rate)
add_str('X_SCREEN', 'X_SCREEN parameter', '+X', 2)
add_str('Y_SCREEN', 'Y_SCREEN parameter', '+Y', 2)
add_str('UNITS', '3d data units', self._point_units, len(self._point_units))
add_str('LABELS', 'labels', ''.join('M%03d ' % i for i in range(ppf)), 5, ppf)
add_str('DESCRIPTIONS', 'descriptions', ' ' * 16 * ppf, 16, ppf)
# ANALOG group
group = self.add_group(2, 'ANALOG', 'ANALOG group')
add('USED', 'analog channel count', 2, '<H', analog.shape[0])
add('RATE', 'analog samples per 3d frame', 4, '<f', analog.shape[1])
add('GEN_SCALE', 'analog general scale factor', 4, '<f', self._gen_scale)
add_empty_array('SCALE', 'analog channel scale factors', 4)
add_empty_array('OFFSET', 'analog channel offsets', 2)
# TRIAL group
group = self.add_group(3, 'TRIAL', 'TRIAL group')
add('ACTUAL_START_FIELD', 'actual start frame', 2, '<I', 1, 2)
add('ACTUAL_END_FIELD', 'actual end frame', 2, '<I', len(self._frames), 2)
# sync parameter information to header.
blocks = self.parameter_blocks()
self.get('POINT:DATA_START').bytes = struct.pack('<H', 2 + blocks)
self.header.data_block = 2 + blocks
self.header.frame_rate = self._point_rate
self.header.last_frame = min(len(self._frames), 65535)
self.header.point_count = ppf
self.header.analog_count = np.prod(analog.shape)
self.header.analog_per_frame = analog.shape[0]
self.header.scale_factor = self._point_scale
self._write_metadata(handle)
self._write_frames(handle)
|
def _write_frames(self, handle):
    '''Write point and analog frame data to the given file handle.

    Parameters
    ----------
    handle : file
        Destination file handle, already positioned at the start of the
        data block (asserted below). The writer does not close the
        handle.
    '''
    assert handle.tell() == 512 * (self.header.data_block - 1)
    scale = abs(self.point_scale)
    is_float = self.point_scale < 0
    point_dtype = [np.int16, np.float32][is_float]
    # Float files store raw coordinates (divisor 1); integer files store
    # coordinates divided by the scale factor.
    point_scale = [scale, 1][is_float]
    point_format = 'if'[is_float]
    raw = np.empty((self.point_used, 4), point_dtype)
    for points, analog in self._frames:
        valid = points[:, 3] > -1
        raw[~valid, 3] = -1
        # BUG FIX: divide by the mode-appropriate divisor computed above
        # (previously this divided by self._point_scale, which is
        # negative in float mode and would have negated every
        # coordinate; the `point_scale` local was computed but unused).
        raw[valid, :3] = points[valid, :3] / point_scale
        # Fourth word packs the camera byte (high 8 bits) over the
        # scaled residual (low bits).
        raw[valid, 3] = (
            ((points[valid, 4]).astype(np.uint8) << 8) |
            (points[valid, 3] / scale).astype(np.uint16)
        )
        point = array.array(point_format)
        point.extend(raw.flatten())
        point.tofile(handle)
        # BUG FIX: the original rebound `analog` to a fresh empty
        # array.array and extended it with itself, so the frame's analog
        # samples were silently dropped. Write the actual samples.
        # NOTE(review): assumes `analog` is a numeric array-like
        # (channels x samples), consistent with write()'s use of
        # analog.shape -- confirm against callers.
        analog_words = array.array(point_format)
        analog_words.extend(np.asarray(analog).flatten())
        analog_words.tofile(handle)
    self._pad_block(handle)
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L991-L1021 | null | class Writer(Manager):
'''This class writes metadata and frames to a C3D file.
For example, to read an existing C3D file, apply some sort of data
processing to the frames, and write out another C3D file::
>>> r = c3d.Reader(open('data.c3d', 'rb'))
>>> w = c3d.Writer()
>>> w.add_frames(process_frames_somehow(r.read_frames()))
>>> with open('smoothed.c3d', 'wb') as handle:
>>> w.write(handle)
Parameters
----------
point_rate : float, optional
The frame rate of the data. Defaults to 480.
analog_rate : float, optional
The number of analog samples per frame. Defaults to 0.
point_scale : float, optional
The scale factor for point data. Defaults to -1 (i.e., "check the
POINT:SCALE parameter").
point_units : str, optional
The units that the point numbers represent. Defaults to ``'mm '``.
gen_scale : float, optional
General scaling factor for data. Defaults to 1.
'''
def __init__(self,
point_rate=480.,
analog_rate=0.,
point_scale=-1.,
point_units='mm ',
gen_scale=1.):
'''Set metadata for this writer.
'''
super(Writer, self).__init__()
self._point_rate = point_rate
self._analog_rate = analog_rate
self._point_scale = point_scale
self._point_units = point_units
self._gen_scale = gen_scale
self._frames = []
def add_frames(self, frames):
'''Add frames to this writer instance.
Parameters
----------
frames : sequence of (point, analog) tuples
A sequence of frame data to add to the writer.
'''
self._frames.extend(frames)
def _pad_block(self, handle):
'''Pad the file with 0s to the end of the next block boundary.'''
extra = handle.tell() % 512
if extra:
handle.write(b'\x00' * (512 - extra))
def _write_metadata(self, handle):
'''Write metadata to a file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
self.check_metadata()
# header
self.header.write(handle)
self._pad_block(handle)
assert handle.tell() == 512
# groups
handle.write(struct.pack(
'BBBB', 0, 0, self.parameter_blocks(), PROCESSOR_INTEL))
id_groups = sorted(
(i, g) for i, g in self.groups.items() if isinstance(i, int))
for group_id, group in id_groups:
group.write(group_id, handle)
# padding
self._pad_block(handle)
while handle.tell() != 512 * (self.header.data_block - 1):
handle.write(b'\x00' * 512)
def write(self, handle):
'''Write metadata and point + analog frames to a file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
if not self._frames:
return
def add(name, desc, bpe, format, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=bpe,
bytes=struct.pack(format, bytes),
dimensions=list(dimensions))
def add_str(name, desc, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=-1,
bytes=bytes.encode('utf-8'),
dimensions=list(dimensions))
def add_empty_array(name, desc, bpe):
group.add_param(name, desc=desc, bytes_per_element=bpe, dimensions=[0])
points, analog = self._frames[0]
ppf = len(points)
# POINT group
group = self.add_group(1, 'POINT', 'POINT group')
add('USED', 'Number of 3d markers', 2, '<H', ppf)
add('FRAMES', 'frame count', 2, '<H', min(65535, len(self._frames)))
add('DATA_START', 'data block number', 2, '<H', 0)
add('SCALE', '3d scale factor', 4, '<f', self._point_scale)
add('RATE', '3d data capture rate', 4, '<f', self._point_rate)
add_str('X_SCREEN', 'X_SCREEN parameter', '+X', 2)
add_str('Y_SCREEN', 'Y_SCREEN parameter', '+Y', 2)
add_str('UNITS', '3d data units', self._point_units, len(self._point_units))
add_str('LABELS', 'labels', ''.join('M%03d ' % i for i in range(ppf)), 5, ppf)
add_str('DESCRIPTIONS', 'descriptions', ' ' * 16 * ppf, 16, ppf)
# ANALOG group
group = self.add_group(2, 'ANALOG', 'ANALOG group')
add('USED', 'analog channel count', 2, '<H', analog.shape[0])
add('RATE', 'analog samples per 3d frame', 4, '<f', analog.shape[1])
add('GEN_SCALE', 'analog general scale factor', 4, '<f', self._gen_scale)
add_empty_array('SCALE', 'analog channel scale factors', 4)
add_empty_array('OFFSET', 'analog channel offsets', 2)
# TRIAL group
group = self.add_group(3, 'TRIAL', 'TRIAL group')
add('ACTUAL_START_FIELD', 'actual start frame', 2, '<I', 1, 2)
add('ACTUAL_END_FIELD', 'actual end frame', 2, '<I', len(self._frames), 2)
# sync parameter information to header.
blocks = self.parameter_blocks()
self.get('POINT:DATA_START').bytes = struct.pack('<H', 2 + blocks)
self.header.data_block = 2 + blocks
self.header.frame_rate = self._point_rate
self.header.last_frame = min(len(self._frames), 65535)
self.header.point_count = ppf
self.header.analog_count = np.prod(analog.shape)
self.header.analog_per_frame = analog.shape[0]
self.header.scale_factor = self._point_scale
self._write_metadata(handle)
self._write_frames(handle)
|
def write(self, handle):
    '''Write metadata and point + analog frames to a file handle.

    Builds the POINT, ANALOG and TRIAL parameter groups from the first
    buffered frame, syncs derived values into the header, then writes
    the metadata section followed by all frame data. Does nothing if no
    frames have been added.

    Parameters
    ----------
    handle : file
        Write metadata and C3D motion frames to the given file handle.
        The writer does not close the handle.
    '''
    if not self._frames:
        return

    # Helpers that add a parameter to whichever group the `group`
    # variable is currently bound to (rebound once per group below).
    def add(name, desc, bpe, format, bytes, *dimensions):
        '''Add a numeric parameter packed with `struct`.'''
        group.add_param(name,
                        desc=desc,
                        bytes_per_element=bpe,
                        bytes=struct.pack(format, bytes),
                        dimensions=list(dimensions))

    def add_str(name, desc, bytes, *dimensions):
        '''Add a string parameter (bytes_per_element == -1).'''
        group.add_param(name,
                        desc=desc,
                        bytes_per_element=-1,
                        bytes=bytes.encode('utf-8'),
                        dimensions=list(dimensions))

    def add_empty_array(name, desc, bpe):
        '''Add an empty-array parameter (zero-length dimension).'''
        group.add_param(name, desc=desc, bytes_per_element=bpe,
                        dimensions=[0])

    points, analog = self._frames[0]
    ppf = len(points)  # points per frame

    # POINT group
    group = self.add_group(1, 'POINT', 'POINT group')
    add('USED', 'Number of 3d markers', 2, '<H', ppf)
    # Frame count is a 16-bit header field, hence the 65535 cap.
    add('FRAMES', 'frame count', 2, '<H', min(65535, len(self._frames)))
    add('DATA_START', 'data block number', 2, '<H', 0)  # patched below
    add('SCALE', '3d scale factor', 4, '<f', self._point_scale)
    add('RATE', '3d data capture rate', 4, '<f', self._point_rate)
    add_str('X_SCREEN', 'X_SCREEN parameter', '+X', 2)
    add_str('Y_SCREEN', 'Y_SCREEN parameter', '+Y', 2)
    add_str('UNITS', '3d data units', self._point_units,
            len(self._point_units))
    add_str('LABELS', 'labels',
            ''.join('M%03d ' % i for i in range(ppf)), 5, ppf)
    add_str('DESCRIPTIONS', 'descriptions', ' ' * 16 * ppf, 16, ppf)

    # ANALOG group
    group = self.add_group(2, 'ANALOG', 'ANALOG group')
    add('USED', 'analog channel count', 2, '<H', analog.shape[0])
    add('RATE', 'analog samples per 3d frame', 4, '<f', analog.shape[1])
    add('GEN_SCALE', 'analog general scale factor', 4, '<f',
        self._gen_scale)
    add_empty_array('SCALE', 'analog channel scale factors', 4)
    add_empty_array('OFFSET', 'analog channel offsets', 2)

    # TRIAL group
    group = self.add_group(3, 'TRIAL', 'TRIAL group')
    add('ACTUAL_START_FIELD', 'actual start frame', 2, '<I', 1, 2)
    add('ACTUAL_END_FIELD', 'actual end frame', 2, '<I',
        len(self._frames), 2)

    # Sync derived parameter information into the header.
    blocks = self.parameter_blocks()
    self.get('POINT:DATA_START').bytes = struct.pack('<H', 2 + blocks)
    self.header.data_block = 2 + blocks
    self.header.frame_rate = self._point_rate
    self.header.last_frame = min(len(self._frames), 65535)
    self.header.point_count = ppf
    self.header.analog_count = np.prod(analog.shape)
    self.header.analog_per_frame = analog.shape[0]
    self.header.scale_factor = self._point_scale

    self._write_metadata(handle)
    self._write_frames(handle)
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle. | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L1023-L1094 | [
"def add_group(self, group_id, name, desc):\n '''Add a new parameter group.\n\n Parameters\n ----------\n group_id : int\n The numeric ID for a group to check or create.\n name : str, optional\n If a group is created, assign this name to the group.\n desc : str, optional\n If a group is created, assign this description to the group.\n\n Returns\n -------\n group : :class:`Group`\n A group with the given ID, name, and description.\n\n Raises\n ------\n KeyError\n If a group with a duplicate ID or name already exists.\n '''\n if group_id in self.groups:\n raise KeyError(group_id)\n name = name.upper()\n if name in self.groups:\n raise KeyError(name)\n group = self.groups[name] = self.groups[group_id] = Group(name, desc)\n return group\n",
"def get(self, group, default=None):\n '''Get a group or parameter.\n\n Parameters\n ----------\n group : str\n If this string contains a period (.), then the part before the\n period will be used to retrieve a group, and the part after the\n period will be used to retrieve a parameter from that group. If this\n string does not contain a period, then just a group will be\n returned.\n default : any\n Return this value if the named group and parameter are not found.\n\n Returns\n -------\n value : :class:`Group` or :class:`Param`\n Either a group or parameter with the specified name(s). If neither\n is found, returns the default value.\n '''\n if isinstance(group, int):\n return self.groups.get(group, default)\n group = group.upper()\n param = None\n if '.' in group:\n group, param = group.split('.', 1)\n if ':' in group:\n group, param = group.split(':', 1)\n if group not in self.groups:\n return default\n group = self.groups[group]\n if param is not None:\n return group.get(param, default)\n return group\n",
"def parameter_blocks(self):\n '''Compute the size (in 512B blocks) of the parameter section.'''\n bytes = 4. + sum(g.binary_size() for g in self.groups.values())\n return int(np.ceil(bytes / 512))\n",
"def _write_metadata(self, handle):\n '''Write metadata to a file handle.\n\n Parameters\n ----------\n handle : file\n Write metadata and C3D motion frames to the given file handle. The\n writer does not close the handle.\n '''\n self.check_metadata()\n\n # header\n self.header.write(handle)\n self._pad_block(handle)\n assert handle.tell() == 512\n\n # groups\n handle.write(struct.pack(\n 'BBBB', 0, 0, self.parameter_blocks(), PROCESSOR_INTEL))\n id_groups = sorted(\n (i, g) for i, g in self.groups.items() if isinstance(i, int))\n for group_id, group in id_groups:\n group.write(group_id, handle)\n\n # padding\n self._pad_block(handle)\n while handle.tell() != 512 * (self.header.data_block - 1):\n handle.write(b'\\x00' * 512)\n",
"def _write_frames(self, handle):\n '''Write our frame data to the given file handle.\n\n Parameters\n ----------\n handle : file\n Write metadata and C3D motion frames to the given file handle. The\n writer does not close the handle.\n '''\n assert handle.tell() == 512 * (self.header.data_block - 1)\n scale = abs(self.point_scale)\n is_float = self.point_scale < 0\n point_dtype = [np.int16, np.float32][is_float]\n point_scale = [scale, 1][is_float]\n point_format = 'if'[is_float]\n raw = np.empty((self.point_used, 4), point_dtype)\n for points, analog in self._frames:\n valid = points[:, 3] > -1\n raw[~valid, 3] = -1\n raw[valid, :3] = points[valid, :3] / self._point_scale\n raw[valid, 3] = (\n ((points[valid, 4]).astype(np.uint8) << 8) |\n (points[valid, 3] / scale).astype(np.uint16)\n )\n point = array.array(point_format)\n point.extend(raw.flatten())\n point.tofile(handle)\n analog = array.array(point_format)\n analog.extend(analog)\n analog.tofile(handle)\n self._pad_block(handle)\n",
"def add(name, desc, bpe, format, bytes, *dimensions):\n group.add_param(name,\n desc=desc,\n bytes_per_element=bpe,\n bytes=struct.pack(format, bytes),\n dimensions=list(dimensions))\n",
"def add_str(name, desc, bytes, *dimensions):\n group.add_param(name,\n desc=desc,\n bytes_per_element=-1,\n bytes=bytes.encode('utf-8'),\n dimensions=list(dimensions))\n",
"def add_empty_array(name, desc, bpe):\n group.add_param(name, desc=desc, bytes_per_element=bpe, dimensions=[0])\n"
] | class Writer(Manager):
'''This class writes metadata and frames to a C3D file.
For example, to read an existing C3D file, apply some sort of data
processing to the frames, and write out another C3D file::
>>> r = c3d.Reader(open('data.c3d', 'rb'))
>>> w = c3d.Writer()
>>> w.add_frames(process_frames_somehow(r.read_frames()))
>>> with open('smoothed.c3d', 'wb') as handle:
>>> w.write(handle)
Parameters
----------
point_rate : float, optional
The frame rate of the data. Defaults to 480.
analog_rate : float, optional
The number of analog samples per frame. Defaults to 0.
point_scale : float, optional
The scale factor for point data. Defaults to -1 (i.e., "check the
POINT:SCALE parameter").
point_units : str, optional
The units that the point numbers represent. Defaults to ``'mm '``.
gen_scale : float, optional
General scaling factor for data. Defaults to 1.
'''
def __init__(self,
point_rate=480.,
analog_rate=0.,
point_scale=-1.,
point_units='mm ',
gen_scale=1.):
'''Set metadata for this writer.
'''
super(Writer, self).__init__()
self._point_rate = point_rate
self._analog_rate = analog_rate
self._point_scale = point_scale
self._point_units = point_units
self._gen_scale = gen_scale
self._frames = []
def add_frames(self, frames):
'''Add frames to this writer instance.
Parameters
----------
frames : sequence of (point, analog) tuples
A sequence of frame data to add to the writer.
'''
self._frames.extend(frames)
def _pad_block(self, handle):
'''Pad the file with 0s to the end of the next block boundary.'''
extra = handle.tell() % 512
if extra:
handle.write(b'\x00' * (512 - extra))
def _write_metadata(self, handle):
'''Write metadata to a file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
self.check_metadata()
# header
self.header.write(handle)
self._pad_block(handle)
assert handle.tell() == 512
# groups
handle.write(struct.pack(
'BBBB', 0, 0, self.parameter_blocks(), PROCESSOR_INTEL))
id_groups = sorted(
(i, g) for i, g in self.groups.items() if isinstance(i, int))
for group_id, group in id_groups:
group.write(group_id, handle)
# padding
self._pad_block(handle)
while handle.tell() != 512 * (self.header.data_block - 1):
handle.write(b'\x00' * 512)
def _write_frames(self, handle):
'''Write our frame data to the given file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
assert handle.tell() == 512 * (self.header.data_block - 1)
scale = abs(self.point_scale)
is_float = self.point_scale < 0
point_dtype = [np.int16, np.float32][is_float]
point_scale = [scale, 1][is_float]
point_format = 'if'[is_float]
raw = np.empty((self.point_used, 4), point_dtype)
for points, analog in self._frames:
valid = points[:, 3] > -1
raw[~valid, 3] = -1
raw[valid, :3] = points[valid, :3] / self._point_scale
raw[valid, 3] = (
((points[valid, 4]).astype(np.uint8) << 8) |
(points[valid, 3] / scale).astype(np.uint16)
)
point = array.array(point_format)
point.extend(raw.flatten())
point.tofile(handle)
analog = array.array(point_format)
analog.extend(analog)
analog.tofile(handle)
self._pad_block(handle)
|
def flo(string):
    '''Return the string given by param formatted with the callers locals.'''
    # Peek one frame up the call stack to grab the caller's local
    # namespace; drop our own frame reference promptly so we do not
    # create a reference cycle.
    caller_ns = {}
    this_frame = inspect.currentframe()
    try:
        caller_ns = this_frame.f_back.f_locals
    finally:
        del this_frame
    return string.format(**caller_ns)
import sys
from os.path import dirname
from fabric.api import execute, local, task
from fabric.context_managers import warn_only, quiet
# inspired by: http://stackoverflow.com/a/6618825
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
cyan = _wrap_with('36')
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
@task
def clean(deltox=False):
'''Delete temporary files not under version control.
Args:
deltox: If True, delete virtual environments used by tox
'''
basedir = dirname(__file__)
print(cyan('delete temp files and dirs for packaging'))
local(flo(
'rm -rf '
'{basedir}/.eggs/ '
'{basedir}/utlz.egg-info/ '
'{basedir}/dist '
'{basedir}/README '
'{basedir}/build/ '
))
print(cyan('\ndelete temp files and dirs for editing'))
local(flo(
'rm -rf '
'{basedir}/.cache '
'{basedir}/.ropeproject '
))
print(cyan('\ndelete bytecode compiled versions of the python src'))
# cf. http://stackoverflow.com/a/30659970
local(flo('find {basedir}/utlz {basedir}/tests ') +
'\( -name \*pyc -o -name \*.pyo -o -name __pycache__ '
'-o -name \*.so -o -name \*.o -o -name \*.c \) '
'-prune '
'-exec rm -rf {} +')
if deltox:
print(cyan('\ndelete tox virual environments'))
local(flo('cd {basedir} && rm -rf .tox/'))
def _pyenv_exists():
with quiet():
res = local('pyenv')
if res.return_code == 127:
return False
return True
def _determine_latest_pythons():
# TODO implementation
return ['2.6.9', '2.7.13', '3.3.6', '3.4.6', '3.5.3', '3.6.2']
def _highest_minor(pythons):
highest = pythons[-1]
major, minor, patch = highest.split('.', 2)
return flo('{major}.{minor}')
@task
def pythons():
'''Install latest pythons with pyenv.
The python version will be activated in the projects base dir.
Will skip already installed latest python versions.
'''
if not _pyenv_exists():
print('\npyenv is not installed. You can install it with fabsetup '
'(https://github.com/theno/fabsetup):\n\n ' +
cyan('mkdir ~/repos && cd ~/repos\n '
'git clone https://github.com/theno/fabsetup.git\n '
'cd fabsetup && fab setup.pyenv -H localhost'))
return 1
latest_pythons = _determine_latest_pythons()
print(cyan('\n## install latest python versions'))
for version in latest_pythons:
local(flo('pyenv install --skip-existing {version}'))
print(cyan('\n## activate pythons'))
basedir = dirname(__file__)
latest_pythons_str = ' '.join(latest_pythons)
local(flo('cd {basedir} && pyenv local system {latest_pythons_str}'))
highest_python = latest_pythons[-1]
print(cyan(flo(
'\n## prepare Python-{highest_python} for testing and packaging')))
packages_for_testing = 'pytest tox'
packages_for_packaging = 'pypandoc twine'
local(flo('~/.pyenv/versions/{highest_python}/bin/pip install --upgrade '
'pip {packages_for_testing} {packages_for_packaging}'))
def _local_needs_pythons(*args, **kwargs):
with warn_only():
res = local(*args, **kwargs)
print(res)
if res.return_code == 127:
print(cyan('missing python version(s), '
'run fabric task `pythons`:\n\n '
'fab pythons\n'))
sys.exit(1)
@task
def tox(args=''):
'''Run tox.
Build package and run unit tests against several pythons.
Args:
args: Optional arguments passed to tox.
Example:
fab tox:'-e py36 -r'
'''
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
# e.g. highest_minor_python: '3.6'
highest_minor_python = _highest_minor(latest_pythons)
_local_needs_pythons(flo('cd {basedir} && '
'python{highest_minor_python} -m tox {args}'))
@task
def test(args='', py=None):
'''Run unit tests.
Keyword-Args:
args: Optional arguments passed to pytest
py: python version to run the tests against
Example:
fab test:args=-s,py=py27
'''
basedir = dirname(__file__)
if py is None:
# e.g. envlist: 'envlist = py26,py27,py33,py34,py35,py36'
envlist = local(flo('cd {basedir} && grep envlist tox.ini'),
capture=True)
_, py = envlist.rsplit(',', 1)
with warn_only():
res = local(flo('cd {basedir} && '
"PYTHONPATH='.' .tox/{py}/bin/python -m pytest {args}"))
print(res)
if res.return_code == 127:
print(cyan('missing tox virtualenv, '
'run fabric task `tox`:\n\n '
'fab tox\n'))
sys.exit(1)
@task
def pypi():
'''Build package and upload to pypi.'''
if query_yes_no('version updated in setup.py?'):
print(cyan('\n## clean-up\n'))
execute(clean)
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
# e.g. highest_minor: '3.6'
highest_minor = _highest_minor(latest_pythons)
python = flo('python{highest_minor}')
print(cyan('\n## build package'))
_local_needs_pythons(flo('cd {basedir} && {python} setup.py sdist'))
print(cyan('\n## upload package'))
local(flo('cd {basedir} && {python} -m twine upload dist/*'))
|
theno/utlz | fabfile.py | _wrap_with | python | def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner | Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/fabfile.py#L22-L36 | null | import inspect
import sys
from os.path import dirname
from fabric.api import execute, local, task
from fabric.context_managers import warn_only, quiet
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
cyan = _wrap_with('36')
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
@task
def clean(deltox=False):
'''Delete temporary files not under version control.
Args:
deltox: If True, delete virtual environments used by tox
'''
basedir = dirname(__file__)
print(cyan('delete temp files and dirs for packaging'))
local(flo(
'rm -rf '
'{basedir}/.eggs/ '
'{basedir}/utlz.egg-info/ '
'{basedir}/dist '
'{basedir}/README '
'{basedir}/build/ '
))
print(cyan('\ndelete temp files and dirs for editing'))
local(flo(
'rm -rf '
'{basedir}/.cache '
'{basedir}/.ropeproject '
))
print(cyan('\ndelete bytecode compiled versions of the python src'))
# cf. http://stackoverflow.com/a/30659970
local(flo('find {basedir}/utlz {basedir}/tests ') +
'\( -name \*pyc -o -name \*.pyo -o -name __pycache__ '
'-o -name \*.so -o -name \*.o -o -name \*.c \) '
'-prune '
'-exec rm -rf {} +')
if deltox:
print(cyan('\ndelete tox virual environments'))
local(flo('cd {basedir} && rm -rf .tox/'))
def _pyenv_exists():
with quiet():
res = local('pyenv')
if res.return_code == 127:
return False
return True
def _determine_latest_pythons():
# TODO implementation
return ['2.6.9', '2.7.13', '3.3.6', '3.4.6', '3.5.3', '3.6.2']
def _highest_minor(pythons):
highest = pythons[-1]
major, minor, patch = highest.split('.', 2)
return flo('{major}.{minor}')
@task
def pythons():
'''Install latest pythons with pyenv.
The python version will be activated in the projects base dir.
Will skip already installed latest python versions.
'''
if not _pyenv_exists():
print('\npyenv is not installed. You can install it with fabsetup '
'(https://github.com/theno/fabsetup):\n\n ' +
cyan('mkdir ~/repos && cd ~/repos\n '
'git clone https://github.com/theno/fabsetup.git\n '
'cd fabsetup && fab setup.pyenv -H localhost'))
return 1
latest_pythons = _determine_latest_pythons()
print(cyan('\n## install latest python versions'))
for version in latest_pythons:
local(flo('pyenv install --skip-existing {version}'))
print(cyan('\n## activate pythons'))
basedir = dirname(__file__)
latest_pythons_str = ' '.join(latest_pythons)
local(flo('cd {basedir} && pyenv local system {latest_pythons_str}'))
highest_python = latest_pythons[-1]
print(cyan(flo(
'\n## prepare Python-{highest_python} for testing and packaging')))
packages_for_testing = 'pytest tox'
packages_for_packaging = 'pypandoc twine'
local(flo('~/.pyenv/versions/{highest_python}/bin/pip install --upgrade '
'pip {packages_for_testing} {packages_for_packaging}'))
def _local_needs_pythons(*args, **kwargs):
with warn_only():
res = local(*args, **kwargs)
print(res)
if res.return_code == 127:
print(cyan('missing python version(s), '
'run fabric task `pythons`:\n\n '
'fab pythons\n'))
sys.exit(1)
@task
def tox(args=''):
'''Run tox.
Build package and run unit tests against several pythons.
Args:
args: Optional arguments passed to tox.
Example:
fab tox:'-e py36 -r'
'''
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
# e.g. highest_minor_python: '3.6'
highest_minor_python = _highest_minor(latest_pythons)
_local_needs_pythons(flo('cd {basedir} && '
'python{highest_minor_python} -m tox {args}'))
@task
def test(args='', py=None):
'''Run unit tests.
Keyword-Args:
args: Optional arguments passed to pytest
py: python version to run the tests against
Example:
fab test:args=-s,py=py27
'''
basedir = dirname(__file__)
if py is None:
# e.g. envlist: 'envlist = py26,py27,py33,py34,py35,py36'
envlist = local(flo('cd {basedir} && grep envlist tox.ini'),
capture=True)
_, py = envlist.rsplit(',', 1)
with warn_only():
res = local(flo('cd {basedir} && '
"PYTHONPATH='.' .tox/{py}/bin/python -m pytest {args}"))
print(res)
if res.return_code == 127:
print(cyan('missing tox virtualenv, '
'run fabric task `tox`:\n\n '
'fab tox\n'))
sys.exit(1)
@task
def pypi():
'''Build package and upload to pypi.'''
if query_yes_no('version updated in setup.py?'):
print(cyan('\n## clean-up\n'))
execute(clean)
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
# e.g. highest_minor: '3.6'
highest_minor = _highest_minor(latest_pythons)
python = flo('python{highest_minor}')
print(cyan('\n## build package'))
_local_needs_pythons(flo('cd {basedir} && {python} setup.py sdist'))
print(cyan('\n## upload package'))
local(flo('cd {basedir} && {python} -m twine upload dist/*'))
|
theno/utlz | fabfile.py | clean | python | def clean(deltox=False):
'''Delete temporary files not under version control.
Args:
deltox: If True, delete virtual environments used by tox
'''
basedir = dirname(__file__)
print(cyan('delete temp files and dirs for packaging'))
local(flo(
'rm -rf '
'{basedir}/.eggs/ '
'{basedir}/utlz.egg-info/ '
'{basedir}/dist '
'{basedir}/README '
'{basedir}/build/ '
))
print(cyan('\ndelete temp files and dirs for editing'))
local(flo(
'rm -rf '
'{basedir}/.cache '
'{basedir}/.ropeproject '
))
print(cyan('\ndelete bytecode compiled versions of the python src'))
# cf. http://stackoverflow.com/a/30659970
local(flo('find {basedir}/utlz {basedir}/tests ') +
'\( -name \*pyc -o -name \*.pyo -o -name __pycache__ '
'-o -name \*.so -o -name \*.o -o -name \*.c \) '
'-prune '
'-exec rm -rf {} +')
if deltox:
print(cyan('\ndelete tox virual environments'))
local(flo('cd {basedir} && rm -rf .tox/')) | Delete temporary files not under version control.
Args:
deltox: If True, delete virtual environments used by tox | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/fabfile.py#L76-L112 | [
"def flo(string):\n '''Return the string given by param formatted with the callers locals.'''\n callers_locals = {}\n frame = inspect.currentframe()\n try:\n outerframe = frame.f_back\n callers_locals = outerframe.f_locals\n finally:\n del frame\n return string.format(**callers_locals)\n",
"def inner(text, bold=False):\n '''Inner color function.'''\n code = color_code\n if bold:\n code = flo(\"1;{code}\")\n return flo('\\033[{code}m{text}\\033[0m')\n"
] | import inspect
import sys
from os.path import dirname
from fabric.api import execute, local, task
from fabric.context_managers import warn_only, quiet
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
cyan = _wrap_with('36')
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
@task
def _pyenv_exists():
with quiet():
res = local('pyenv')
if res.return_code == 127:
return False
return True
def _determine_latest_pythons():
# TODO implementation
return ['2.6.9', '2.7.13', '3.3.6', '3.4.6', '3.5.3', '3.6.2']
def _highest_minor(pythons):
highest = pythons[-1]
major, minor, patch = highest.split('.', 2)
return flo('{major}.{minor}')
@task
def pythons():
'''Install latest pythons with pyenv.
The python version will be activated in the projects base dir.
Will skip already installed latest python versions.
'''
if not _pyenv_exists():
print('\npyenv is not installed. You can install it with fabsetup '
'(https://github.com/theno/fabsetup):\n\n ' +
cyan('mkdir ~/repos && cd ~/repos\n '
'git clone https://github.com/theno/fabsetup.git\n '
'cd fabsetup && fab setup.pyenv -H localhost'))
return 1
latest_pythons = _determine_latest_pythons()
print(cyan('\n## install latest python versions'))
for version in latest_pythons:
local(flo('pyenv install --skip-existing {version}'))
print(cyan('\n## activate pythons'))
basedir = dirname(__file__)
latest_pythons_str = ' '.join(latest_pythons)
local(flo('cd {basedir} && pyenv local system {latest_pythons_str}'))
highest_python = latest_pythons[-1]
print(cyan(flo(
'\n## prepare Python-{highest_python} for testing and packaging')))
packages_for_testing = 'pytest tox'
packages_for_packaging = 'pypandoc twine'
local(flo('~/.pyenv/versions/{highest_python}/bin/pip install --upgrade '
'pip {packages_for_testing} {packages_for_packaging}'))
def _local_needs_pythons(*args, **kwargs):
with warn_only():
res = local(*args, **kwargs)
print(res)
if res.return_code == 127:
print(cyan('missing python version(s), '
'run fabric task `pythons`:\n\n '
'fab pythons\n'))
sys.exit(1)
@task
def tox(args=''):
'''Run tox.
Build package and run unit tests against several pythons.
Args:
args: Optional arguments passed to tox.
Example:
fab tox:'-e py36 -r'
'''
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
# e.g. highest_minor_python: '3.6'
highest_minor_python = _highest_minor(latest_pythons)
_local_needs_pythons(flo('cd {basedir} && '
'python{highest_minor_python} -m tox {args}'))
@task
def test(args='', py=None):
'''Run unit tests.
Keyword-Args:
args: Optional arguments passed to pytest
py: python version to run the tests against
Example:
fab test:args=-s,py=py27
'''
basedir = dirname(__file__)
if py is None:
# e.g. envlist: 'envlist = py26,py27,py33,py34,py35,py36'
envlist = local(flo('cd {basedir} && grep envlist tox.ini'),
capture=True)
_, py = envlist.rsplit(',', 1)
with warn_only():
res = local(flo('cd {basedir} && '
"PYTHONPATH='.' .tox/{py}/bin/python -m pytest {args}"))
print(res)
if res.return_code == 127:
print(cyan('missing tox virtualenv, '
'run fabric task `tox`:\n\n '
'fab tox\n'))
sys.exit(1)
@task
def pypi():
'''Build package and upload to pypi.'''
if query_yes_no('version updated in setup.py?'):
print(cyan('\n## clean-up\n'))
execute(clean)
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
# e.g. highest_minor: '3.6'
highest_minor = _highest_minor(latest_pythons)
python = flo('python{highest_minor}')
print(cyan('\n## build package'))
_local_needs_pythons(flo('cd {basedir} && {python} setup.py sdist'))
print(cyan('\n## upload package'))
local(flo('cd {basedir} && {python} -m twine upload dist/*'))
|
theno/utlz | fabfile.py | pythons | python | def pythons():
'''Install latest pythons with pyenv.
The python version will be activated in the projects base dir.
Will skip already installed latest python versions.
'''
if not _pyenv_exists():
print('\npyenv is not installed. You can install it with fabsetup '
'(https://github.com/theno/fabsetup):\n\n ' +
cyan('mkdir ~/repos && cd ~/repos\n '
'git clone https://github.com/theno/fabsetup.git\n '
'cd fabsetup && fab setup.pyenv -H localhost'))
return 1
latest_pythons = _determine_latest_pythons()
print(cyan('\n## install latest python versions'))
for version in latest_pythons:
local(flo('pyenv install --skip-existing {version}'))
print(cyan('\n## activate pythons'))
basedir = dirname(__file__)
latest_pythons_str = ' '.join(latest_pythons)
local(flo('cd {basedir} && pyenv local system {latest_pythons_str}'))
highest_python = latest_pythons[-1]
print(cyan(flo(
'\n## prepare Python-{highest_python} for testing and packaging')))
packages_for_testing = 'pytest tox'
packages_for_packaging = 'pypandoc twine'
local(flo('~/.pyenv/versions/{highest_python}/bin/pip install --upgrade '
'pip {packages_for_testing} {packages_for_packaging}')) | Install latest pythons with pyenv.
The python version will be activated in the projects base dir.
Will skip already installed latest python versions. | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/fabfile.py#L135-L167 | [
"def flo(string):\n '''Return the string given by param formatted with the callers locals.'''\n callers_locals = {}\n frame = inspect.currentframe()\n try:\n outerframe = frame.f_back\n callers_locals = outerframe.f_locals\n finally:\n del frame\n return string.format(**callers_locals)\n",
"def _pyenv_exists():\n with quiet():\n res = local('pyenv')\n if res.return_code == 127:\n return False\n return True\n",
"def _determine_latest_pythons():\n # TODO implementation\n return ['2.6.9', '2.7.13', '3.3.6', '3.4.6', '3.5.3', '3.6.2']\n",
"def inner(text, bold=False):\n '''Inner color function.'''\n code = color_code\n if bold:\n code = flo(\"1;{code}\")\n return flo('\\033[{code}m{text}\\033[0m')\n"
] | import inspect
import sys
from os.path import dirname
from fabric.api import execute, local, task
from fabric.context_managers import warn_only, quiet
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
cyan = _wrap_with('36')
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
@task
def clean(deltox=False):
'''Delete temporary files not under version control.
Args:
deltox: If True, delete virtual environments used by tox
'''
basedir = dirname(__file__)
print(cyan('delete temp files and dirs for packaging'))
local(flo(
'rm -rf '
'{basedir}/.eggs/ '
'{basedir}/utlz.egg-info/ '
'{basedir}/dist '
'{basedir}/README '
'{basedir}/build/ '
))
print(cyan('\ndelete temp files and dirs for editing'))
local(flo(
'rm -rf '
'{basedir}/.cache '
'{basedir}/.ropeproject '
))
print(cyan('\ndelete bytecode compiled versions of the python src'))
# cf. http://stackoverflow.com/a/30659970
local(flo('find {basedir}/utlz {basedir}/tests ') +
'\( -name \*pyc -o -name \*.pyo -o -name __pycache__ '
'-o -name \*.so -o -name \*.o -o -name \*.c \) '
'-prune '
'-exec rm -rf {} +')
if deltox:
print(cyan('\ndelete tox virual environments'))
local(flo('cd {basedir} && rm -rf .tox/'))
def _pyenv_exists():
with quiet():
res = local('pyenv')
if res.return_code == 127:
return False
return True
def _determine_latest_pythons():
# TODO implementation
return ['2.6.9', '2.7.13', '3.3.6', '3.4.6', '3.5.3', '3.6.2']
def _highest_minor(pythons):
highest = pythons[-1]
major, minor, patch = highest.split('.', 2)
return flo('{major}.{minor}')
@task
def _local_needs_pythons(*args, **kwargs):
with warn_only():
res = local(*args, **kwargs)
print(res)
if res.return_code == 127:
print(cyan('missing python version(s), '
'run fabric task `pythons`:\n\n '
'fab pythons\n'))
sys.exit(1)
@task
def tox(args=''):
'''Run tox.
Build package and run unit tests against several pythons.
Args:
args: Optional arguments passed to tox.
Example:
fab tox:'-e py36 -r'
'''
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
# e.g. highest_minor_python: '3.6'
highest_minor_python = _highest_minor(latest_pythons)
_local_needs_pythons(flo('cd {basedir} && '
'python{highest_minor_python} -m tox {args}'))
@task
def test(args='', py=None):
'''Run unit tests.
Keyword-Args:
args: Optional arguments passed to pytest
py: python version to run the tests against
Example:
fab test:args=-s,py=py27
'''
basedir = dirname(__file__)
if py is None:
# e.g. envlist: 'envlist = py26,py27,py33,py34,py35,py36'
envlist = local(flo('cd {basedir} && grep envlist tox.ini'),
capture=True)
_, py = envlist.rsplit(',', 1)
with warn_only():
res = local(flo('cd {basedir} && '
"PYTHONPATH='.' .tox/{py}/bin/python -m pytest {args}"))
print(res)
if res.return_code == 127:
print(cyan('missing tox virtualenv, '
'run fabric task `tox`:\n\n '
'fab tox\n'))
sys.exit(1)
@task
def pypi():
'''Build package and upload to pypi.'''
if query_yes_no('version updated in setup.py?'):
print(cyan('\n## clean-up\n'))
execute(clean)
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
# e.g. highest_minor: '3.6'
highest_minor = _highest_minor(latest_pythons)
python = flo('python{highest_minor}')
print(cyan('\n## build package'))
_local_needs_pythons(flo('cd {basedir} && {python} setup.py sdist'))
print(cyan('\n## upload package'))
local(flo('cd {basedir} && {python} -m twine upload dist/*'))
|
theno/utlz | fabfile.py | tox | python | def tox(args=''):
'''Run tox.
Build package and run unit tests against several pythons.
Args:
args: Optional arguments passed to tox.
Example:
fab tox:'-e py36 -r'
'''
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
# e.g. highest_minor_python: '3.6'
highest_minor_python = _highest_minor(latest_pythons)
_local_needs_pythons(flo('cd {basedir} && '
'python{highest_minor_python} -m tox {args}')) | Run tox.
Build package and run unit tests against several pythons.
Args:
args: Optional arguments passed to tox.
Example:
fab tox:'-e py36 -r' | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/fabfile.py#L182-L200 | [
"def flo(string):\n '''Return the string given by param formatted with the callers locals.'''\n callers_locals = {}\n frame = inspect.currentframe()\n try:\n outerframe = frame.f_back\n callers_locals = outerframe.f_locals\n finally:\n del frame\n return string.format(**callers_locals)\n",
"def _determine_latest_pythons():\n # TODO implementation\n return ['2.6.9', '2.7.13', '3.3.6', '3.4.6', '3.5.3', '3.6.2']\n",
"def _highest_minor(pythons):\n highest = pythons[-1]\n major, minor, patch = highest.split('.', 2)\n return flo('{major}.{minor}')\n",
"def _local_needs_pythons(*args, **kwargs):\n with warn_only():\n res = local(*args, **kwargs)\n print(res)\n if res.return_code == 127:\n print(cyan('missing python version(s), '\n 'run fabric task `pythons`:\\n\\n '\n 'fab pythons\\n'))\n sys.exit(1)\n"
] | import inspect
import sys
from os.path import dirname
from fabric.api import execute, local, task
from fabric.context_managers import warn_only, quiet
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
cyan = _wrap_with('36')
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
@task
def clean(deltox=False):
'''Delete temporary files not under version control.
Args:
deltox: If True, delete virtual environments used by tox
'''
basedir = dirname(__file__)
print(cyan('delete temp files and dirs for packaging'))
local(flo(
'rm -rf '
'{basedir}/.eggs/ '
'{basedir}/utlz.egg-info/ '
'{basedir}/dist '
'{basedir}/README '
'{basedir}/build/ '
))
print(cyan('\ndelete temp files and dirs for editing'))
local(flo(
'rm -rf '
'{basedir}/.cache '
'{basedir}/.ropeproject '
))
print(cyan('\ndelete bytecode compiled versions of the python src'))
# cf. http://stackoverflow.com/a/30659970
local(flo('find {basedir}/utlz {basedir}/tests ') +
'\( -name \*pyc -o -name \*.pyo -o -name __pycache__ '
'-o -name \*.so -o -name \*.o -o -name \*.c \) '
'-prune '
'-exec rm -rf {} +')
if deltox:
print(cyan('\ndelete tox virual environments'))
local(flo('cd {basedir} && rm -rf .tox/'))
def _pyenv_exists():
with quiet():
res = local('pyenv')
if res.return_code == 127:
return False
return True
def _determine_latest_pythons():
# TODO implementation
return ['2.6.9', '2.7.13', '3.3.6', '3.4.6', '3.5.3', '3.6.2']
def _highest_minor(pythons):
highest = pythons[-1]
major, minor, patch = highest.split('.', 2)
return flo('{major}.{minor}')
@task
def pythons():
'''Install latest pythons with pyenv.
The python version will be activated in the projects base dir.
Will skip already installed latest python versions.
'''
if not _pyenv_exists():
print('\npyenv is not installed. You can install it with fabsetup '
'(https://github.com/theno/fabsetup):\n\n ' +
cyan('mkdir ~/repos && cd ~/repos\n '
'git clone https://github.com/theno/fabsetup.git\n '
'cd fabsetup && fab setup.pyenv -H localhost'))
return 1
latest_pythons = _determine_latest_pythons()
print(cyan('\n## install latest python versions'))
for version in latest_pythons:
local(flo('pyenv install --skip-existing {version}'))
print(cyan('\n## activate pythons'))
basedir = dirname(__file__)
latest_pythons_str = ' '.join(latest_pythons)
local(flo('cd {basedir} && pyenv local system {latest_pythons_str}'))
highest_python = latest_pythons[-1]
print(cyan(flo(
'\n## prepare Python-{highest_python} for testing and packaging')))
packages_for_testing = 'pytest tox'
packages_for_packaging = 'pypandoc twine'
local(flo('~/.pyenv/versions/{highest_python}/bin/pip install --upgrade '
'pip {packages_for_testing} {packages_for_packaging}'))
def _local_needs_pythons(*args, **kwargs):
with warn_only():
res = local(*args, **kwargs)
print(res)
if res.return_code == 127:
print(cyan('missing python version(s), '
'run fabric task `pythons`:\n\n '
'fab pythons\n'))
sys.exit(1)
@task
@task
def test(args='', py=None):
'''Run unit tests.
Keyword-Args:
args: Optional arguments passed to pytest
py: python version to run the tests against
Example:
fab test:args=-s,py=py27
'''
basedir = dirname(__file__)
if py is None:
# e.g. envlist: 'envlist = py26,py27,py33,py34,py35,py36'
envlist = local(flo('cd {basedir} && grep envlist tox.ini'),
capture=True)
_, py = envlist.rsplit(',', 1)
with warn_only():
res = local(flo('cd {basedir} && '
"PYTHONPATH='.' .tox/{py}/bin/python -m pytest {args}"))
print(res)
if res.return_code == 127:
print(cyan('missing tox virtualenv, '
'run fabric task `tox`:\n\n '
'fab tox\n'))
sys.exit(1)
@task
def pypi():
'''Build package and upload to pypi.'''
if query_yes_no('version updated in setup.py?'):
print(cyan('\n## clean-up\n'))
execute(clean)
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
# e.g. highest_minor: '3.6'
highest_minor = _highest_minor(latest_pythons)
python = flo('python{highest_minor}')
print(cyan('\n## build package'))
_local_needs_pythons(flo('cd {basedir} && {python} setup.py sdist'))
print(cyan('\n## upload package'))
local(flo('cd {basedir} && {python} -m twine upload dist/*'))
|
theno/utlz | fabfile.py | pypi | python | def pypi():
'''Build package and upload to pypi.'''
if query_yes_no('version updated in setup.py?'):
print(cyan('\n## clean-up\n'))
execute(clean)
basedir = dirname(__file__)
latest_pythons = _determine_latest_pythons()
# e.g. highest_minor: '3.6'
highest_minor = _highest_minor(latest_pythons)
python = flo('python{highest_minor}')
print(cyan('\n## build package'))
_local_needs_pythons(flo('cd {basedir} && {python} setup.py sdist'))
print(cyan('\n## upload package'))
local(flo('cd {basedir} && {python} -m twine upload dist/*')) | Build package and upload to pypi. | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/fabfile.py#L235-L253 | [
"def flo(string):\n '''Return the string given by param formatted with the callers locals.'''\n callers_locals = {}\n frame = inspect.currentframe()\n try:\n outerframe = frame.f_back\n callers_locals = outerframe.f_locals\n finally:\n del frame\n return string.format(**callers_locals)\n",
"def query_yes_no(question, default=\"yes\"):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\", or None (which means an answer\n of the user is required).\n\n The \"answer\" return value is True for \"yes\" or False for \"no\".\n \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True, '1': True,\n \"no\": False, \"n\": False, '0': False, }\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n",
"def _determine_latest_pythons():\n # TODO implementation\n return ['2.6.9', '2.7.13', '3.3.6', '3.4.6', '3.5.3', '3.6.2']\n",
"def _highest_minor(pythons):\n highest = pythons[-1]\n major, minor, patch = highest.split('.', 2)\n return flo('{major}.{minor}')\n",
"def _local_needs_pythons(*args, **kwargs):\n with warn_only():\n res = local(*args, **kwargs)\n print(res)\n if res.return_code == 127:\n print(cyan('missing python version(s), '\n 'run fabric task `pythons`:\\n\\n '\n 'fab pythons\\n'))\n sys.exit(1)\n",
"def inner(text, bold=False):\n '''Inner color function.'''\n code = color_code\n if bold:\n code = flo(\"1;{code}\")\n return flo('\\033[{code}m{text}\\033[0m')\n"
] | import inspect
import sys
from os.path import dirname
from fabric.api import execute, local, task
from fabric.context_managers import warn_only, quiet
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
cyan = _wrap_with('36')
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
@task
def clean(deltox=False):
'''Delete temporary files not under version control.
Args:
deltox: If True, delete virtual environments used by tox
'''
basedir = dirname(__file__)
print(cyan('delete temp files and dirs for packaging'))
local(flo(
'rm -rf '
'{basedir}/.eggs/ '
'{basedir}/utlz.egg-info/ '
'{basedir}/dist '
'{basedir}/README '
'{basedir}/build/ '
))
print(cyan('\ndelete temp files and dirs for editing'))
local(flo(
'rm -rf '
'{basedir}/.cache '
'{basedir}/.ropeproject '
))
print(cyan('\ndelete bytecode compiled versions of the python src'))
# cf. http://stackoverflow.com/a/30659970
local(flo('find {basedir}/utlz {basedir}/tests ') +
'\( -name \*pyc -o -name \*.pyo -o -name __pycache__ '
'-o -name \*.so -o -name \*.o -o -name \*.c \) '
'-prune '
'-exec rm -rf {} +')
if deltox:
print(cyan('\ndelete tox virual environments'))
local(flo('cd {basedir} && rm -rf .tox/'))
def _pyenv_exists():
with quiet():
res = local('pyenv')
if res.return_code == 127:
return False
return True
def _determine_latest_pythons():
# TODO implementation
return ['2.6.9', '2.7.13', '3.3.6', '3.4.6', '3.5.3', '3.6.2']
def _highest_minor(pythons):
highest = pythons[-1]
major, minor, patch = highest.split('.', 2)
return flo('{major}.{minor}')
@task
def pythons():
    '''Install latest pythons with pyenv.

    The python version will be activated in the projects base dir.
    Will skip already installed latest python versions.
    '''
    # pyenv is required; point the user at fabsetup for installing it
    if not _pyenv_exists():
        print('\npyenv is not installed. You can install it with fabsetup '
              '(https://github.com/theno/fabsetup):\n\n    ' +
              cyan('mkdir ~/repos && cd ~/repos\n    '
                   'git clone https://github.com/theno/fabsetup.git\n    '
                   'cd fabsetup && fab setup.pyenv -H localhost'))
        return 1
    latest_pythons = _determine_latest_pythons()
    # NOTE: flo() interpolates the surrounding local variable names
    # (version, basedir, latest_pythons_str, ...) into the shell commands
    print(cyan('\n## install latest python versions'))
    for version in latest_pythons:
        local(flo('pyenv install --skip-existing {version}'))
    # make all installed versions (plus the system python) available in
    # the project base dir via a .python-version file
    print(cyan('\n## activate pythons'))
    basedir = dirname(__file__)
    latest_pythons_str = ' '.join(latest_pythons)
    local(flo('cd {basedir} && pyenv local system {latest_pythons_str}'))
    # equip the newest python with the test and packaging tooling
    highest_python = latest_pythons[-1]
    print(cyan(flo(
        '\n## prepare Python-{highest_python} for testing and packaging')))
    packages_for_testing = 'pytest tox'
    packages_for_packaging = 'pypandoc twine'
    local(flo('~/.pyenv/versions/{highest_python}/bin/pip install --upgrade '
              'pip {packages_for_testing} {packages_for_packaging}'))
def _local_needs_pythons(*args, **kwargs):
    '''Run `local(*args, **kwargs)`; exit if a python interpreter is missing.

    A return code of 127 ("command not found") indicates the required
    python version is not installed; then print a hint to run the fabric
    task `pythons` and exit with status 1.
    '''
    with warn_only():
        result = local(*args, **kwargs)
    print(result)
    if result.return_code != 127:
        return
    print(cyan('missing python version(s), '
               'run fabric task `pythons`:\n\n    '
               'fab pythons\n'))
    sys.exit(1)
@task
def tox(args=''):
    '''Run tox.

    Build package and run unit tests against several pythons.

    Args:
        args: Optional arguments passed to tox.
    Example:

        fab tox:'-e py36 -r'
    '''
    basedir = dirname(__file__)
    latest_pythons = _determine_latest_pythons()
    # e.g. highest_minor_python: '3.6'
    highest_minor_python = _highest_minor(latest_pythons)
    # flo() interpolates the locals basedir, highest_minor_python and args;
    # _local_needs_pythons() exits with a hint if that interpreter is missing
    _local_needs_pythons(flo('cd {basedir} && '
                             'python{highest_minor_python} -m tox {args}'))
@task
def test(args='', py=None):
    '''Run unit tests.

    Keyword-Args:
        args: Optional arguments passed to pytest
        py: python version to run the tests against
    Example:

        fab test:args=-s,py=py27
    '''
    basedir = dirname(__file__)
    if py is None:
        # default to the last entry of the tox envlist,
        # e.g. envlist: 'envlist = py26,py27,py33,py34,py35,py36'
        envlist = local(flo('cd {basedir} && grep envlist tox.ini'),
                        capture=True)
        _, py = envlist.rsplit(',', 1)
    # warn_only: do not abort on a non-zero exit so we can inspect the code
    with warn_only():
        res = local(flo('cd {basedir} && '
                        "PYTHONPATH='.' .tox/{py}/bin/python -m pytest {args}"))
        print(res)
        # 127 ("command not found"): the tox virtualenv does not exist yet
        if res.return_code == 127:
            print(cyan('missing tox virtualenv, '
                       'run fabric task `tox`:\n\n    '
                       'fab tox\n'))
            sys.exit(1)
@task
|
theno/utlz | utlz/cmd.py | run_cmd | python | def run_cmd(cmd, input=None, timeout=30, max_try=3, num_try=1):
'''Run command `cmd`.
It's like that, and that's the way it is.
'''
if type(cmd) == str:
cmd = cmd.split()
process = subprocess.Popen(cmd,
stdin=open('/dev/null', 'r'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
communicate_has_timeout = func_has_arg(func=process.communicate,
arg='timeout')
exception = Exception
if communicate_has_timeout:
exception = subprocess.TimeoutExpired # python 3.x
stdout = stderr = b''
exitcode = None
try:
if communicate_has_timeout:
# python 3.x
stdout, stderr = process.communicate(input, timeout)
exitcode = process.wait()
else:
# python 2.x
if timeout is None:
stdout, stderr = process.communicate(input)
exitcode = process.wait()
else:
# thread-recipe: https://stackoverflow.com/a/4825933
def target():
# closure-recipe: https://stackoverflow.com/a/23558809
target.out, target.err = process.communicate(input)
import threading
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
process.terminate()
thread.join()
exitcode = None
else:
exitcode = process.wait()
stdout = target.out
stderr = target.err
except exception:
if num_try < max_try:
return run_cmd(cmd, input, timeout, max_try, num_try+1)
else:
return CmdResult(exitcode, stdout, stderr, cmd, input)
return CmdResult(exitcode, stdout, stderr, cmd, input) | Run command `cmd`.
It's like that, and that's the way it is. | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/cmd.py#L22-L72 | [
"def func_has_arg(func, arg):\n '''Return True if an argument `arg` exists for function `func`, else False.\n '''\n return arg in inspect.getargspec(func).args\n",
"def run_cmd(cmd, input=None, timeout=30, max_try=3, num_try=1):\n '''Run command `cmd`.\n\n It's like that, and that's the way it is.\n '''\n if type(cmd) == str:\n cmd = cmd.split()\n process = subprocess.Popen(cmd,\n stdin=open('/dev/null', 'r'),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n communicate_has_timeout = func_has_arg(func=process.communicate,\n arg='timeout')\n exception = Exception\n if communicate_has_timeout:\n exception = subprocess.TimeoutExpired # python 3.x\n stdout = stderr = b''\n exitcode = None\n try:\n if communicate_has_timeout:\n # python 3.x\n stdout, stderr = process.communicate(input, timeout)\n exitcode = process.wait()\n else:\n # python 2.x\n if timeout is None:\n stdout, stderr = process.communicate(input)\n exitcode = process.wait()\n else:\n # thread-recipe: https://stackoverflow.com/a/4825933\n def target():\n # closure-recipe: https://stackoverflow.com/a/23558809\n target.out, target.err = process.communicate(input)\n import threading\n thread = threading.Thread(target=target)\n thread.start()\n thread.join(timeout)\n if thread.is_alive():\n process.terminate()\n thread.join()\n exitcode = None\n else:\n exitcode = process.wait()\n stdout = target.out\n stderr = target.err\n except exception:\n if num_try < max_try:\n return run_cmd(cmd, input, timeout, max_try, num_try+1)\n else:\n return CmdResult(exitcode, stdout, stderr, cmd, input)\n return CmdResult(exitcode, stdout, stderr, cmd, input)\n"
] | import subprocess
from utlz import func_has_arg, namedtuple
CmdResult = namedtuple(
typename='CmdResult',
field_names=[
'exitcode',
'stdout', # type: bytes
'stderr', # type: bytes
'cmd',
'input',
],
lazy_vals={
'stdout_str': lambda self: self.stdout.decode('utf-8'),
'stderr_str': lambda self: self.stderr.decode('utf-8'),
}
)
|
theno/utlz | utlz/__init__.py | first_paragraph | python | def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
'''Return first paragraph of multiline_str as a oneliner.
When without_trailing_dot is True, the last char of the first paragraph
will be removed, if it is a dot ('.').
Examples:
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str))
first line second line
>>> multiline_str = 'first \\n second \\n \\n next paragraph '
>>> print(first_paragraph(multiline_str))
first second
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=3))
fir
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=78))
first line second line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str))
first line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str, without_trailing_dot=False))
first line.
>>> multiline_str = ''
>>> print(first_paragraph(multiline_str))
<BLANKLINE>
'''
stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
paragraph = stripped.split('\n\n')[0]
res = paragraph.replace('\n', ' ')
if without_trailing_dot:
res = res.rsplit('.', 1)[0]
if maxlength:
res = res[0:maxlength]
return res | Return first paragraph of multiline_str as a oneliner.
When without_trailing_dot is True, the last char of the first paragraph
will be removed, if it is a dot ('.').
Examples:
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str))
first line second line
>>> multiline_str = 'first \\n second \\n \\n next paragraph '
>>> print(first_paragraph(multiline_str))
first second
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=3))
fir
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=78))
first line second line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str))
first line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str, without_trailing_dot=False))
first line.
>>> multiline_str = ''
>>> print(first_paragraph(multiline_str))
<BLANKLINE> | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L67-L109 | null | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
'''Return the first line of the (callers) docstring.'''
return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold))
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise(exc)
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
'''Decorator, print the full name of the decorated function.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, or ``prefix``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
'''
color = kwargs.get('color', default_color)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
first_line = ''
try:
first_line = func.__module__ + '.' + func.__qualname__
except AttributeError as exc:
first_line = func.__name__
print(color(prefix + first_line + tail, bold))
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
def _get_input():
try:
return raw_input() # Python-2.*
except NameError:
return input() # Python-3.*
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = _get_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
"""Ask a question for input and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
The "answer" return value is a str.
"""
if default is None or default == '':
prompt = ' '
elif type(default) == str:
prompt = flo(' [{default}] ')
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(color(question + prompt))
choice = _get_input()
if default is not None and choice == '':
return default
if choice != '':
return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
'''Return str template with applied substitutions.
Example:
>>> template = 'Asyl for {{name}} {{surname}}!'
>>> filled_out_template_str(template, name='Edward', surname='Snowden')
'Asyl for Edward Snowden!'
>>> template = '[[[foo]]] was substituted by {{foo}}'
>>> filled_out_template_str(template, foo='bar')
'{{foo}} was substituted by bar'
>>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
>>> filled_out_template_str(template, curly='remains unchanged')
'names wrapped by {single} {curly} {braces} remains unchanged'
'''
template = template.replace('{', '{{')
template = template.replace('}', '}}')
template = template.replace('{{{{', '{')
template = template.replace('}}}}', '}')
template = template.format(**substitutions)
template = template.replace('{{', '{')
template = template.replace('}}', '}')
template = template.replace('[[[', '{{')
template = template.replace(']]]', '}}')
return template
# TODO: unit tests
def filled_out_template(filename, **substitutions):
'''Return content of file filename with applied substitutions.'''
res = None
with open(filename, 'r') as fp:
template = fp.read()
res = filled_out_template_str(template, **substitutions)
return res
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
append=True):
'''Search in file 'filename' for a line starting with 'prefix' and replace
the line by 'new_line'.
If a line starting with 'prefix' not exists 'new_line' will be appended.
If the file not exists, it will be created.
Return False if new_line was appended, else True (i.e. if the prefix was
found within of the file).
'''
same_line_exists, line_updated = False, False
filename = os.path.expanduser(filename)
if os.path.isfile(filename):
backup = filename + '~'
shutil.move(filename, backup)
# with open(filename, 'w') as dest, open(backup, 'r') as source:
with open(filename, 'w') as dest:
with open(backup, 'r') as source:
# try update..
for line in source:
if line == new_line:
same_line_exists = True
if line.startswith(prefix):
dest.write(new_line + '\n')
line_updated = True
else:
dest.write(line)
# ..or append
if not (same_line_exists or line_updated) and append:
dest.write(new_line + '\n')
if not keep_backup:
os.remove(backup)
else:
with open(filename, 'w') as dest:
dest.write(new_line + '\n')
return same_line_exists or line_updated
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
update_or_append_line=update_or_append_line):
'''Comment line out by putting a comment sign in front of the line.
If the file does not contain the line, the files content will not be
changed (but the file will be touched in every case).
'''
update_or_append_line(filename, prefix=line, new_line=comment+line,
append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
keep_backup=True,
update_or_append_line=update_or_append_line):
'''Remove the comment of an commented out line and make the line "active".
If such an commented out line not exists it would be appended.
'''
uncommented = update_or_append_line(filename, prefix=comment+prefix,
new_line=new_line,
keep_backup=keep_backup, append=False)
if not uncommented:
update_or_append_line(filename, prefix, new_line,
keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
'''Return a copy of `input` with every str component encoded from unicode to
utf-8.
'''
if isinstance(input, dict):
try:
# python-2.6
return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
for key, value
in input.iteritems())
except AttributeError:
# since python-2.7 cf. http://stackoverflow.com/a/1747827
# [the ugly eval('...') is required for a valid syntax on
# python-2.6, cf. http://stackoverflow.com/a/25049535]
return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
for key, value
in input.items()}''')
elif isinstance(input, list):
return [convert_unicode_2_utf8(element) for element in input]
# elif order relevant: python2 vs. python3
# cf. http://stackoverflow.com/a/19877309
elif isinstance(input, str):
return input
else:
try:
if eval('''isinstance(input, unicode)'''):
return input.encode('utf-8')
except NameError:
# unicode does not exist in python-3.x
pass
return input
def load_json(filename, gzip_mode=False):
'''Return the json-file data, with all strings utf-8 encoded.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'rt') as fh:
data = json.load(fh)
data = convert_unicode_2_utf8(data)
return data
except AttributeError:
# Python-2.6
fh = open_file(filename, 'rt')
data = json.load(fh)
fh.close()
data = convert_unicode_2_utf8(data)
return data
def write_json(data, filename, gzip_mode=False):
'''Write the python data structure as a json-Object to filename.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'wt') as fh:
json.dump(obj=data, fp=fh, sort_keys=True)
except AttributeError:
# Python-2.6
fh = open_file(filename, 'wt')
json.dump(obj=data, fp=fh, sort_keys=True)
fh.close()
def create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def flat_list(list_of_lists):
'''Return a simple list out of a list of lists.'''
return [item for sublist in list_of_lists for item in sublist]
def text_with_newlines(text, line_length=78, newline='\n'):
'''Return text with a `newline` inserted after each `line_length` char.
Return `text` unchanged if line_length == 0.
'''
if line_length > 0:
if len(text) <= line_length:
return text
else:
return newline.join([text[idx:idx+line_length]
for idx
in range(0, len(text), line_length)])
else:
return text
def func_has_arg(func, arg):
'''Return True if an argument `arg` exists for function `func`, else False.
'''
return arg in inspect.getargspec(func).args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
'''A memoize decorator for class properties.
Return a cached property that is calculated by function `func` on first
access.
'''
def hook_for(that):
try:
orig_del = that.__del__
except AttributeError:
orig_del = None
def del_hook(*args, **kwargs):
del that._cache[id(that)]
del that._del_hook_cache[id(that)]
if orig_del is not None:
orig_del(that, *args, **kwargs)
try:
if orig_del is not None:
that.__del__ = del_hook
except AttributeError:
# that.__del__ is a class property and cannot be changed by instance
orig_del = None
return del_hook
def add_to_del_hook_cache(that):
if with_del_hook:
try:
that._del_hook_cache[id(that)] = hook_for(that)
except AttributeError:
# when that._del_hook_cache not exists, it means it is not a
# class property. Then, we don't need a del_hook().
pass
@functools.wraps(func)
def get(self):
try:
return self._cache[id(self)][func]
except AttributeError:
self._cache = {id(self): {}, }
add_to_del_hook_cache(self)
except KeyError:
try:
self._cache[id(self)]
except KeyError:
self._cache[id(self)] = {}
add_to_del_hook_cache(self)
val = self._cache[id(self)][func] = func(self)
return val
return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
field_names_without_defaults = []
defaults = []
for name in field_names:
list_ = name.split('=', 1)
if len(list_) > 1:
name, default = list_
defaults.append(eval(default))
elif len(defaults) != 0:
raise ValueError('non-keyword arg after keyword arg in field_names')
field_names_without_defaults.append(name)
_class = collections.namedtuple(typename, field_names_without_defaults,
**kwargs)
_class.__new__.__defaults__ = tuple(defaults)
if lazy_vals is not None:
# namedtuple instances are tuples and so they are immutable. We cannot
# add an instance property _cache. So we create one global _cache dict
# and one _del_hook_cache dict as class properties for storing the lazy
# vals and the del-hooks and enable the del_hook-functionality by
# adding a __del__ attribute function wich calls the del-hook.
_class._cache = {}
_class._del_hook_cache = {}
def noop(): pass
_class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
for attr_name, func in lazy_vals.items():
setattr(_class, attr_name,
lazy_val(func, with_del_hook=True))
return _class
# TODO unit test
class StructContext(object):
'''An instance of this is a file like object which enables access of an
(data) struct.
'''
def __init__(self, data_struct):
self.data_struct = data_struct
self.offset = 0
def __enter__(self):
self.seek(0)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.seek(0)
def seek(self, offset):
self.offset = offset
def read(self, fmt):
data = struct.unpack_from(fmt, self.data_struct, self.offset)
self.offset += struct.calcsize(fmt)
if len(data) == 1:
return data[0]
return data
@lazy_val
def length(self):
return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
'''timeout context.
Usage example:
>>> with timeout(0.1) as t:
... while True:
... if t.timed_out:
... break
... print('.')
... time.sleep(0.02)
.
.
.
.
.
For more usage, see https://stackoverflow.com/a/15190306
'''
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
self.die_after = time.time() + self.seconds
return self
def __exit__(self, type, value, traceback):
pass
@property
def timed_out(self):
return time.time() > self.die_after
if __name__ == '__main__':
import doctest
doctest.testmod()
# Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
assert Repo.__new__.__defaults__ == (None, '~/repos')
r = Repo(url='https://github.com/theno/fabsetup.git')
assert r.__repr__() == 'Repo(' \
'url=\'https://github.com/theno/fabsetup.git\', ' \
'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | print_doc1 | python | def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold))
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise(exc)
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator | Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L114-L173 | null | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
'''Return the first line of the (callers) docstring.'''
return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
'''Return first paragraph of multiline_str as a oneliner.
When without_trailing_dot is True, the last char of the first paragraph
will be removed, if it is a dot ('.').
Examples:
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str))
first line second line
>>> multiline_str = 'first \\n second \\n \\n next paragraph '
>>> print(first_paragraph(multiline_str))
first second
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=3))
fir
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=78))
first line second line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str))
first line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str, without_trailing_dot=False))
first line.
>>> multiline_str = ''
>>> print(first_paragraph(multiline_str))
<BLANKLINE>
'''
stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
paragraph = stripped.split('\n\n')[0]
res = paragraph.replace('\n', ' ')
if without_trailing_dot:
res = res.rsplit('.', 1)[0]
if maxlength:
res = res[0:maxlength]
return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
# TODO: unit tests
def print_full_name(*args, **kwargs):
'''Decorator, print the full name of the decorated function.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, or ``prefix``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
'''
color = kwargs.get('color', default_color)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
first_line = ''
try:
first_line = func.__module__ + '.' + func.__qualname__
except AttributeError as exc:
first_line = func.__name__
print(color(prefix + first_line + tail, bold))
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
def _get_input():
try:
return raw_input() # Python-2.*
except NameError:
return input() # Python-3.*
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = _get_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
"""Ask a question for input and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
The "answer" return value is a str.
"""
if default is None or default == '':
prompt = ' '
elif type(default) == str:
prompt = flo(' [{default}] ')
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(color(question + prompt))
choice = _get_input()
if default is not None and choice == '':
return default
if choice != '':
return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
'''Return str template with applied substitutions.
Example:
>>> template = 'Asyl for {{name}} {{surname}}!'
>>> filled_out_template_str(template, name='Edward', surname='Snowden')
'Asyl for Edward Snowden!'
>>> template = '[[[foo]]] was substituted by {{foo}}'
>>> filled_out_template_str(template, foo='bar')
'{{foo}} was substituted by bar'
>>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
>>> filled_out_template_str(template, curly='remains unchanged')
'names wrapped by {single} {curly} {braces} remains unchanged'
'''
template = template.replace('{', '{{')
template = template.replace('}', '}}')
template = template.replace('{{{{', '{')
template = template.replace('}}}}', '}')
template = template.format(**substitutions)
template = template.replace('{{', '{')
template = template.replace('}}', '}')
template = template.replace('[[[', '{{')
template = template.replace(']]]', '}}')
return template
# TODO: unit tests
def filled_out_template(filename, **substitutions):
'''Return content of file filename with applied substitutions.'''
res = None
with open(filename, 'r') as fp:
template = fp.read()
res = filled_out_template_str(template, **substitutions)
return res
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
append=True):
'''Search in file 'filename' for a line starting with 'prefix' and replace
the line by 'new_line'.
If a line starting with 'prefix' not exists 'new_line' will be appended.
If the file not exists, it will be created.
Return False if new_line was appended, else True (i.e. if the prefix was
found within of the file).
'''
same_line_exists, line_updated = False, False
filename = os.path.expanduser(filename)
if os.path.isfile(filename):
backup = filename + '~'
shutil.move(filename, backup)
# with open(filename, 'w') as dest, open(backup, 'r') as source:
with open(filename, 'w') as dest:
with open(backup, 'r') as source:
# try update..
for line in source:
if line == new_line:
same_line_exists = True
if line.startswith(prefix):
dest.write(new_line + '\n')
line_updated = True
else:
dest.write(line)
# ..or append
if not (same_line_exists or line_updated) and append:
dest.write(new_line + '\n')
if not keep_backup:
os.remove(backup)
else:
with open(filename, 'w') as dest:
dest.write(new_line + '\n')
return same_line_exists or line_updated
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
update_or_append_line=update_or_append_line):
'''Comment line out by putting a comment sign in front of the line.
If the file does not contain the line, the files content will not be
changed (but the file will be touched in every case).
'''
update_or_append_line(filename, prefix=line, new_line=comment+line,
append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
keep_backup=True,
update_or_append_line=update_or_append_line):
'''Remove the comment of an commented out line and make the line "active".
If such an commented out line not exists it would be appended.
'''
uncommented = update_or_append_line(filename, prefix=comment+prefix,
new_line=new_line,
keep_backup=keep_backup, append=False)
if not uncommented:
update_or_append_line(filename, prefix, new_line,
keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
'''Return a copy of `input` with every str component encoded from unicode to
utf-8.
'''
if isinstance(input, dict):
try:
# python-2.6
return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
for key, value
in input.iteritems())
except AttributeError:
# since python-2.7 cf. http://stackoverflow.com/a/1747827
# [the ugly eval('...') is required for a valid syntax on
# python-2.6, cf. http://stackoverflow.com/a/25049535]
return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
for key, value
in input.items()}''')
elif isinstance(input, list):
return [convert_unicode_2_utf8(element) for element in input]
# elif order relevant: python2 vs. python3
# cf. http://stackoverflow.com/a/19877309
elif isinstance(input, str):
return input
else:
try:
if eval('''isinstance(input, unicode)'''):
return input.encode('utf-8')
except NameError:
# unicode does not exist in python-3.x
pass
return input
def load_json(filename, gzip_mode=False):
'''Return the json-file data, with all strings utf-8 encoded.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'rt') as fh:
data = json.load(fh)
data = convert_unicode_2_utf8(data)
return data
except AttributeError:
# Python-2.6
fh = open_file(filename, 'rt')
data = json.load(fh)
fh.close()
data = convert_unicode_2_utf8(data)
return data
def write_json(data, filename, gzip_mode=False):
'''Write the python data structure as a json-Object to filename.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'wt') as fh:
json.dump(obj=data, fp=fh, sort_keys=True)
except AttributeError:
# Python-2.6
fh = open_file(filename, 'wt')
json.dump(obj=data, fp=fh, sort_keys=True)
fh.close()
def create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def flat_list(list_of_lists):
'''Return a simple list out of a list of lists.'''
return [item for sublist in list_of_lists for item in sublist]
def text_with_newlines(text, line_length=78, newline='\n'):
'''Return text with a `newline` inserted after each `line_length` char.
Return `text` unchanged if line_length == 0.
'''
if line_length > 0:
if len(text) <= line_length:
return text
else:
return newline.join([text[idx:idx+line_length]
for idx
in range(0, len(text), line_length)])
else:
return text
def func_has_arg(func, arg):
'''Return True if an argument `arg` exists for function `func`, else False.
'''
return arg in inspect.getargspec(func).args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
'''A memoize decorator for class properties.
Return a cached property that is calculated by function `func` on first
access.
'''
def hook_for(that):
try:
orig_del = that.__del__
except AttributeError:
orig_del = None
def del_hook(*args, **kwargs):
del that._cache[id(that)]
del that._del_hook_cache[id(that)]
if orig_del is not None:
orig_del(that, *args, **kwargs)
try:
if orig_del is not None:
that.__del__ = del_hook
except AttributeError:
# that.__del__ is a class property and cannot be changed by instance
orig_del = None
return del_hook
def add_to_del_hook_cache(that):
if with_del_hook:
try:
that._del_hook_cache[id(that)] = hook_for(that)
except AttributeError:
# when that._del_hook_cache not exists, it means it is not a
# class property. Then, we don't need a del_hook().
pass
@functools.wraps(func)
def get(self):
try:
return self._cache[id(self)][func]
except AttributeError:
self._cache = {id(self): {}, }
add_to_del_hook_cache(self)
except KeyError:
try:
self._cache[id(self)]
except KeyError:
self._cache[id(self)] = {}
add_to_del_hook_cache(self)
val = self._cache[id(self)][func] = func(self)
return val
return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
field_names_without_defaults = []
defaults = []
for name in field_names:
list_ = name.split('=', 1)
if len(list_) > 1:
name, default = list_
defaults.append(eval(default))
elif len(defaults) != 0:
raise ValueError('non-keyword arg after keyword arg in field_names')
field_names_without_defaults.append(name)
_class = collections.namedtuple(typename, field_names_without_defaults,
**kwargs)
_class.__new__.__defaults__ = tuple(defaults)
if lazy_vals is not None:
# namedtuple instances are tuples and so they are immutable. We cannot
# add an instance property _cache. So we create one global _cache dict
# and one _del_hook_cache dict as class properties for storing the lazy
# vals and the del-hooks and enable the del_hook-functionality by
# adding a __del__ attribute function wich calls the del-hook.
_class._cache = {}
_class._del_hook_cache = {}
def noop(): pass
_class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
for attr_name, func in lazy_vals.items():
setattr(_class, attr_name,
lazy_val(func, with_del_hook=True))
return _class
# TODO unit test
class StructContext(object):
'''An instance of this is a file like object which enables access of an
(data) struct.
'''
def __init__(self, data_struct):
self.data_struct = data_struct
self.offset = 0
def __enter__(self):
self.seek(0)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.seek(0)
def seek(self, offset):
self.offset = offset
def read(self, fmt):
data = struct.unpack_from(fmt, self.data_struct, self.offset)
self.offset += struct.calcsize(fmt)
if len(data) == 1:
return data[0]
return data
@lazy_val
def length(self):
return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
'''timeout context.
Usage example:
>>> with timeout(0.1) as t:
... while True:
... if t.timed_out:
... break
... print('.')
... time.sleep(0.02)
.
.
.
.
.
For more usage, see https://stackoverflow.com/a/15190306
'''
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
self.die_after = time.time() + self.seconds
return self
def __exit__(self, type, value, traceback):
pass
@property
def timed_out(self):
return time.time() > self.die_after
if __name__ == '__main__':
import doctest
doctest.testmod()
# Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
assert Repo.__new__.__defaults__ == (None, '~/repos')
r = Repo(url='https://github.com/theno/fabsetup.git')
assert r.__repr__() == 'Repo(' \
'url=\'https://github.com/theno/fabsetup.git\', ' \
'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | print_full_name | python | def print_full_name(*args, **kwargs):
'''Decorator, print the full name of the decorated function.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, or ``prefix``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
'''
color = kwargs.get('color', default_color)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
first_line = ''
try:
first_line = func.__module__ + '.' + func.__qualname__
except AttributeError as exc:
first_line = func.__name__
print(color(prefix + first_line + tail, bold))
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator | Decorator, print the full name of the decorated function.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, or ``prefix``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``). | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L177-L208 | null | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
'''Return the first line of the (callers) docstring.'''
return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
'''Return first paragraph of multiline_str as a oneliner.
When without_trailing_dot is True, the last char of the first paragraph
will be removed, if it is a dot ('.').
Examples:
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str))
first line second line
>>> multiline_str = 'first \\n second \\n \\n next paragraph '
>>> print(first_paragraph(multiline_str))
first second
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=3))
fir
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=78))
first line second line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str))
first line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str, without_trailing_dot=False))
first line.
>>> multiline_str = ''
>>> print(first_paragraph(multiline_str))
<BLANKLINE>
'''
stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
paragraph = stripped.split('\n\n')[0]
res = paragraph.replace('\n', ' ')
if without_trailing_dot:
res = res.rsplit('.', 1)[0]
if maxlength:
res = res[0:maxlength]
return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold))
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise(exc)
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
# TODO: unit tests
def _get_input():
try:
return raw_input() # Python-2.*
except NameError:
return input() # Python-3.*
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = _get_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
"""Ask a question for input and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
The "answer" return value is a str.
"""
if default is None or default == '':
prompt = ' '
elif type(default) == str:
prompt = flo(' [{default}] ')
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(color(question + prompt))
choice = _get_input()
if default is not None and choice == '':
return default
if choice != '':
return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
'''Return str template with applied substitutions.
Example:
>>> template = 'Asyl for {{name}} {{surname}}!'
>>> filled_out_template_str(template, name='Edward', surname='Snowden')
'Asyl for Edward Snowden!'
>>> template = '[[[foo]]] was substituted by {{foo}}'
>>> filled_out_template_str(template, foo='bar')
'{{foo}} was substituted by bar'
>>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
>>> filled_out_template_str(template, curly='remains unchanged')
'names wrapped by {single} {curly} {braces} remains unchanged'
'''
template = template.replace('{', '{{')
template = template.replace('}', '}}')
template = template.replace('{{{{', '{')
template = template.replace('}}}}', '}')
template = template.format(**substitutions)
template = template.replace('{{', '{')
template = template.replace('}}', '}')
template = template.replace('[[[', '{{')
template = template.replace(']]]', '}}')
return template
# TODO: unit tests
def filled_out_template(filename, **substitutions):
'''Return content of file filename with applied substitutions.'''
res = None
with open(filename, 'r') as fp:
template = fp.read()
res = filled_out_template_str(template, **substitutions)
return res
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
append=True):
'''Search in file 'filename' for a line starting with 'prefix' and replace
the line by 'new_line'.
If a line starting with 'prefix' not exists 'new_line' will be appended.
If the file not exists, it will be created.
Return False if new_line was appended, else True (i.e. if the prefix was
found within of the file).
'''
same_line_exists, line_updated = False, False
filename = os.path.expanduser(filename)
if os.path.isfile(filename):
backup = filename + '~'
shutil.move(filename, backup)
# with open(filename, 'w') as dest, open(backup, 'r') as source:
with open(filename, 'w') as dest:
with open(backup, 'r') as source:
# try update..
for line in source:
if line == new_line:
same_line_exists = True
if line.startswith(prefix):
dest.write(new_line + '\n')
line_updated = True
else:
dest.write(line)
# ..or append
if not (same_line_exists or line_updated) and append:
dest.write(new_line + '\n')
if not keep_backup:
os.remove(backup)
else:
with open(filename, 'w') as dest:
dest.write(new_line + '\n')
return same_line_exists or line_updated
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
update_or_append_line=update_or_append_line):
'''Comment line out by putting a comment sign in front of the line.
If the file does not contain the line, the files content will not be
changed (but the file will be touched in every case).
'''
update_or_append_line(filename, prefix=line, new_line=comment+line,
append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
keep_backup=True,
update_or_append_line=update_or_append_line):
'''Remove the comment of an commented out line and make the line "active".
If such an commented out line not exists it would be appended.
'''
uncommented = update_or_append_line(filename, prefix=comment+prefix,
new_line=new_line,
keep_backup=keep_backup, append=False)
if not uncommented:
update_or_append_line(filename, prefix, new_line,
keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
'''Return a copy of `input` with every str component encoded from unicode to
utf-8.
'''
if isinstance(input, dict):
try:
# python-2.6
return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
for key, value
in input.iteritems())
except AttributeError:
# since python-2.7 cf. http://stackoverflow.com/a/1747827
# [the ugly eval('...') is required for a valid syntax on
# python-2.6, cf. http://stackoverflow.com/a/25049535]
return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
for key, value
in input.items()}''')
elif isinstance(input, list):
return [convert_unicode_2_utf8(element) for element in input]
# elif order relevant: python2 vs. python3
# cf. http://stackoverflow.com/a/19877309
elif isinstance(input, str):
return input
else:
try:
if eval('''isinstance(input, unicode)'''):
return input.encode('utf-8')
except NameError:
# unicode does not exist in python-3.x
pass
return input
def load_json(filename, gzip_mode=False):
'''Return the json-file data, with all strings utf-8 encoded.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'rt') as fh:
data = json.load(fh)
data = convert_unicode_2_utf8(data)
return data
except AttributeError:
# Python-2.6
fh = open_file(filename, 'rt')
data = json.load(fh)
fh.close()
data = convert_unicode_2_utf8(data)
return data
def write_json(data, filename, gzip_mode=False):
'''Write the python data structure as a json-Object to filename.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'wt') as fh:
json.dump(obj=data, fp=fh, sort_keys=True)
except AttributeError:
# Python-2.6
fh = open_file(filename, 'wt')
json.dump(obj=data, fp=fh, sort_keys=True)
fh.close()
def create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def flat_list(list_of_lists):
'''Return a simple list out of a list of lists.'''
return [item for sublist in list_of_lists for item in sublist]
def text_with_newlines(text, line_length=78, newline='\n'):
'''Return text with a `newline` inserted after each `line_length` char.
Return `text` unchanged if line_length == 0.
'''
if line_length > 0:
if len(text) <= line_length:
return text
else:
return newline.join([text[idx:idx+line_length]
for idx
in range(0, len(text), line_length)])
else:
return text
def func_has_arg(func, arg):
'''Return True if an argument `arg` exists for function `func`, else False.
'''
return arg in inspect.getargspec(func).args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
'''A memoize decorator for class properties.
Return a cached property that is calculated by function `func` on first
access.
'''
def hook_for(that):
try:
orig_del = that.__del__
except AttributeError:
orig_del = None
def del_hook(*args, **kwargs):
del that._cache[id(that)]
del that._del_hook_cache[id(that)]
if orig_del is not None:
orig_del(that, *args, **kwargs)
try:
if orig_del is not None:
that.__del__ = del_hook
except AttributeError:
# that.__del__ is a class property and cannot be changed by instance
orig_del = None
return del_hook
def add_to_del_hook_cache(that):
if with_del_hook:
try:
that._del_hook_cache[id(that)] = hook_for(that)
except AttributeError:
# when that._del_hook_cache not exists, it means it is not a
# class property. Then, we don't need a del_hook().
pass
@functools.wraps(func)
def get(self):
try:
return self._cache[id(self)][func]
except AttributeError:
self._cache = {id(self): {}, }
add_to_del_hook_cache(self)
except KeyError:
try:
self._cache[id(self)]
except KeyError:
self._cache[id(self)] = {}
add_to_del_hook_cache(self)
val = self._cache[id(self)][func] = func(self)
return val
return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
field_names_without_defaults = []
defaults = []
for name in field_names:
list_ = name.split('=', 1)
if len(list_) > 1:
name, default = list_
defaults.append(eval(default))
elif len(defaults) != 0:
raise ValueError('non-keyword arg after keyword arg in field_names')
field_names_without_defaults.append(name)
_class = collections.namedtuple(typename, field_names_without_defaults,
**kwargs)
_class.__new__.__defaults__ = tuple(defaults)
if lazy_vals is not None:
# namedtuple instances are tuples and so they are immutable. We cannot
# add an instance property _cache. So we create one global _cache dict
# and one _del_hook_cache dict as class properties for storing the lazy
# vals and the del-hooks and enable the del_hook-functionality by
# adding a __del__ attribute function wich calls the del-hook.
_class._cache = {}
_class._del_hook_cache = {}
def noop(): pass
_class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
for attr_name, func in lazy_vals.items():
setattr(_class, attr_name,
lazy_val(func, with_del_hook=True))
return _class
# TODO unit test
class StructContext(object):
'''An instance of this is a file like object which enables access of an
(data) struct.
'''
def __init__(self, data_struct):
self.data_struct = data_struct
self.offset = 0
def __enter__(self):
self.seek(0)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.seek(0)
def seek(self, offset):
self.offset = offset
def read(self, fmt):
data = struct.unpack_from(fmt, self.data_struct, self.offset)
self.offset += struct.calcsize(fmt)
if len(data) == 1:
return data[0]
return data
@lazy_val
def length(self):
return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
'''timeout context.
Usage example:
>>> with timeout(0.1) as t:
... while True:
... if t.timed_out:
... break
... print('.')
... time.sleep(0.02)
.
.
.
.
.
For more usage, see https://stackoverflow.com/a/15190306
'''
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
self.die_after = time.time() + self.seconds
return self
def __exit__(self, type, value, traceback):
pass
@property
def timed_out(self):
return time.time() > self.die_after
if __name__ == '__main__':
import doctest
doctest.testmod()
# Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
assert Repo.__new__.__defaults__ == (None, '~/repos')
r = Repo(url='https://github.com/theno/fabsetup.git')
assert r.__repr__() == 'Repo(' \
'url=\'https://github.com/theno/fabsetup.git\', ' \
'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | filled_out_template_str | python | def filled_out_template_str(template, **substitutions):
'''Return str template with applied substitutions.
Example:
>>> template = 'Asyl for {{name}} {{surname}}!'
>>> filled_out_template_str(template, name='Edward', surname='Snowden')
'Asyl for Edward Snowden!'
>>> template = '[[[foo]]] was substituted by {{foo}}'
>>> filled_out_template_str(template, foo='bar')
'{{foo}} was substituted by bar'
>>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
>>> filled_out_template_str(template, curly='remains unchanged')
'names wrapped by {single} {curly} {braces} remains unchanged'
'''
template = template.replace('{', '{{')
template = template.replace('}', '}}')
template = template.replace('{{{{', '{')
template = template.replace('}}}}', '}')
template = template.format(**substitutions)
template = template.replace('{{', '{')
template = template.replace('}}', '}')
template = template.replace('[[[', '{{')
template = template.replace(']]]', '}}')
return template | Return str template with applied substitutions.
Example:
>>> template = 'Asyl for {{name}} {{surname}}!'
>>> filled_out_template_str(template, name='Edward', surname='Snowden')
'Asyl for Edward Snowden!'
>>> template = '[[[foo]]] was substituted by {{foo}}'
>>> filled_out_template_str(template, foo='bar')
'{{foo}} was substituted by bar'
>>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
>>> filled_out_template_str(template, curly='remains unchanged')
'names wrapped by {single} {curly} {braces} remains unchanged' | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L278-L303 | null | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
'''Return the first line of the (callers) docstring.'''
return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
'''Return first paragraph of multiline_str as a oneliner.
When without_trailing_dot is True, the last char of the first paragraph
will be removed, if it is a dot ('.').
Examples:
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str))
first line second line
>>> multiline_str = 'first \\n second \\n \\n next paragraph '
>>> print(first_paragraph(multiline_str))
first second
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=3))
fir
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=78))
first line second line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str))
first line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str, without_trailing_dot=False))
first line.
>>> multiline_str = ''
>>> print(first_paragraph(multiline_str))
<BLANKLINE>
'''
stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
paragraph = stripped.split('\n\n')[0]
res = paragraph.replace('\n', ' ')
if without_trailing_dot:
res = res.rsplit('.', 1)[0]
if maxlength:
res = res[0:maxlength]
return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
    '''Print the first paragraph of the docstring of the decorated function.
    The paragraph will be printed as a oneliner.
    May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
    or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
    (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
    Examples:
        # >>> @print_doc1
        # ... def foo():
        # ...     """First line of docstring.
        # ...
        # ...     another line.
        # ...     """
        # ...     pass
        # ...
        # >>> foo()
        # \033[34mFirst line of docstring\033[0m
        # >>> @print_doc1
        # ... def foo():
        # ...     """First paragraph of docstring which contains more than one
        # ...     line.
        # ...
        # ...     Another paragraph.
        # ...     """
        # ...     pass
        # ...
        # >>> foo()
        # \033[34mFirst paragraph of docstring which contains more than one line\033[0m
    '''
    # output settings from kwargs or take defaults
    color = kwargs.get('color', blue)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '\n')
    def real_decorator(func):
        '''real decorator function'''
        @wraps(func)
        def wrapper(*args, **kwargs):
            '''the wrapper function'''
            try:
                prgf = first_paragraph(func.__doc__)
                print(color(prefix + prgf + tail, bold))
            except AttributeError as exc:
                # func.__doc__ is None: the function has no docstring
                name = func.__name__
                print(red(flo('{name}() has no docstring')))
                raise(exc)
            return func(*args, **kwargs)
        return wrapper
    # Bare usage (``@print_doc1``) calls this with the decorated function as
    # the only positional argument and no kwargs; parameterized usage
    # (``@print_doc1(color=...)``) calls it with kwargs only.
    invoked = bool(not args or kwargs)
    if not invoked:
        # invoke decorator function which returns the wrapper function
        return real_decorator(func=args[0])
    return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
    '''Decorator, print the full name of the decorated function.
    May be invoked as a simple, argument-less decorator
    (i.e. ``@print_full_name``) or with named arguments ``color``, ``bold``,
    or ``prefix``
    (eg. ``@print_full_name(color=utils.red, bold=True, prefix=' ')``).
    '''
    # output settings from kwargs or take defaults
    color = kwargs.get('color', default_color)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '')
    def real_decorator(func):
        '''real decorator function'''
        @wraps(func)
        def wrapper(*args, **kwargs):
            '''the wrapper function'''
            first_line = ''
            try:
                # dotted path incl. class, eg. 'pkg.module.Class.method'
                first_line = func.__module__ + '.' + func.__qualname__
            except AttributeError as exc:
                # __qualname__ does not exist on Python-2.x: fall back
                first_line = func.__name__
            print(color(prefix + first_line + tail, bold))
            return func(*args, **kwargs)
        return wrapper
    # Bare usage (``@print_full_name``) passes the decorated function as the
    # sole positional argument; parameterized usage passes kwargs only.
    invoked = bool(not args or kwargs)
    if not invoked:
        # invoke decorator function which returns the wrapper function
        return real_decorator(func=args[0])
    return real_decorator
def _get_input():
    '''Read one line from stdin, portable across Python 2 and 3.'''
    try:
        read_line = raw_input  # Python-2.*: raises NameError on Python-3
    except NameError:
        read_line = input  # Python-3.*
    return read_line()
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
    """Ask a yes/no question and return their answer.
    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no", or None (which means an answer
    of the user is required).
    The "answer" return value is True for "yes" or False for "no".
    """
    # accepted (case-insensitive) spellings mapped to their boolean meaning
    valid = {"yes": True, "y": True, "ye": True, '1': True,
             "no": False, "n": False, '0': False, }
    # the capital letter in the prompt marks the default answer
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # re-prompt until a valid answer (or bare <Enter> with a default) arrives
    while True:
        sys.stdout.write(question + prompt)
        choice = _get_input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
    """Ask a question for input and return their answer.
    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    The "answer" return value is a str.
    """
    if default is None or default == '':
        prompt = ' '
    elif type(default) == str:
        # show the default value in the prompt, eg. ' [yes] '
        prompt = flo(' [{default}] ')
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # re-prompt until a non-empty answer (or <Enter> with a default) arrives
    while True:
        sys.stdout.write(color(question + prompt))
        choice = _get_input()
        if default is not None and choice == '':
            return default
        if choice != '':
            return choice
# TODO: unit tests
# TODO: unit tests
def filled_out_template(filename, **substitutions):
    '''Return content of file filename with applied substitutions.'''
    with open(filename, 'r') as fp:
        template = fp.read()
    return filled_out_template_str(template, **substitutions)
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
                          append=True):
    '''Search in file 'filename' for a line starting with 'prefix' and replace
    the line by 'new_line'.
    If a line starting with 'prefix' not exists 'new_line' will be appended.
    If the file not exists, it will be created.
    Return False if new_line was appended, else True (i.e. if the prefix was
    found within of the file).
    '''
    filename = os.path.expanduser(filename)
    if not os.path.isfile(filename):
        # nothing to scan: create the file with new_line as its only content
        with open(filename, 'w') as dest:
            dest.write(new_line + '\n')
        return False
    found_same, found_prefix = False, False
    # rewrite the file from its backup copy, line by line
    backup = filename + '~'
    shutil.move(filename, backup)
    with open(filename, 'w') as dest:
        with open(backup, 'r') as source:
            for cur_line in source:
                if cur_line == new_line:
                    found_same = True
                if cur_line.startswith(prefix):
                    # replace the matching line
                    dest.write(new_line + '\n')
                    found_prefix = True
                else:
                    dest.write(cur_line)
            if append and not (found_same or found_prefix):
                # no match anywhere: append at the end
                dest.write(new_line + '\n')
    if not keep_backup:
        os.remove(backup)
    return found_same or found_prefix
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
                     update_or_append_line=update_or_append_line):
    '''Comment line out by putting a comment sign in front of the line.
    If the file does not contain the line, the files content will not be
    changed (but the file will be touched in every case).
    '''
    commented_line = comment + line
    update_or_append_line(filename, prefix=line,
                          new_line=commented_line, append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
                                       keep_backup=True,
                                       update_or_append_line=update_or_append_line):
    '''Remove the comment of an commented out line and make the line "active".
    If such an commented out line not exists it would be appended.
    '''
    # first pass: replace the commented-out variant of the line (no append)
    was_uncommented = update_or_append_line(filename,
                                           prefix=comment + prefix,
                                           new_line=new_line,
                                           keep_backup=keep_backup,
                                           append=False)
    if not was_uncommented:
        # second pass: update the active line, or append if missing
        update_or_append_line(filename, prefix, new_line,
                              keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
    '''Return a copy of `input` with every str component encoded from unicode to
    utf-8.
    '''
    # NOTE: the parameter shadows the builtin input() inside this function
    if isinstance(input, dict):
        try:
            # python-2.6
            return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
                        for key, value
                        in input.iteritems())
        except AttributeError:
            # since python-2.7 cf. http://stackoverflow.com/a/1747827
            # [the ugly eval('...') is required for a valid syntax on
            # python-2.6, cf. http://stackoverflow.com/a/25049535]
            return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
                            for key, value
                            in input.items()}''')
    elif isinstance(input, list):
        return [convert_unicode_2_utf8(element) for element in input]
    # elif order relevant: python2 vs. python3
    # cf. http://stackoverflow.com/a/19877309
    elif isinstance(input, str):
        return input
    else:
        try:
            # eval keeps the reference to `unicode` (Python-2 only) out of
            # the compiled code so the module still imports on Python-3
            if eval('''isinstance(input, unicode)'''):
                return input.encode('utf-8')
        except NameError:
            # unicode does not exist in python-3.x
            pass
        return input
def load_json(filename, gzip_mode=False):
    '''Return the json-file data, with all strings utf-8 encoded.'''
    opener = gzip.open if gzip_mode else open
    try:
        with opener(filename, 'rt') as fh:
            raw = json.load(fh)
    except AttributeError:
        # Python-2.6: gzip file objects are no context managers
        fh = opener(filename, 'rt')
        raw = json.load(fh)
        fh.close()
    return convert_unicode_2_utf8(raw)
def write_json(data, filename, gzip_mode=False):
    '''Write the python data structure as a json-Object to filename.'''
    opener = gzip.open if gzip_mode else open
    try:
        with opener(filename, 'wt') as fh:
            json.dump(obj=data, fp=fh, sort_keys=True)
    except AttributeError:
        # Python-2.6: gzip file objects are no context managers
        fh = opener(filename, 'wt')
        json.dump(obj=data, fp=fh, sort_keys=True)
        fh.close()
def create_dir_if_not_exists(path):
    '''Create directory `path` (incl. missing parent dirs) unless it exists.'''
    if os.path.exists(path):
        return
    os.makedirs(path)
def flat_list(list_of_lists):
    '''Return a simple list out of a list of lists.'''
    flattened = []
    for sublist in list_of_lists:
        flattened.extend(sublist)
    return flattened
def text_with_newlines(text, line_length=78, newline='\n'):
    '''Return text with a `newline` inserted after each `line_length` char.
    Return `text` unchanged if line_length == 0.
    '''
    if line_length <= 0:
        return text
    if len(text) <= line_length:
        return text
    chunks = (text[pos:pos + line_length]
              for pos in range(0, len(text), line_length))
    return newline.join(chunks)
def func_has_arg(func, arg):
    '''Return True if an argument `arg` exists for function `func`, else False.
    '''
    try:
        # inspect.getargspec() was removed in Python-3.11; prefer the
        # modern replacement which returns the same positional .args list
        spec = inspect.getfullargspec(func)
    except AttributeError:
        # Python-2.x has no getfullargspec()
        spec = inspect.getargspec(func)
    return arg in spec.args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
    '''A memoize decorator for class properties.
    Return a cached property that is calculated by function `func` on first
    access.
    '''
    # cache layout (stored on the owner): _cache[id(instance)][func] -> value
    def hook_for(that):
        # Wrap (and try to replace) `that.__del__` so the cache entries of
        # `that` are dropped when the instance is finalized.
        try:
            orig_del = that.__del__
        except AttributeError:
            orig_del = None
        def del_hook(*args, **kwargs):
            del that._cache[id(that)]
            del that._del_hook_cache[id(that)]
            if orig_del is not None:
                orig_del(that, *args, **kwargs)
        try:
            if orig_del is not None:
                that.__del__ = del_hook
        except AttributeError:
            # that.__del__ is a class property and cannot be changed by instance
            orig_del = None
        return del_hook
    def add_to_del_hook_cache(that):
        # register the del-hook for `that` (only when with_del_hook=True)
        if with_del_hook:
            try:
                that._del_hook_cache[id(that)] = hook_for(that)
            except AttributeError:
                # when that._del_hook_cache not exists, it means it is not a
                # class property. Then, we don't need a del_hook().
                pass
    @functools.wraps(func)
    def get(self):
        try:
            # fast path: value was computed before
            return self._cache[id(self)][func]
        except AttributeError:
            # first access ever: create the cache dict
            self._cache = {id(self): {}, }
            add_to_del_hook_cache(self)
        except KeyError:
            # cache exists but not (yet) for this instance or this func
            try:
                self._cache[id(self)]
            except KeyError:
                self._cache[id(self)] = {}
                add_to_del_hook_cache(self)
        # compute once and memoize
        val = self._cache[id(self)][func] = func(self)
        return val
    return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
    '''collections.namedtuple() variant with default values and lazy values.
    `field_names` entries may have the form 'name=default'; `lazy_vals` is an
    optional dict mapping attribute names to functions cached via lazy_val().
    '''
    if isinstance(field_names, str):
        # accept 'a, b=1' as well as 'a b=1'
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    field_names_without_defaults = []
    defaults = []
    for name in field_names:
        list_ = name.split('=', 1)
        if len(list_) > 1:
            name, default = list_
            # NOTE(review): default values are eval'd -- field_names must
            # come from trusted sources only
            defaults.append(eval(default))
        elif len(defaults) != 0:
            # like in a def-signature: no positional field after a default
            raise ValueError('non-keyword arg after keyword arg in field_names')
        field_names_without_defaults.append(name)
    _class = collections.namedtuple(typename, field_names_without_defaults,
                                    **kwargs)
    # defaults apply right-aligned, as in a regular function signature
    _class.__new__.__defaults__ = tuple(defaults)
    if lazy_vals is not None:
        # namedtuple instances are tuples and so they are immutable. We cannot
        # add an instance property _cache. So we create one global _cache dict
        # and one _del_hook_cache dict as class properties for storing the lazy
        # vals and the del-hooks and enable the del_hook-functionality by
        # adding a __del__ attribute function wich calls the del-hook.
        _class._cache = {}
        _class._del_hook_cache = {}
        def noop(): pass
        _class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
        for attr_name, func in lazy_vals.items():
            setattr(_class, attr_name,
                    lazy_val(func, with_del_hook=True))
    return _class
# TODO unit test
class StructContext(object):
    '''An instance of this is a file like object which enables access of an
    (data) struct.
    '''
    def __init__(self, data_struct):
        # data_struct: buffer readable by struct.unpack_from (bytes-like)
        self.data_struct = data_struct
        self.offset = 0  # current read position in bytes
    def __enter__(self):
        self.seek(0)
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        # rewind so the instance can be reused in another `with` block
        self.seek(0)
    def seek(self, offset):
        # set the current read position (in bytes)
        self.offset = offset
    def read(self, fmt):
        # Unpack struct-format `fmt` at the current offset and advance it by
        # the format's size; single-element formats are unwrapped to a scalar.
        data = struct.unpack_from(fmt, self.data_struct, self.offset)
        self.offset += struct.calcsize(fmt)
        if len(data) == 1:
            return data[0]
        return data
    @lazy_val
    def length(self):
        # total size of the underlying buffer, cached on first access
        return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
    '''timeout context.
    Usage example:
    >>> with timeout(0.1) as t:
    ...     while True:
    ...         if t.timed_out:
    ...             break
    ...         print('.')
    ...         time.sleep(0.02)
    .
    .
    .
    .
    .
    For more usage, see https://stackoverflow.com/a/15190306
    '''
    def __init__(self, seconds):
        # wall-clock duration after which `timed_out` becomes True
        self.seconds = seconds
    def __enter__(self):
        # the deadline is fixed on entering the context, not on construction
        self.die_after = time.time() + self.seconds
        return self
    def __exit__(self, type, value, traceback):
        # nothing to clean up; exceptions propagate unchanged
        pass
    @property
    def timed_out(self):
        # True once the deadline set in __enter__ has passed
        return time.time() > self.die_after
if __name__ == '__main__':
    # run all doctests of this module
    import doctest
    doctest.testmod()
    # smoke test for namedtuple() with default values
    # Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    assert Repo.__new__.__defaults__ == (None, '~/repos')
    r = Repo(url='https://github.com/theno/fabsetup.git')
    assert r.__repr__() == 'Repo(' \
        'url=\'https://github.com/theno/fabsetup.git\', ' \
        'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | filled_out_template | python | def filled_out_template(filename, **substitutions):
'''Return content of file filename with applied substitutions.'''
res = None
with open(filename, 'r') as fp:
template = fp.read()
res = filled_out_template_str(template, **substitutions)
return res | Return content of file filename with applied substitutions. | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L307-L313 | [
"def filled_out_template_str(template, **substitutions):\n '''Return str template with applied substitutions.\n\n Example:\n >>> template = 'Asyl for {{name}} {{surname}}!'\n >>> filled_out_template_str(template, name='Edward', surname='Snowden')\n 'Asyl for Edward Snowden!'\n\n >>> template = '[[[foo]]] was substituted by {{foo}}'\n >>> filled_out_template_str(template, foo='bar')\n '{{foo}} was substituted by bar'\n\n >>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'\n >>> filled_out_template_str(template, curly='remains unchanged')\n 'names wrapped by {single} {curly} {braces} remains unchanged'\n '''\n template = template.replace('{', '{{')\n template = template.replace('}', '}}')\n template = template.replace('{{{{', '{')\n template = template.replace('}}}}', '}')\n template = template.format(**substitutions)\n template = template.replace('{{', '{')\n template = template.replace('}}', '}')\n template = template.replace('[[[', '{{')\n template = template.replace(']]]', '}}')\n return template\n"
] | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
'''Return the first line of the (callers) docstring.'''
return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
'''Return first paragraph of multiline_str as a oneliner.
When without_trailing_dot is True, the last char of the first paragraph
will be removed, if it is a dot ('.').
Examples:
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str))
first line second line
>>> multiline_str = 'first \\n second \\n \\n next paragraph '
>>> print(first_paragraph(multiline_str))
first second
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=3))
fir
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=78))
first line second line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str))
first line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str, without_trailing_dot=False))
first line.
>>> multiline_str = ''
>>> print(first_paragraph(multiline_str))
<BLANKLINE>
'''
stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
paragraph = stripped.split('\n\n')[0]
res = paragraph.replace('\n', ' ')
if without_trailing_dot:
res = res.rsplit('.', 1)[0]
if maxlength:
res = res[0:maxlength]
return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold))
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise(exc)
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
'''Decorator, print the full name of the decorated function.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, or ``prefix``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
'''
color = kwargs.get('color', default_color)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
first_line = ''
try:
first_line = func.__module__ + '.' + func.__qualname__
except AttributeError as exc:
first_line = func.__name__
print(color(prefix + first_line + tail, bold))
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
def _get_input():
try:
return raw_input() # Python-2.*
except NameError:
return input() # Python-3.*
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = _get_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
"""Ask a question for input and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
The "answer" return value is a str.
"""
if default is None or default == '':
prompt = ' '
elif type(default) == str:
prompt = flo(' [{default}] ')
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(color(question + prompt))
choice = _get_input()
if default is not None and choice == '':
return default
if choice != '':
return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
'''Return str template with applied substitutions.
Example:
>>> template = 'Asyl for {{name}} {{surname}}!'
>>> filled_out_template_str(template, name='Edward', surname='Snowden')
'Asyl for Edward Snowden!'
>>> template = '[[[foo]]] was substituted by {{foo}}'
>>> filled_out_template_str(template, foo='bar')
'{{foo}} was substituted by bar'
>>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
>>> filled_out_template_str(template, curly='remains unchanged')
'names wrapped by {single} {curly} {braces} remains unchanged'
'''
template = template.replace('{', '{{')
template = template.replace('}', '}}')
template = template.replace('{{{{', '{')
template = template.replace('}}}}', '}')
template = template.format(**substitutions)
template = template.replace('{{', '{')
template = template.replace('}}', '}')
template = template.replace('[[[', '{{')
template = template.replace(']]]', '}}')
return template
# TODO: unit tests
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
append=True):
'''Search in file 'filename' for a line starting with 'prefix' and replace
the line by 'new_line'.
If a line starting with 'prefix' not exists 'new_line' will be appended.
If the file not exists, it will be created.
Return False if new_line was appended, else True (i.e. if the prefix was
found within of the file).
'''
same_line_exists, line_updated = False, False
filename = os.path.expanduser(filename)
if os.path.isfile(filename):
backup = filename + '~'
shutil.move(filename, backup)
# with open(filename, 'w') as dest, open(backup, 'r') as source:
with open(filename, 'w') as dest:
with open(backup, 'r') as source:
# try update..
for line in source:
if line == new_line:
same_line_exists = True
if line.startswith(prefix):
dest.write(new_line + '\n')
line_updated = True
else:
dest.write(line)
# ..or append
if not (same_line_exists or line_updated) and append:
dest.write(new_line + '\n')
if not keep_backup:
os.remove(backup)
else:
with open(filename, 'w') as dest:
dest.write(new_line + '\n')
return same_line_exists or line_updated
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
update_or_append_line=update_or_append_line):
'''Comment line out by putting a comment sign in front of the line.
If the file does not contain the line, the files content will not be
changed (but the file will be touched in every case).
'''
update_or_append_line(filename, prefix=line, new_line=comment+line,
append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
keep_backup=True,
update_or_append_line=update_or_append_line):
'''Remove the comment of an commented out line and make the line "active".
If such an commented out line not exists it would be appended.
'''
uncommented = update_or_append_line(filename, prefix=comment+prefix,
new_line=new_line,
keep_backup=keep_backup, append=False)
if not uncommented:
update_or_append_line(filename, prefix, new_line,
keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
'''Return a copy of `input` with every str component encoded from unicode to
utf-8.
'''
if isinstance(input, dict):
try:
# python-2.6
return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
for key, value
in input.iteritems())
except AttributeError:
# since python-2.7 cf. http://stackoverflow.com/a/1747827
# [the ugly eval('...') is required for a valid syntax on
# python-2.6, cf. http://stackoverflow.com/a/25049535]
return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
for key, value
in input.items()}''')
elif isinstance(input, list):
return [convert_unicode_2_utf8(element) for element in input]
# elif order relevant: python2 vs. python3
# cf. http://stackoverflow.com/a/19877309
elif isinstance(input, str):
return input
else:
try:
if eval('''isinstance(input, unicode)'''):
return input.encode('utf-8')
except NameError:
# unicode does not exist in python-3.x
pass
return input
def load_json(filename, gzip_mode=False):
'''Return the json-file data, with all strings utf-8 encoded.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'rt') as fh:
data = json.load(fh)
data = convert_unicode_2_utf8(data)
return data
except AttributeError:
# Python-2.6
fh = open_file(filename, 'rt')
data = json.load(fh)
fh.close()
data = convert_unicode_2_utf8(data)
return data
def write_json(data, filename, gzip_mode=False):
'''Write the python data structure as a json-Object to filename.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'wt') as fh:
json.dump(obj=data, fp=fh, sort_keys=True)
except AttributeError:
# Python-2.6
fh = open_file(filename, 'wt')
json.dump(obj=data, fp=fh, sort_keys=True)
fh.close()
def create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def flat_list(list_of_lists):
'''Return a simple list out of a list of lists.'''
return [item for sublist in list_of_lists for item in sublist]
def text_with_newlines(text, line_length=78, newline='\n'):
'''Return text with a `newline` inserted after each `line_length` char.
Return `text` unchanged if line_length == 0.
'''
if line_length > 0:
if len(text) <= line_length:
return text
else:
return newline.join([text[idx:idx+line_length]
for idx
in range(0, len(text), line_length)])
else:
return text
def func_has_arg(func, arg):
'''Return True if an argument `arg` exists for function `func`, else False.
'''
return arg in inspect.getargspec(func).args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
'''A memoize decorator for class properties.
Return a cached property that is calculated by function `func` on first
access.
'''
def hook_for(that):
try:
orig_del = that.__del__
except AttributeError:
orig_del = None
def del_hook(*args, **kwargs):
del that._cache[id(that)]
del that._del_hook_cache[id(that)]
if orig_del is not None:
orig_del(that, *args, **kwargs)
try:
if orig_del is not None:
that.__del__ = del_hook
except AttributeError:
# that.__del__ is a class property and cannot be changed by instance
orig_del = None
return del_hook
def add_to_del_hook_cache(that):
if with_del_hook:
try:
that._del_hook_cache[id(that)] = hook_for(that)
except AttributeError:
# when that._del_hook_cache not exists, it means it is not a
# class property. Then, we don't need a del_hook().
pass
@functools.wraps(func)
def get(self):
try:
return self._cache[id(self)][func]
except AttributeError:
self._cache = {id(self): {}, }
add_to_del_hook_cache(self)
except KeyError:
try:
self._cache[id(self)]
except KeyError:
self._cache[id(self)] = {}
add_to_del_hook_cache(self)
val = self._cache[id(self)][func] = func(self)
return val
return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
field_names_without_defaults = []
defaults = []
for name in field_names:
list_ = name.split('=', 1)
if len(list_) > 1:
name, default = list_
defaults.append(eval(default))
elif len(defaults) != 0:
raise ValueError('non-keyword arg after keyword arg in field_names')
field_names_without_defaults.append(name)
_class = collections.namedtuple(typename, field_names_without_defaults,
**kwargs)
_class.__new__.__defaults__ = tuple(defaults)
if lazy_vals is not None:
# namedtuple instances are tuples and so they are immutable. We cannot
# add an instance property _cache. So we create one global _cache dict
# and one _del_hook_cache dict as class properties for storing the lazy
# vals and the del-hooks and enable the del_hook-functionality by
# adding a __del__ attribute function wich calls the del-hook.
_class._cache = {}
_class._del_hook_cache = {}
def noop(): pass
_class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
for attr_name, func in lazy_vals.items():
setattr(_class, attr_name,
lazy_val(func, with_del_hook=True))
return _class
# TODO unit test
class StructContext(object):
'''An instance of this is a file like object which enables access of an
(data) struct.
'''
def __init__(self, data_struct):
self.data_struct = data_struct
self.offset = 0
def __enter__(self):
self.seek(0)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.seek(0)
def seek(self, offset):
self.offset = offset
def read(self, fmt):
data = struct.unpack_from(fmt, self.data_struct, self.offset)
self.offset += struct.calcsize(fmt)
if len(data) == 1:
return data[0]
return data
@lazy_val
def length(self):
return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
'''timeout context.
Usage example:
>>> with timeout(0.1) as t:
... while True:
... if t.timed_out:
... break
... print('.')
... time.sleep(0.02)
.
.
.
.
.
For more usage, see https://stackoverflow.com/a/15190306
'''
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
self.die_after = time.time() + self.seconds
return self
def __exit__(self, type, value, traceback):
pass
@property
def timed_out(self):
return time.time() > self.die_after
if __name__ == '__main__':
import doctest
doctest.testmod()
# Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
assert Repo.__new__.__defaults__ == (None, '~/repos')
r = Repo(url='https://github.com/theno/fabsetup.git')
assert r.__repr__() == 'Repo(' \
'url=\'https://github.com/theno/fabsetup.git\', ' \
'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | update_or_append_line | python | def update_or_append_line(filename, prefix, new_line, keep_backup=True,
append=True):
'''Search in file 'filename' for a line starting with 'prefix' and replace
the line by 'new_line'.
If a line starting with 'prefix' not exists 'new_line' will be appended.
If the file not exists, it will be created.
Return False if new_line was appended, else True (i.e. if the prefix was
found within of the file).
'''
same_line_exists, line_updated = False, False
filename = os.path.expanduser(filename)
if os.path.isfile(filename):
backup = filename + '~'
shutil.move(filename, backup)
# with open(filename, 'w') as dest, open(backup, 'r') as source:
with open(filename, 'w') as dest:
with open(backup, 'r') as source:
# try update..
for line in source:
if line == new_line:
same_line_exists = True
if line.startswith(prefix):
dest.write(new_line + '\n')
line_updated = True
else:
dest.write(line)
# ..or append
if not (same_line_exists or line_updated) and append:
dest.write(new_line + '\n')
if not keep_backup:
os.remove(backup)
else:
with open(filename, 'w') as dest:
dest.write(new_line + '\n')
return same_line_exists or line_updated | Search in file 'filename' for a line starting with 'prefix' and replace
the line by 'new_line'.
If a line starting with 'prefix' not exists 'new_line' will be appended.
If the file not exists, it will be created.
Return False if new_line was appended, else True (i.e. if the prefix was
found within of the file). | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L318-L354 | null | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
'''Return the first line of the (callers) docstring.'''
return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
'''Return first paragraph of multiline_str as a oneliner.
When without_trailing_dot is True, the last char of the first paragraph
will be removed, if it is a dot ('.').
Examples:
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str))
first line second line
>>> multiline_str = 'first \\n second \\n \\n next paragraph '
>>> print(first_paragraph(multiline_str))
first second
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=3))
fir
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=78))
first line second line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str))
first line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str, without_trailing_dot=False))
first line.
>>> multiline_str = ''
>>> print(first_paragraph(multiline_str))
<BLANKLINE>
'''
stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
paragraph = stripped.split('\n\n')[0]
res = paragraph.replace('\n', ' ')
if without_trailing_dot:
res = res.rsplit('.', 1)[0]
if maxlength:
res = res[0:maxlength]
return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold))
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise(exc)
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
'''Decorator, print the full name of the decorated function.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, or ``prefix``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
'''
color = kwargs.get('color', default_color)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
first_line = ''
try:
first_line = func.__module__ + '.' + func.__qualname__
except AttributeError as exc:
first_line = func.__name__
print(color(prefix + first_line + tail, bold))
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
def _get_input():
try:
return raw_input() # Python-2.*
except NameError:
return input() # Python-3.*
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = _get_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
"""Ask a question for input and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
The "answer" return value is a str.
"""
if default is None or default == '':
prompt = ' '
elif type(default) == str:
prompt = flo(' [{default}] ')
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(color(question + prompt))
choice = _get_input()
if default is not None and choice == '':
return default
if choice != '':
return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
'''Return str template with applied substitutions.
Example:
>>> template = 'Asyl for {{name}} {{surname}}!'
>>> filled_out_template_str(template, name='Edward', surname='Snowden')
'Asyl for Edward Snowden!'
>>> template = '[[[foo]]] was substituted by {{foo}}'
>>> filled_out_template_str(template, foo='bar')
'{{foo}} was substituted by bar'
>>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
>>> filled_out_template_str(template, curly='remains unchanged')
'names wrapped by {single} {curly} {braces} remains unchanged'
'''
template = template.replace('{', '{{')
template = template.replace('}', '}}')
template = template.replace('{{{{', '{')
template = template.replace('}}}}', '}')
template = template.format(**substitutions)
template = template.replace('{{', '{')
template = template.replace('}}', '}')
template = template.replace('[[[', '{{')
template = template.replace(']]]', '}}')
return template
# TODO: unit tests
def filled_out_template(filename, **substitutions):
'''Return content of file filename with applied substitutions.'''
res = None
with open(filename, 'r') as fp:
template = fp.read()
res = filled_out_template_str(template, **substitutions)
return res
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
update_or_append_line=update_or_append_line):
'''Comment line out by putting a comment sign in front of the line.
If the file does not contain the line, the files content will not be
changed (but the file will be touched in every case).
'''
update_or_append_line(filename, prefix=line, new_line=comment+line,
append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
keep_backup=True,
update_or_append_line=update_or_append_line):
'''Remove the comment of an commented out line and make the line "active".
If such an commented out line not exists it would be appended.
'''
uncommented = update_or_append_line(filename, prefix=comment+prefix,
new_line=new_line,
keep_backup=keep_backup, append=False)
if not uncommented:
update_or_append_line(filename, prefix, new_line,
keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
'''Return a copy of `input` with every str component encoded from unicode to
utf-8.
'''
if isinstance(input, dict):
try:
# python-2.6
return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
for key, value
in input.iteritems())
except AttributeError:
# since python-2.7 cf. http://stackoverflow.com/a/1747827
# [the ugly eval('...') is required for a valid syntax on
# python-2.6, cf. http://stackoverflow.com/a/25049535]
return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
for key, value
in input.items()}''')
elif isinstance(input, list):
return [convert_unicode_2_utf8(element) for element in input]
# elif order relevant: python2 vs. python3
# cf. http://stackoverflow.com/a/19877309
elif isinstance(input, str):
return input
else:
try:
if eval('''isinstance(input, unicode)'''):
return input.encode('utf-8')
except NameError:
# unicode does not exist in python-3.x
pass
return input
def load_json(filename, gzip_mode=False):
'''Return the json-file data, with all strings utf-8 encoded.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'rt') as fh:
data = json.load(fh)
data = convert_unicode_2_utf8(data)
return data
except AttributeError:
# Python-2.6
fh = open_file(filename, 'rt')
data = json.load(fh)
fh.close()
data = convert_unicode_2_utf8(data)
return data
def write_json(data, filename, gzip_mode=False):
'''Write the python data structure as a json-Object to filename.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'wt') as fh:
json.dump(obj=data, fp=fh, sort_keys=True)
except AttributeError:
# Python-2.6
fh = open_file(filename, 'wt')
json.dump(obj=data, fp=fh, sort_keys=True)
fh.close()
def create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def flat_list(list_of_lists):
'''Return a simple list out of a list of lists.'''
return [item for sublist in list_of_lists for item in sublist]
def text_with_newlines(text, line_length=78, newline='\n'):
'''Return text with a `newline` inserted after each `line_length` char.
Return `text` unchanged if line_length == 0.
'''
if line_length > 0:
if len(text) <= line_length:
return text
else:
return newline.join([text[idx:idx+line_length]
for idx
in range(0, len(text), line_length)])
else:
return text
def func_has_arg(func, arg):
'''Return True if an argument `arg` exists for function `func`, else False.
'''
return arg in inspect.getargspec(func).args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
'''A memoize decorator for class properties.
Return a cached property that is calculated by function `func` on first
access.
'''
def hook_for(that):
try:
orig_del = that.__del__
except AttributeError:
orig_del = None
def del_hook(*args, **kwargs):
del that._cache[id(that)]
del that._del_hook_cache[id(that)]
if orig_del is not None:
orig_del(that, *args, **kwargs)
try:
if orig_del is not None:
that.__del__ = del_hook
except AttributeError:
# that.__del__ is a class property and cannot be changed by instance
orig_del = None
return del_hook
def add_to_del_hook_cache(that):
if with_del_hook:
try:
that._del_hook_cache[id(that)] = hook_for(that)
except AttributeError:
# when that._del_hook_cache not exists, it means it is not a
# class property. Then, we don't need a del_hook().
pass
@functools.wraps(func)
def get(self):
try:
return self._cache[id(self)][func]
except AttributeError:
self._cache = {id(self): {}, }
add_to_del_hook_cache(self)
except KeyError:
try:
self._cache[id(self)]
except KeyError:
self._cache[id(self)] = {}
add_to_del_hook_cache(self)
val = self._cache[id(self)][func] = func(self)
return val
return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
field_names_without_defaults = []
defaults = []
for name in field_names:
list_ = name.split('=', 1)
if len(list_) > 1:
name, default = list_
defaults.append(eval(default))
elif len(defaults) != 0:
raise ValueError('non-keyword arg after keyword arg in field_names')
field_names_without_defaults.append(name)
_class = collections.namedtuple(typename, field_names_without_defaults,
**kwargs)
_class.__new__.__defaults__ = tuple(defaults)
if lazy_vals is not None:
# namedtuple instances are tuples and so they are immutable. We cannot
# add an instance property _cache. So we create one global _cache dict
# and one _del_hook_cache dict as class properties for storing the lazy
# vals and the del-hooks and enable the del_hook-functionality by
# adding a __del__ attribute function wich calls the del-hook.
_class._cache = {}
_class._del_hook_cache = {}
def noop(): pass
_class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
for attr_name, func in lazy_vals.items():
setattr(_class, attr_name,
lazy_val(func, with_del_hook=True))
return _class
# TODO unit test
class StructContext(object):
'''An instance of this is a file like object which enables access of an
(data) struct.
'''
def __init__(self, data_struct):
self.data_struct = data_struct
self.offset = 0
def __enter__(self):
self.seek(0)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.seek(0)
def seek(self, offset):
self.offset = offset
def read(self, fmt):
data = struct.unpack_from(fmt, self.data_struct, self.offset)
self.offset += struct.calcsize(fmt)
if len(data) == 1:
return data[0]
return data
@lazy_val
def length(self):
return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
'''timeout context.
Usage example:
>>> with timeout(0.1) as t:
... while True:
... if t.timed_out:
... break
... print('.')
... time.sleep(0.02)
.
.
.
.
.
For more usage, see https://stackoverflow.com/a/15190306
'''
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
self.die_after = time.time() + self.seconds
return self
def __exit__(self, type, value, traceback):
pass
@property
def timed_out(self):
return time.time() > self.die_after
if __name__ == '__main__':
import doctest
doctest.testmod()
# Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
assert Repo.__new__.__defaults__ == (None, '~/repos')
r = Repo(url='https://github.com/theno/fabsetup.git')
assert r.__repr__() == 'Repo(' \
'url=\'https://github.com/theno/fabsetup.git\', ' \
'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | comment_out_line | python | def comment_out_line(filename, line, comment='#',
update_or_append_line=update_or_append_line):
'''Comment line out by putting a comment sign in front of the line.
If the file does not contain the line, the files content will not be
changed (but the file will be touched in every case).
'''
update_or_append_line(filename, prefix=line, new_line=comment+line,
append=False) | Comment line out by putting a comment sign in front of the line.
If the file does not contain the line, the files content will not be
changed (but the file will be touched in every case). | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L358-L366 | [
"def update_or_append_line(filename, prefix, new_line, keep_backup=True,\n append=True):\n '''Search in file 'filename' for a line starting with 'prefix' and replace\n the line by 'new_line'.\n\n If a line starting with 'prefix' not exists 'new_line' will be appended.\n If the file not exists, it will be created.\n\n Return False if new_line was appended, else True (i.e. if the prefix was\n found within of the file).\n '''\n same_line_exists, line_updated = False, False\n filename = os.path.expanduser(filename)\n if os.path.isfile(filename):\n backup = filename + '~'\n shutil.move(filename, backup)\n # with open(filename, 'w') as dest, open(backup, 'r') as source:\n with open(filename, 'w') as dest:\n with open(backup, 'r') as source:\n # try update..\n for line in source:\n if line == new_line:\n same_line_exists = True\n if line.startswith(prefix):\n dest.write(new_line + '\\n')\n line_updated = True\n else:\n dest.write(line)\n # ..or append\n if not (same_line_exists or line_updated) and append:\n dest.write(new_line + '\\n')\n if not keep_backup:\n os.remove(backup)\n else:\n with open(filename, 'w') as dest:\n dest.write(new_line + '\\n')\n return same_line_exists or line_updated\n"
] | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
'''Return the first line of the (callers) docstring.'''
return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
'''Return first paragraph of multiline_str as a oneliner.
When without_trailing_dot is True, the last char of the first paragraph
will be removed, if it is a dot ('.').
Examples:
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str))
first line second line
>>> multiline_str = 'first \\n second \\n \\n next paragraph '
>>> print(first_paragraph(multiline_str))
first second
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=3))
fir
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=78))
first line second line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str))
first line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str, without_trailing_dot=False))
first line.
>>> multiline_str = ''
>>> print(first_paragraph(multiline_str))
<BLANKLINE>
'''
stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
paragraph = stripped.split('\n\n')[0]
res = paragraph.replace('\n', ' ')
if without_trailing_dot:
res = res.rsplit('.', 1)[0]
if maxlength:
res = res[0:maxlength]
return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold))
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise(exc)
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
'''Decorator, print the full name of the decorated function.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, or ``prefix``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
'''
color = kwargs.get('color', default_color)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
first_line = ''
try:
first_line = func.__module__ + '.' + func.__qualname__
except AttributeError as exc:
first_line = func.__name__
print(color(prefix + first_line + tail, bold))
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
def _get_input():
try:
return raw_input() # Python-2.*
except NameError:
return input() # Python-3.*
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = _get_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
"""Ask a question for input and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
The "answer" return value is a str.
"""
if default is None or default == '':
prompt = ' '
elif type(default) == str:
prompt = flo(' [{default}] ')
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(color(question + prompt))
choice = _get_input()
if default is not None and choice == '':
return default
if choice != '':
return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
'''Return str template with applied substitutions.
Example:
>>> template = 'Asyl for {{name}} {{surname}}!'
>>> filled_out_template_str(template, name='Edward', surname='Snowden')
'Asyl for Edward Snowden!'
>>> template = '[[[foo]]] was substituted by {{foo}}'
>>> filled_out_template_str(template, foo='bar')
'{{foo}} was substituted by bar'
>>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
>>> filled_out_template_str(template, curly='remains unchanged')
'names wrapped by {single} {curly} {braces} remains unchanged'
'''
template = template.replace('{', '{{')
template = template.replace('}', '}}')
template = template.replace('{{{{', '{')
template = template.replace('}}}}', '}')
template = template.format(**substitutions)
template = template.replace('{{', '{')
template = template.replace('}}', '}')
template = template.replace('[[[', '{{')
template = template.replace(']]]', '}}')
return template
# TODO: unit tests
def filled_out_template(filename, **substitutions):
'''Return content of file filename with applied substitutions.'''
res = None
with open(filename, 'r') as fp:
template = fp.read()
res = filled_out_template_str(template, **substitutions)
return res
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
append=True):
'''Search in file 'filename' for a line starting with 'prefix' and replace
the line by 'new_line'.
If a line starting with 'prefix' not exists 'new_line' will be appended.
If the file not exists, it will be created.
Return False if new_line was appended, else True (i.e. if the prefix was
found within of the file).
'''
same_line_exists, line_updated = False, False
filename = os.path.expanduser(filename)
if os.path.isfile(filename):
backup = filename + '~'
shutil.move(filename, backup)
# with open(filename, 'w') as dest, open(backup, 'r') as source:
with open(filename, 'w') as dest:
with open(backup, 'r') as source:
# try update..
for line in source:
if line == new_line:
same_line_exists = True
if line.startswith(prefix):
dest.write(new_line + '\n')
line_updated = True
else:
dest.write(line)
# ..or append
if not (same_line_exists or line_updated) and append:
dest.write(new_line + '\n')
if not keep_backup:
os.remove(backup)
else:
with open(filename, 'w') as dest:
dest.write(new_line + '\n')
return same_line_exists or line_updated
# TODO: unit tests
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
keep_backup=True,
update_or_append_line=update_or_append_line):
'''Remove the comment of an commented out line and make the line "active".
If such an commented out line not exists it would be appended.
'''
uncommented = update_or_append_line(filename, prefix=comment+prefix,
new_line=new_line,
keep_backup=keep_backup, append=False)
if not uncommented:
update_or_append_line(filename, prefix, new_line,
keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
'''Return a copy of `input` with every str component encoded from unicode to
utf-8.
'''
if isinstance(input, dict):
try:
# python-2.6
return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
for key, value
in input.iteritems())
except AttributeError:
# since python-2.7 cf. http://stackoverflow.com/a/1747827
# [the ugly eval('...') is required for a valid syntax on
# python-2.6, cf. http://stackoverflow.com/a/25049535]
return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
for key, value
in input.items()}''')
elif isinstance(input, list):
return [convert_unicode_2_utf8(element) for element in input]
# elif order relevant: python2 vs. python3
# cf. http://stackoverflow.com/a/19877309
elif isinstance(input, str):
return input
else:
try:
if eval('''isinstance(input, unicode)'''):
return input.encode('utf-8')
except NameError:
# unicode does not exist in python-3.x
pass
return input
def load_json(filename, gzip_mode=False):
'''Return the json-file data, with all strings utf-8 encoded.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'rt') as fh:
data = json.load(fh)
data = convert_unicode_2_utf8(data)
return data
except AttributeError:
# Python-2.6
fh = open_file(filename, 'rt')
data = json.load(fh)
fh.close()
data = convert_unicode_2_utf8(data)
return data
def write_json(data, filename, gzip_mode=False):
'''Write the python data structure as a json-Object to filename.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'wt') as fh:
json.dump(obj=data, fp=fh, sort_keys=True)
except AttributeError:
# Python-2.6
fh = open_file(filename, 'wt')
json.dump(obj=data, fp=fh, sort_keys=True)
fh.close()
def create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def flat_list(list_of_lists):
'''Return a simple list out of a list of lists.'''
return [item for sublist in list_of_lists for item in sublist]
def text_with_newlines(text, line_length=78, newline='\n'):
    '''Return text with a `newline` inserted after each `line_length` char.

    Return `text` unchanged if line_length == 0.
    '''
    if line_length <= 0:
        # no wrapping requested
        return text
    if len(text) <= line_length:
        # already fits on a single line
        return text
    chunks = (text[start:start + line_length]
              for start in range(0, len(text), line_length))
    return newline.join(chunks)
def func_has_arg(func, arg):
    '''Return True if an argument `arg` exists for function `func`, else False.

    :param func: the function to inspect
    :param arg: name of the argument as a str
    '''
    try:
        # Python 3 (getargspec() was deprecated in 3.0 and removed in 3.11)
        argspec = inspect.getfullargspec(func)
    except AttributeError:
        # Python 2
        argspec = inspect.getargspec(func)
    return arg in argspec.args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
    '''A memoize decorator for class properties.

    Return a cached property that is calculated by function `func` on first
    access.

    The value is stored in ``self._cache`` under the key ``id(self)``.  With
    `with_del_hook=True` a cleanup hook is registered in
    ``self._del_hook_cache`` which drops both cache entries when the
    instance is deleted (used by `namedtuple` below, whose instances are
    immutable and share class-level cache dicts).
    '''
    def hook_for(that):
        # Build a __del__ replacement that drops the cache entries of
        # `that` and then delegates to the original __del__ (if any).
        try:
            orig_del = that.__del__
        except AttributeError:
            orig_del = None
        def del_hook(*args, **kwargs):
            del that._cache[id(that)]
            del that._del_hook_cache[id(that)]
            if orig_del is not None:
                orig_del(that, *args, **kwargs)
        try:
            if orig_del is not None:
                that.__del__ = del_hook
        except AttributeError:
            # that.__del__ is a class property and cannot be changed by instance
            orig_del = None
        return del_hook
    def add_to_del_hook_cache(that):
        # Register the del-hook for `that`, if requested and possible.
        if with_del_hook:
            try:
                that._del_hook_cache[id(that)] = hook_for(that)
            except AttributeError:
                # when that._del_hook_cache not exists, it means it is not a
                # class property. Then, we don't need a del_hook().
                pass
    @functools.wraps(func)
    def get(self):
        # Fast path: the value is already cached for this instance.
        try:
            return self._cache[id(self)][func]
        except AttributeError:
            # no _cache dict yet: create one for this instance
            # NOTE(review): keying on id(self) without a del-hook may
            # collide when ids get reused -- TODO confirm acceptable
            self._cache = {id(self): {}, }
            add_to_del_hook_cache(self)
        except KeyError:
            # _cache exists, but maybe no entry for this instance yet
            try:
                self._cache[id(self)]
            except KeyError:
                self._cache[id(self)] = {}
                add_to_del_hook_cache(self)
        # Slow path: compute, store and return the value.
        val = self._cache[id(self)][func] = func(self)
        return val
    return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
    '''Like `collections.namedtuple`, with default values and lazy vals.

    Default values are given inline in `field_names`, e.g.
    ``"url, name=None, basedir='~/repos'"``.  `lazy_vals` is an optional
    dict mapping attribute names to functions which become cached
    properties (see `lazy_val` above).

    NOTE(review): default values are parsed with eval() -- only call this
    with trusted `field_names` strings.
    '''
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    field_names_without_defaults = []
    defaults = []
    for name in field_names:
        # split off an inline default value, e.g. "name=None"
        list_ = name.split('=', 1)
        if len(list_) > 1:
            name, default = list_
            defaults.append(eval(default))
        elif len(defaults) != 0:
            raise ValueError('non-keyword arg after keyword arg in field_names')
        field_names_without_defaults.append(name)
    _class = collections.namedtuple(typename, field_names_without_defaults,
                                    **kwargs)
    _class.__new__.__defaults__ = tuple(defaults)
    if lazy_vals is not None:
        # namedtuple instances are tuples and so they are immutable. We cannot
        # add an instance property _cache. So we create one global _cache dict
        # and one _del_hook_cache dict as class properties for storing the lazy
        # vals and the del-hooks and enable the del_hook-functionality by
        # adding a __del__ attribute function wich calls the del-hook.
        _class._cache = {}
        _class._del_hook_cache = {}
        def noop(): pass
        _class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
        for attr_name, func in lazy_vals.items():
            setattr(_class, attr_name,
                    lazy_val(func, with_del_hook=True))
    return _class
# TODO unit test
class StructContext(object):
    '''A file-like wrapper which enables sequential access of a (data)
    struct.

    Entering and leaving the ``with``-context resets the read offset to the
    begin of the struct.
    '''
    def __init__(self, data_struct):
        self.data_struct = data_struct
        self.offset = 0
    def __enter__(self):
        self.seek(0)
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.seek(0)
    def seek(self, offset):
        '''Set the current read position to `offset`.'''
        self.offset = offset
    def read(self, fmt):
        '''Unpack struct-format `fmt` at the current offset and advance it.

        Return a single value if `fmt` describes exactly one field, else a
        tuple of values.
        '''
        values = struct.unpack_from(fmt, self.data_struct, self.offset)
        self.offset += struct.calcsize(fmt)
        if len(values) == 1:
            return values[0]
        return values
    @lazy_val
    def length(self):
        return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
    '''timeout context.

    Usage example:
    >>> with timeout(0.1) as t:
    ...     while True:
    ...         if t.timed_out:
    ...             break
    ...         print('.')
    ...         time.sleep(0.02)
    .
    .
    .
    .
    .

    For more usage, see https://stackoverflow.com/a/15190306
    '''
    def __init__(self, seconds):
        # duration in seconds until `timed_out` becomes True
        self.seconds = seconds
    def __enter__(self):
        # the deadline is fixed on entering the context, not on creation
        self.die_after = time.time() + self.seconds
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        # nothing to clean up; exceptions propagate unchanged
        pass
    @property
    def timed_out(self):
        '''True once the wall clock has passed the deadline.'''
        return time.time() > self.die_after
if __name__ == '__main__':
    # run the doctests of this module
    import doctest
    doctest.testmod()
    # smoke test for namedtuple() with inline default values
    # Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    assert Repo.__new__.__defaults__ == (None, '~/repos')
    r = Repo(url='https://github.com/theno/fabsetup.git')
    assert r.__repr__() == 'Repo(' \
           'url=\'https://github.com/theno/fabsetup.git\', ' \
           'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | uncomment_or_update_or_append_line | python | def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
keep_backup=True,
update_or_append_line=update_or_append_line):
'''Remove the comment of an commented out line and make the line "active".
If such an commented out line not exists it would be appended.
'''
uncommented = update_or_append_line(filename, prefix=comment+prefix,
new_line=new_line,
keep_backup=keep_backup, append=False)
if not uncommented:
update_or_append_line(filename, prefix, new_line,
keep_backup=keep_backup, append=True) | Remove the comment of an commented out line and make the line "active".
If such an commented out line not exists it would be appended. | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L370-L382 | [
"def update_or_append_line(filename, prefix, new_line, keep_backup=True,\n append=True):\n '''Search in file 'filename' for a line starting with 'prefix' and replace\n the line by 'new_line'.\n\n If a line starting with 'prefix' not exists 'new_line' will be appended.\n If the file not exists, it will be created.\n\n Return False if new_line was appended, else True (i.e. if the prefix was\n found within of the file).\n '''\n same_line_exists, line_updated = False, False\n filename = os.path.expanduser(filename)\n if os.path.isfile(filename):\n backup = filename + '~'\n shutil.move(filename, backup)\n # with open(filename, 'w') as dest, open(backup, 'r') as source:\n with open(filename, 'w') as dest:\n with open(backup, 'r') as source:\n # try update..\n for line in source:\n if line == new_line:\n same_line_exists = True\n if line.startswith(prefix):\n dest.write(new_line + '\\n')\n line_updated = True\n else:\n dest.write(line)\n # ..or append\n if not (same_line_exists or line_updated) and append:\n dest.write(new_line + '\\n')\n if not keep_backup:\n os.remove(backup)\n else:\n with open(filename, 'w') as dest:\n dest.write(new_line + '\\n')\n return same_line_exists or line_updated\n"
] | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
    '''Return the string given by param formatted with the callers locals.'''
    callers_locals = {}
    # grab the local variables of the direct caller (one frame up)
    frame = inspect.currentframe()
    try:
        outerframe = frame.f_back
        callers_locals = outerframe.f_locals
    finally:
        # explicitly drop the frame reference to avoid reference cycles,
        # as recommended by the `inspect` module documentation
        del frame
    return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
    '''Return the first line of the (callers) docstring.'''
    # looks up the calling function by name in *this* module's globals --
    # does not work if called from another module (see comment above)
    return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
    '''Color wrapper.

    Return a function which wraps a text in the ANSI escape sequence for
    `color_code`.

    Example:
    >>> blue = _wrap_with('34')
    >>> print(blue('text'))
    \033[34mtext\033[0m
    '''
    def inner(text, bold=False):
        '''Inner color function.'''
        # NOTE: flo() formats with the *locals* of this function, so the
        # names `code` and `text` must not be renamed
        code = color_code
        if bold:
            code = flo("1;{code}")
        return flo('\033[{code}m{text}\033[0m')
    return inner
# ready-made ANSI color functions, e.g. print(red('error'))
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
    '''Return first paragraph of multiline_str as a oneliner.

    When without_trailing_dot is True, the last char of the first paragraph
    will be removed, if it is a dot ('.').

    Examples:
    >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
    >>> print(first_paragraph(multiline_str))
    first line second line
    >>> multiline_str = 'first \\n second \\n \\n next paragraph '
    >>> print(first_paragraph(multiline_str))
    first second
    >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
    >>> print(first_paragraph(multiline_str, maxlength=3))
    fir
    >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
    >>> print(first_paragraph(multiline_str, maxlength=78))
    first line second line
    >>> multiline_str = 'first line.'
    >>> print(first_paragraph(multiline_str))
    first line
    >>> multiline_str = 'first line.'
    >>> print(first_paragraph(multiline_str, without_trailing_dot=False))
    first line.
    >>> multiline_str = ''
    >>> print(first_paragraph(multiline_str))
    <BLANKLINE>
    '''
    stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
    paragraph = stripped.split('\n\n')[0]
    res = paragraph.replace('\n', ' ')
    if without_trailing_dot and res.endswith('.'):
        # only strip a real *trailing* dot; the previous
        # `res.rsplit('.', 1)[0]` also cut off everything behind a dot in
        # the middle of the text (e.g. 'version 1.2' became 'version 1')
        res = res[:-1]
    if maxlength:
        res = res[0:maxlength]
    return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
    '''Print the first paragraph of the docstring of the decorated function.

    The paragraph will be printed as a oneliner.

    May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
    or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
    (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).

    Examples:
    # >>> @print_doc1
    # ... def foo():
    # ...     """First line of docstring.
    # ...
    # ...     another line.
    # ...     """
    # ...     pass
    # ...
    # >>> foo()
    # \033[34mFirst line of docstring\033[0m
    # >>> @print_doc1
    # ... def foo():
    # ...     """First paragraph of docstring which contains more than one
    # ...     line.
    # ...
    # ...     Another paragraph.
    # ...     """
    # ...     pass
    # ...
    # >>> foo()
    # \033[34mFirst paragraph of docstring which contains more than one line\033[0m
    '''
    # output settings from kwargs or take defaults
    color = kwargs.get('color', blue)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '\n')
    def real_decorator(func):
        '''real decorator function'''
        @wraps(func)
        def wrapper(*args, **kwargs):
            '''the wrapper function'''
            try:
                prgf = first_paragraph(func.__doc__)
                print(color(prefix + prgf + tail, bold))
            except AttributeError as exc:
                # func.__doc__ is None: the function has no docstring
                name = func.__name__
                # NOTE: flo() formats with this function's locals (`name`)
                print(red(flo('{name}() has no docstring')))
                raise(exc)
            return func(*args, **kwargs)
        return wrapper
    # distinguish `@print_doc1` (args holds the decorated function) from
    # `@print_doc1(...)` (kwargs holds the output settings)
    invoked = bool(not args or kwargs)
    if not invoked:
        # invoke decorator function which returns the wrapper function
        return real_decorator(func=args[0])
    return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
    '''Decorator, print the full name of the decorated function.

    May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
    or with named arguments ``color``, ``bold``, or ``prefix``
    (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
    '''
    # output settings from kwargs or take defaults
    color = kwargs.get('color', default_color)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '')
    def real_decorator(func):
        '''real decorator function'''
        @wraps(func)
        def wrapper(*args, **kwargs):
            '''the wrapper function'''
            first_line = ''
            try:
                # __qualname__ only exists on Python 3
                first_line = func.__module__ + '.' + func.__qualname__
            except AttributeError as exc:
                # Python 2: fall back to the bare function name
                first_line = func.__name__
            print(color(prefix + first_line + tail, bold))
            return func(*args, **kwargs)
        return wrapper
    # distinguish `@print_full_name` from `@print_full_name(...)`
    invoked = bool(not args or kwargs)
    if not invoked:
        # invoke decorator function which returns the wrapper function
        return real_decorator(func=args[0])
    return real_decorator
def _get_input():
    '''Read one line from stdin (Python 2 and Python 3 compatible).'''
    # raw_input() was renamed to input() in Python 3
    try:
        return raw_input()  # Python-2.*
    except NameError:
        return input()  # Python-3.*
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
    """Ask a yes/no question and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no", or None (which means an answer
    of the user is required).

    The "answer" return value is True for "yes" or False for "no".

    :raises ValueError: if `default` is not "yes", "no" or None
    """
    # mapping of accepted user inputs to the boolean return value
    valid = {"yes": True, "y": True, "ye": True, '1': True,
             "no": False, "n": False, '0': False, }
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # re-ask until a valid answer was given
    while True:
        sys.stdout.write(question + prompt)
        choice = _get_input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
    """Ask a question for input and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.

    The "answer" return value is a str.

    :raises ValueError: if `default` is neither None nor a str
    """
    if default is None or default == '':
        prompt = ' '
    elif type(default) == str:
        # NOTE: flo() formats with this function's locals (`default`)
        prompt = flo(' [{default}] ')
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # re-ask until a non-empty answer was given (or return the default)
    while True:
        sys.stdout.write(color(question + prompt))
        choice = _get_input()
        if default is not None and choice == '':
            return default
        if choice != '':
            return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
    '''Return str template with applied substitutions.

    Placeholders are written as ``{{name}}``; single curly braces stay
    untouched; ``[[[name]]]`` becomes a literal ``{{name}}`` in the result.

    Example:
    >>> template = 'Asyl for {{name}} {{surname}}!'
    >>> filled_out_template_str(template, name='Edward', surname='Snowden')
    'Asyl for Edward Snowden!'
    >>> template = '[[[foo]]] was substituted by {{foo}}'
    >>> filled_out_template_str(template, foo='bar')
    '{{foo}} was substituted by bar'
    >>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
    >>> filled_out_template_str(template, curly='remains unchanged')
    'names wrapped by {single} {curly} {braces} remains unchanged'
    '''
    # escape every brace for str.format() ...
    template = template.replace('{', '{{')
    template = template.replace('}', '}}')
    # ... which turns the original {{placeholders}} into {placeholders}
    template = template.replace('{{{{', '{')
    template = template.replace('}}}}', '}')
    template = template.format(**substitutions)
    # unescape the single braces again
    template = template.replace('{{', '{')
    template = template.replace('}}', '}')
    # [[[escaped]]] placeholders become literal {{placeholders}}
    # NOTE: the order of the replace() calls above is significant
    template = template.replace('[[[', '{{')
    template = template.replace(']]]', '}}')
    return template
# TODO: unit tests
def filled_out_template(filename, **substitutions):
    '''Return content of file filename with applied substitutions.'''
    with open(filename, 'r') as fp:
        template = fp.read()
    return filled_out_template_str(template, **substitutions)
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
                          append=True):
    '''Search in file 'filename' for a line starting with 'prefix' and replace
    the line by 'new_line'.

    If a line starting with 'prefix' not exists 'new_line' will be appended.
    If the file not exists, it will be created.

    Return False if new_line was appended, else True (i.e. if the prefix was
    found within of the file).
    '''
    same_line_exists, line_updated = False, False
    filename = os.path.expanduser(filename)
    if os.path.isfile(filename):
        # rewrite the file line by line via a backup copy
        backup = filename + '~'
        shutil.move(filename, backup)
        # with open(filename, 'w') as dest, open(backup, 'r') as source:
        # (nested with-statements for Python-2.6 compatibility)
        with open(filename, 'w') as dest:
            with open(backup, 'r') as source:
                # try update..
                for line in source:
                    # NOTE(review): `line` still contains its trailing
                    # newline here; if callers pass `new_line` without one,
                    # this comparison never matches -- TODO confirm the
                    # intended semantics
                    if line == new_line:
                        same_line_exists = True
                    if line.startswith(prefix):
                        dest.write(new_line + '\n')
                        line_updated = True
                    else:
                        dest.write(line)
                # ..or append
                if not (same_line_exists or line_updated) and append:
                    dest.write(new_line + '\n')
        if not keep_backup:
            os.remove(backup)
    else:
        # the file does not exist yet: create it with new_line as content
        with open(filename, 'w') as dest:
            dest.write(new_line + '\n')
    return same_line_exists or line_updated
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
                     update_or_append_line=update_or_append_line):
    '''Comment line out by putting a comment sign in front of the line.

    If the file does not contain the line, the files content will not be
    changed (but the file will be touched in every case).
    '''
    commented_line = comment + line
    update_or_append_line(filename, prefix=line, new_line=commented_line,
                          append=False)
# TODO: unit tests
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
'''Return a copy of `input` with every str component encoded from unicode to
utf-8.
'''
if isinstance(input, dict):
try:
# python-2.6
return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
for key, value
in input.iteritems())
except AttributeError:
# since python-2.7 cf. http://stackoverflow.com/a/1747827
# [the ugly eval('...') is required for a valid syntax on
# python-2.6, cf. http://stackoverflow.com/a/25049535]
return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
for key, value
in input.items()}''')
elif isinstance(input, list):
return [convert_unicode_2_utf8(element) for element in input]
# elif order relevant: python2 vs. python3
# cf. http://stackoverflow.com/a/19877309
elif isinstance(input, str):
return input
else:
try:
if eval('''isinstance(input, unicode)'''):
return input.encode('utf-8')
except NameError:
# unicode does not exist in python-3.x
pass
return input
def load_json(filename, gzip_mode=False):
'''Return the json-file data, with all strings utf-8 encoded.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'rt') as fh:
data = json.load(fh)
data = convert_unicode_2_utf8(data)
return data
except AttributeError:
# Python-2.6
fh = open_file(filename, 'rt')
data = json.load(fh)
fh.close()
data = convert_unicode_2_utf8(data)
return data
def write_json(data, filename, gzip_mode=False):
'''Write the python data structure as a json-Object to filename.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'wt') as fh:
json.dump(obj=data, fp=fh, sort_keys=True)
except AttributeError:
# Python-2.6
fh = open_file(filename, 'wt')
json.dump(obj=data, fp=fh, sort_keys=True)
fh.close()
def create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def flat_list(list_of_lists):
'''Return a simple list out of a list of lists.'''
return [item for sublist in list_of_lists for item in sublist]
def text_with_newlines(text, line_length=78, newline='\n'):
'''Return text with a `newline` inserted after each `line_length` char.
Return `text` unchanged if line_length == 0.
'''
if line_length > 0:
if len(text) <= line_length:
return text
else:
return newline.join([text[idx:idx+line_length]
for idx
in range(0, len(text), line_length)])
else:
return text
def func_has_arg(func, arg):
'''Return True if an argument `arg` exists for function `func`, else False.
'''
return arg in inspect.getargspec(func).args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
'''A memoize decorator for class properties.
Return a cached property that is calculated by function `func` on first
access.
'''
def hook_for(that):
try:
orig_del = that.__del__
except AttributeError:
orig_del = None
def del_hook(*args, **kwargs):
del that._cache[id(that)]
del that._del_hook_cache[id(that)]
if orig_del is not None:
orig_del(that, *args, **kwargs)
try:
if orig_del is not None:
that.__del__ = del_hook
except AttributeError:
# that.__del__ is a class property and cannot be changed by instance
orig_del = None
return del_hook
def add_to_del_hook_cache(that):
if with_del_hook:
try:
that._del_hook_cache[id(that)] = hook_for(that)
except AttributeError:
# when that._del_hook_cache not exists, it means it is not a
# class property. Then, we don't need a del_hook().
pass
@functools.wraps(func)
def get(self):
try:
return self._cache[id(self)][func]
except AttributeError:
self._cache = {id(self): {}, }
add_to_del_hook_cache(self)
except KeyError:
try:
self._cache[id(self)]
except KeyError:
self._cache[id(self)] = {}
add_to_del_hook_cache(self)
val = self._cache[id(self)][func] = func(self)
return val
return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
field_names_without_defaults = []
defaults = []
for name in field_names:
list_ = name.split('=', 1)
if len(list_) > 1:
name, default = list_
defaults.append(eval(default))
elif len(defaults) != 0:
raise ValueError('non-keyword arg after keyword arg in field_names')
field_names_without_defaults.append(name)
_class = collections.namedtuple(typename, field_names_without_defaults,
**kwargs)
_class.__new__.__defaults__ = tuple(defaults)
if lazy_vals is not None:
# namedtuple instances are tuples and so they are immutable. We cannot
# add an instance property _cache. So we create one global _cache dict
# and one _del_hook_cache dict as class properties for storing the lazy
# vals and the del-hooks and enable the del_hook-functionality by
# adding a __del__ attribute function wich calls the del-hook.
_class._cache = {}
_class._del_hook_cache = {}
def noop(): pass
_class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
for attr_name, func in lazy_vals.items():
setattr(_class, attr_name,
lazy_val(func, with_del_hook=True))
return _class
# TODO unit test
class StructContext(object):
'''An instance of this is a file like object which enables access of an
(data) struct.
'''
def __init__(self, data_struct):
self.data_struct = data_struct
self.offset = 0
def __enter__(self):
self.seek(0)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.seek(0)
def seek(self, offset):
self.offset = offset
def read(self, fmt):
data = struct.unpack_from(fmt, self.data_struct, self.offset)
self.offset += struct.calcsize(fmt)
if len(data) == 1:
return data[0]
return data
@lazy_val
def length(self):
return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
'''timeout context.
Usage example:
>>> with timeout(0.1) as t:
... while True:
... if t.timed_out:
... break
... print('.')
... time.sleep(0.02)
.
.
.
.
.
For more usage, see https://stackoverflow.com/a/15190306
'''
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
self.die_after = time.time() + self.seconds
return self
def __exit__(self, type, value, traceback):
pass
@property
def timed_out(self):
return time.time() > self.die_after
if __name__ == '__main__':
import doctest
doctest.testmod()
# Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
assert Repo.__new__.__defaults__ == (None, '~/repos')
r = Repo(url='https://github.com/theno/fabsetup.git')
assert r.__repr__() == 'Repo(' \
'url=\'https://github.com/theno/fabsetup.git\', ' \
'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | convert_unicode_2_utf8 | python | def convert_unicode_2_utf8(input):
'''Return a copy of `input` with every str component encoded from unicode to
utf-8.
'''
if isinstance(input, dict):
try:
# python-2.6
return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
for key, value
in input.iteritems())
except AttributeError:
# since python-2.7 cf. http://stackoverflow.com/a/1747827
# [the ugly eval('...') is required for a valid syntax on
# python-2.6, cf. http://stackoverflow.com/a/25049535]
return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
for key, value
in input.items()}''')
elif isinstance(input, list):
return [convert_unicode_2_utf8(element) for element in input]
# elif order relevant: python2 vs. python3
# cf. http://stackoverflow.com/a/19877309
elif isinstance(input, str):
return input
else:
try:
if eval('''isinstance(input, unicode)'''):
return input.encode('utf-8')
except NameError:
# unicode does not exist in python-3.x
pass
return input | Return a copy of `input` with every str component encoded from unicode to
utf-8. | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L387-L417 | null | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
'''Return the first line of the (callers) docstring.'''
return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
'''Return first paragraph of multiline_str as a oneliner.
When without_trailing_dot is True, the last char of the first paragraph
will be removed, if it is a dot ('.').
Examples:
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str))
first line second line
>>> multiline_str = 'first \\n second \\n \\n next paragraph '
>>> print(first_paragraph(multiline_str))
first second
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=3))
fir
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=78))
first line second line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str))
first line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str, without_trailing_dot=False))
first line.
>>> multiline_str = ''
>>> print(first_paragraph(multiline_str))
<BLANKLINE>
'''
stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
paragraph = stripped.split('\n\n')[0]
res = paragraph.replace('\n', ' ')
if without_trailing_dot:
res = res.rsplit('.', 1)[0]
if maxlength:
res = res[0:maxlength]
return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold))
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise(exc)
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
'''Decorator, print the full name of the decorated function.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, or ``prefix``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
'''
color = kwargs.get('color', default_color)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
first_line = ''
try:
first_line = func.__module__ + '.' + func.__qualname__
except AttributeError as exc:
first_line = func.__name__
print(color(prefix + first_line + tail, bold))
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
def _get_input():
try:
return raw_input() # Python-2.*
except NameError:
return input() # Python-3.*
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = _get_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
"""Ask a question for input and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
The "answer" return value is a str.
"""
if default is None or default == '':
prompt = ' '
elif type(default) == str:
prompt = flo(' [{default}] ')
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(color(question + prompt))
choice = _get_input()
if default is not None and choice == '':
return default
if choice != '':
return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
    '''Return str template with applied substitutions.

    Placeholders are written as ``{{name}}``; literal ``{{name}}`` output is
    produced by writing ``[[[name]]]``.  Single curly braces pass through
    unchanged.

    Example:

    >>> template = 'Asyl for {{name}} {{surname}}!'
    >>> filled_out_template_str(template, name='Edward', surname='Snowden')
    'Asyl for Edward Snowden!'

    >>> template = '[[[foo]]] was substituted by {{foo}}'
    >>> filled_out_template_str(template, foo='bar')
    '{{foo}} was substituted by bar'

    >>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
    >>> filled_out_template_str(template, curly='remains unchanged')
    'names wrapped by {single} {curly} {braces} remains unchanged'
    '''
    # escape every brace, then un-escape the quadrupled ones: afterwards
    # only the original {{name}} markers remain as str.format() fields
    pre_format = (('{', '{{'), ('}', '}}'), ('{{{{', '{'), ('}}}}', '}'))
    # after formatting: restore literal braces and turn [[[..]]] markers
    # into literal double braces
    post_format = (('{{', '{'), ('}}', '}'), ('[[[', '{{'), (']]]', '}}'))
    text = template
    for old, new in pre_format:
        text = text.replace(old, new)
    text = text.format(**substitutions)
    for old, new in post_format:
        text = text.replace(old, new)
    return text
# TODO: unit tests
def filled_out_template(filename, **substitutions):
    '''Return content of file filename with applied substitutions.'''
    with open(filename, 'r') as template_file:
        raw_template = template_file.read()
    return filled_out_template_str(raw_template, **substitutions)
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
                          append=True):
    '''Search in file 'filename' for a line starting with 'prefix' and replace
    the line by 'new_line'.

    If a line starting with 'prefix' not exists 'new_line' will be appended
    (unless `append` is False).
    If the file not exists, it will be created.

    :param filename: path of the file to edit; a leading '~' is expanded
    :param prefix: prefix that identifies the line(s) to replace
    :param new_line: replacement line (a trailing newline is added on write)
    :param keep_backup: if True, keep the original file as 'filename~'
    :param append: if False, never append; only replace existing lines
    :return: False if new_line was appended, else True (i.e. if the prefix was
             found within of the file)
    '''
    same_line_exists, line_updated = False, False
    filename = os.path.expanduser(filename)
    if os.path.isfile(filename):
        # rewrite via backup: move the original aside, then copy it back
        # line by line, substituting matching lines on the way
        backup = filename + '~'
        shutil.move(filename, backup)
        # with open(filename, 'w') as dest, open(backup, 'r') as source:
        with open(filename, 'w') as dest:
            with open(backup, 'r') as source:
                # try update..
                for line in source:
                    if line == new_line:
                        # NOTE(review): `line` still carries its trailing
                        # '\n' here, so this comparison only matches when
                        # new_line includes the newline too -- verify intent
                        same_line_exists = True
                    if line.startswith(prefix):
                        dest.write(new_line + '\n')
                        line_updated = True
                    else:
                        dest.write(line)
                # ..or append
                if not (same_line_exists or line_updated) and append:
                    dest.write(new_line + '\n')
        if not keep_backup:
            os.remove(backup)
    else:
        # file does not exist yet: create it with new_line as only content
        with open(filename, 'w') as dest:
            dest.write(new_line + '\n')
    return same_line_exists or line_updated
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
                     update_or_append_line=update_or_append_line):
    '''Comment line out by putting a comment sign in front of the line.

    If the file does not contain the line, the files content will not be
    changed (but the file will be touched in every case).
    '''
    deactivated = comment + line
    update_or_append_line(filename, prefix=line, new_line=deactivated,
                          append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
                                       keep_backup=True,
                                       update_or_append_line=update_or_append_line):
    '''Remove the comment of an commented out line and make the line "active".

    If such an commented out line not exists it would be appended.
    '''
    # first pass: try to replace the commented-out variant of the line
    activated = update_or_append_line(filename, prefix=comment + prefix,
                                      new_line=new_line,
                                      keep_backup=keep_backup, append=False)
    if not activated:
        # no commented-out line found: update an active line, or append
        update_or_append_line(filename, prefix, new_line,
                              keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def load_json(filename, gzip_mode=False):
    '''Return the json-file data, with all strings utf-8 encoded.

    :param filename: path of the JSON file to read
    :param gzip_mode: if True, read the file as gzip-compressed
    :return: the parsed data with every unicode str converted to utf-8
             (only relevant on Python-2; a no-op for str on Python-3)
    '''
    open_file = open
    if gzip_mode:
        open_file = gzip.open
    try:
        with open_file(filename, 'rt') as fh:
            data = json.load(fh)
            data = convert_unicode_2_utf8(data)
            return data
    except AttributeError:
        # Python-2.6: gzip file objects were no context managers yet,
        # so fall back to explicit open/close
        fh = open_file(filename, 'rt')
        data = json.load(fh)
        fh.close()
        data = convert_unicode_2_utf8(data)
        return data
def write_json(data, filename, gzip_mode=False):
    '''Write the python data structure as a json-Object to filename.

    Keys are written sorted; with `gzip_mode` the file is gzip-compressed.
    '''
    opener = gzip.open if gzip_mode else open
    try:
        with opener(filename, 'wt') as outfile:
            json.dump(obj=data, fp=outfile, sort_keys=True)
    except AttributeError:
        # Python-2.6: gzip file objects were no context managers yet
        outfile = opener(filename, 'wt')
        json.dump(obj=data, fp=outfile, sort_keys=True)
        outfile.close()
def create_dir_if_not_exists(path):
    '''Create directory `path` (including parent dirs) if it does not exist.

    EAFP instead of an exists()-then-makedirs() check: the original check
    raced with concurrent creation of the same directory (TOCTOU).  Like
    before, nothing happens if `path` already exists (even as a file);
    any other makedirs failure (e.g. permissions) is re-raised.
    '''
    try:
        os.makedirs(path)
    except OSError:
        # swallow the error only if the path meanwhile/already exists
        if not os.path.exists(path):
            raise
def flat_list(list_of_lists):
    '''Return a simple list out of a list of lists.'''
    flattened = []
    for sublist in list_of_lists:
        flattened.extend(sublist)
    return flattened
def text_with_newlines(text, line_length=78, newline='\n'):
    '''Return text with a `newline` inserted after each `line_length` char.

    Return `text` unchanged if line_length == 0.
    '''
    # nothing to wrap: disabled wrapping or text already short enough
    if line_length <= 0 or len(text) <= line_length:
        return text
    chunks = (text[pos:pos + line_length]
              for pos in range(0, len(text), line_length))
    return newline.join(chunks)
def func_has_arg(func, arg):
    '''Return True if an argument `arg` exists for function `func`, else False.
    '''
    try:
        # inspect.getargspec() was deprecated since Python-3.0 and removed
        # in Python-3.11; getfullargspec() returns the same .args list
        spec = inspect.getfullargspec(func)
    except AttributeError:
        # Python-2.x has no getfullargspec()
        spec = inspect.getargspec(func)
    return arg in spec.args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
    '''A memoize decorator for class properties.

    Return a cached property that is calculated by function `func` on first
    access.  Values are stored in a dict ``_cache`` keyed by ``id(instance)``
    and then by the function object; the cache is created on the instance
    itself, or expected as a class attribute for immutable instances
    (e.g. the namedtuple replacement below).

    :param func: method (taking only ``self``) whose result is memoized
    :param with_del_hook: if True, register a hook evicting the instance's
                          cache entries on destruction -- needed when
                          ``_cache`` lives on the class and would otherwise
                          keep growing
    '''
    def hook_for(that):
        # wrap a pre-existing __del__ (if any) so cache entries are
        # evicted when the instance dies
        try:
            orig_del = that.__del__
        except AttributeError:
            orig_del = None

        def del_hook(*args, **kwargs):
            del that._cache[id(that)]
            del that._del_hook_cache[id(that)]
            if orig_del is not None:
                orig_del(that, *args, **kwargs)
        try:
            if orig_del is not None:
                that.__del__ = del_hook
        except AttributeError:
            # that.__del__ is a class property and cannot be changed by
            # instance
            orig_del = None
        return del_hook

    def add_to_del_hook_cache(that):
        if with_del_hook:
            try:
                that._del_hook_cache[id(that)] = hook_for(that)
            except AttributeError:
                # when that._del_hook_cache not exists, it means it is not a
                # class property. Then, we don't need a del_hook().
                pass

    @functools.wraps(func)
    def get(self):
        try:
            return self._cache[id(self)][func]
        except AttributeError:
            # very first access on a mutable instance: create the cache
            self._cache = {id(self): {}, }
            add_to_del_hook_cache(self)
        except KeyError:
            # cache exists, but not yet for this instance or this func
            try:
                self._cache[id(self)]
            except KeyError:
                self._cache[id(self)] = {}
                add_to_del_hook_cache(self)
        # compute once and memoize under the function object as key
        val = self._cache[id(self)][func] = func(self)
        return val
    return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
    '''Drop-in replacement for ``collections.namedtuple`` with two extras:
    default values (written as ``"name=default"`` entries in `field_names`)
    and `lazy_vals`, a dict mapping attribute names to functions whose
    results are memoized on first access (via `lazy_val`).
    '''
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    field_names_without_defaults = []
    defaults = []
    for name in field_names:
        list_ = name.split('=', 1)
        if len(list_) > 1:
            name, default = list_
            # NOTE(review): default values are eval()'d -- `field_names`
            # must never come from untrusted input
            defaults.append(eval(default))
        elif len(defaults) != 0:
            # same rule as in a function signature: defaults only at the end
            raise ValueError('non-keyword arg after keyword arg in field_names')
        field_names_without_defaults.append(name)
    _class = collections.namedtuple(typename, field_names_without_defaults,
                                    **kwargs)
    # defaults apply to the trailing fields, like function defaults
    _class.__new__.__defaults__ = tuple(defaults)
    if lazy_vals is not None:
        # namedtuple instances are tuples and so they are immutable. We cannot
        # add an instance property _cache. So we create one global _cache dict
        # and one _del_hook_cache dict as class properties for storing the lazy
        # vals and the del-hooks and enable the del_hook-functionality by
        # adding a __del__ attribute function which calls the del-hook.
        _class._cache = {}
        _class._del_hook_cache = {}

        def noop(): pass
        _class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
        for attr_name, func in lazy_vals.items():
            setattr(_class, attr_name,
                    lazy_val(func, with_del_hook=True))
    return _class
# TODO unit test
class StructContext(object):
    '''An instance of this is a file like object which enables access of an
    (data) struct.

    Wraps a bytes-like buffer and reads values from it sequentially via
    ``struct.unpack_from`` format strings.
    '''

    def __init__(self, data_struct):
        # data_struct: bytes-like buffer readable by struct.unpack_from
        self.data_struct = data_struct
        # current read position in bytes
        self.offset = 0

    def __enter__(self):
        self.seek(0)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # rewind on exit so the instance can be re-entered later
        self.seek(0)

    def seek(self, offset):
        '''Set the current read position to byte `offset`.'''
        self.offset = offset

    def read(self, fmt):
        '''Unpack struct format `fmt` at the current offset and advance the
        offset by the format's size.

        Return a single value when `fmt` describes exactly one field,
        otherwise a tuple of values.
        '''
        data = struct.unpack_from(fmt, self.data_struct, self.offset)
        self.offset += struct.calcsize(fmt)
        if len(data) == 1:
            return data[0]
        return data

    @lazy_val
    def length(self):
        # memoized: total size of the underlying buffer in bytes
        return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
    '''Context manager that signals when a time limit has elapsed.

    The deadline starts running on ``__enter__``; poll the ``timed_out``
    property to learn whether it has passed.

    Usage example:

    >>> with timeout(0.1) as t:
    ...     while True:
    ...         if t.timed_out:
    ...             break
    ...         print('.')
    ...         time.sleep(0.02)
    .
    .
    .
    .
    .

    For more usage, see https://stackoverflow.com/a/15190306
    '''

    def __init__(self, seconds):
        # length of the timeout window in seconds (may be fractional)
        self.seconds = seconds

    def __enter__(self):
        # the deadline is fixed at context entry
        self.die_after = time.time() + self.seconds
        return self

    def __exit__(self, type, value, traceback):
        # nothing to release; exceptions propagate unchanged
        pass

    @property
    def timed_out(self):
        '''True once the deadline has passed, False before.'''
        return time.time() > self.die_after
if __name__ == '__main__':
    # run this module's doctests
    import doctest
    doctest.testmod()
    # smoke test for the namedtuple-with-defaults replacement defined above
    # Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    assert Repo.__new__.__defaults__ == (None, '~/repos')
    r = Repo(url='https://github.com/theno/fabsetup.git')
    assert r.__repr__() == 'Repo(' \
           'url=\'https://github.com/theno/fabsetup.git\', ' \
           'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | load_json | python | def load_json(filename, gzip_mode=False):
'''Return the json-file data, with all strings utf-8 encoded.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'rt') as fh:
data = json.load(fh)
data = convert_unicode_2_utf8(data)
return data
except AttributeError:
# Python-2.6
fh = open_file(filename, 'rt')
data = json.load(fh)
fh.close()
data = convert_unicode_2_utf8(data)
return data | Return the json-file data, with all strings utf-8 encoded. | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L420-L438 | [
"def convert_unicode_2_utf8(input):\n '''Return a copy of `input` with every str component encoded from unicode to\n utf-8.\n '''\n if isinstance(input, dict):\n try:\n # python-2.6\n return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))\n for key, value\n in input.iteritems())\n except AttributeError:\n # since python-2.7 cf. http://stackoverflow.com/a/1747827\n # [the ugly eval('...') is required for a valid syntax on\n # python-2.6, cf. http://stackoverflow.com/a/25049535]\n return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)\n for key, value\n in input.items()}''')\n elif isinstance(input, list):\n return [convert_unicode_2_utf8(element) for element in input]\n # elif order relevant: python2 vs. python3\n # cf. http://stackoverflow.com/a/19877309\n elif isinstance(input, str):\n return input\n else:\n try:\n if eval('''isinstance(input, unicode)'''):\n return input.encode('utf-8')\n except NameError:\n # unicode does not exist in python-3.x\n pass\n return input\n"
] | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
    '''Return the string given by param formatted with the callers locals.

    Poor man's f-string: ``x = 42; flo('x is {x}')`` returns ``'x is 42'``.
    Placeholder names must be local variables of the *calling* frame.
    '''
    callers_locals = {}
    frame = inspect.currentframe()
    try:
        # one frame up is the caller whose local names fill the template
        outerframe = frame.f_back
        callers_locals = outerframe.f_locals
    finally:
        # break the frame reference cycle to not delay garbage collection
        del frame
    return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
    '''Return the first line of the (callers) docstring.

    NOTE: looks the caller's name up in *this* module's globals(), so it
    only works for functions defined in this module.
    '''
    return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
    '''Color wrapper.

    Return a function that wraps text in the ANSI escape sequence for
    `color_code`.

    Example:

    >>> blue = _wrap_with('34')
    >>> print(blue('text'))
    \033[34mtext\033[0m
    '''
    def inner(text, bold=False):
        '''Inner color function.'''
        code = color_code
        if bold:
            # '1;' is the ANSI bold/bright modifier
            code = flo("1;{code}")
        return flo('\033[{code}m{text}\033[0m')
    return inner
# ready-to-use color functions: each takes (text, bold=False) and returns
# the text wrapped in the matching ANSI terminal escape sequence
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
    '''Return first paragraph of multiline_str as a oneliner.

    When without_trailing_dot is True, the last char of the first paragraph
    will be removed, if it is a dot ('.').

    Examples:

    >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
    >>> print(first_paragraph(multiline_str))
    first line second line

    >>> multiline_str = 'first \\n second \\n \\n next paragraph '
    >>> print(first_paragraph(multiline_str))
    first second

    >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
    >>> print(first_paragraph(multiline_str, maxlength=3))
    fir

    >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
    >>> print(first_paragraph(multiline_str, maxlength=78))
    first line second line

    >>> multiline_str = 'first line.'
    >>> print(first_paragraph(multiline_str))
    first line

    >>> multiline_str = 'first line.'
    >>> print(first_paragraph(multiline_str, without_trailing_dot=False))
    first line.

    >>> multiline_str = 'version 1.2 notes'
    >>> print(first_paragraph(multiline_str))
    version 1.2 notes

    >>> multiline_str = ''
    >>> print(first_paragraph(multiline_str))
    <BLANKLINE>
    '''
    stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
    paragraph = stripped.split('\n\n')[0]
    res = paragraph.replace('\n', ' ')
    if without_trailing_dot and res.endswith('.'):
        # only strip a literal trailing dot; the previous
        # `res.rsplit('.', 1)[0]` cut everything after the *last* dot
        # anywhere in the text (e.g. 'version 1.2 notes' -> 'version 1')
        res = res[:-1]
    if maxlength:
        res = res[0:maxlength]
    return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
    '''Print the first paragraph of the docstring of the decorated function.

    The paragraph will be printed as a oneliner.

    May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
    or with named arguments ``color``, ``bold``, ``prefix`` or ``tail``
    (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).

    A function decorated without a docstring raises AttributeError on call
    (after printing an error message).
    '''
    # output settings from kwargs or take defaults
    color = kwargs.get('color', blue)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '\n')

    def real_decorator(func):
        '''real decorator function'''
        @wraps(func)
        def wrapper(*args, **kwargs):
            '''the wrapper function'''
            try:
                prgf = first_paragraph(func.__doc__)
                print(color(prefix + prgf + tail, bold))
            except AttributeError as exc:
                # func.__doc__ is None: nothing to print, report and re-raise
                name = func.__name__
                print(red(flo('{name}() has no docstring')))
                raise(exc)
            return func(*args, **kwargs)
        return wrapper

    # bare `@print_doc1` usage: args holds the decorated function itself;
    # `@print_doc1(...)` usage: only kwargs are given, return the decorator
    invoked = bool(not args or kwargs)
    if not invoked:
        # invoke decorator function which returns the wrapper function
        return real_decorator(func=args[0])
    return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
    '''Decorator, print the full name of the decorated function.

    May be invoked as a simple, argument-less decorator
    (i.e. ``@print_full_name``) or with named arguments ``color``, ``bold``,
    ``prefix`` or ``tail``
    (eg. ``@print_full_name(color=utils.red, bold=True, prefix=' ')``).
    '''
    color = kwargs.get('color', default_color)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '')

    def real_decorator(func):
        '''real decorator function'''
        @wraps(func)
        def wrapper(*args, **kwargs):
            '''the wrapper function'''
            first_line = ''
            try:
                # __qualname__ exists since Python-3.3
                first_line = func.__module__ + '.' + func.__qualname__
            except AttributeError as exc:
                first_line = func.__name__
            print(color(prefix + first_line + tail, bold))
            return func(*args, **kwargs)
        return wrapper

    # bare decorator vs. decorator-with-arguments dispatch, as in print_doc1
    invoked = bool(not args or kwargs)
    if not invoked:
        # invoke decorator function which returns the wrapper function
        return real_decorator(func=args[0])
    return real_decorator
def _get_input():
    '''Read one line from stdin, portable across Python-2 and Python-3.'''
    try:
        # raw_input exists on Python-2 only; Python-3's input() reads a
        # line without eval'ing it (unlike Python-2's input())
        return raw_input()  # Python-2.*
    except NameError:
        return input()  # Python-3.*
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
    """Ask a yes/no question and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no", or None (which means an answer
    of the user is required).
    The "answer" return value is True for "yes" or False for "no".
    """
    valid = {"yes": True, "y": True, "ye": True, '1': True,
             "no": False, "n": False, '0': False, }
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # re-ask until the answer is empty (with a default set) or valid
    while True:
        sys.stdout.write(question + prompt)
        answer = _get_input().lower()
        if answer == '' and default is not None:
            return valid[default]
        if answer in valid:
            return valid[answer]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
"""Ask a question for input and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
The "answer" return value is a str.
"""
if default is None or default == '':
prompt = ' '
elif type(default) == str:
prompt = flo(' [{default}] ')
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(color(question + prompt))
choice = _get_input()
if default is not None and choice == '':
return default
if choice != '':
return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
'''Return str template with applied substitutions.
Example:
>>> template = 'Asyl for {{name}} {{surname}}!'
>>> filled_out_template_str(template, name='Edward', surname='Snowden')
'Asyl for Edward Snowden!'
>>> template = '[[[foo]]] was substituted by {{foo}}'
>>> filled_out_template_str(template, foo='bar')
'{{foo}} was substituted by bar'
>>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
>>> filled_out_template_str(template, curly='remains unchanged')
'names wrapped by {single} {curly} {braces} remains unchanged'
'''
template = template.replace('{', '{{')
template = template.replace('}', '}}')
template = template.replace('{{{{', '{')
template = template.replace('}}}}', '}')
template = template.format(**substitutions)
template = template.replace('{{', '{')
template = template.replace('}}', '}')
template = template.replace('[[[', '{{')
template = template.replace(']]]', '}}')
return template
# TODO: unit tests
def filled_out_template(filename, **substitutions):
'''Return content of file filename with applied substitutions.'''
res = None
with open(filename, 'r') as fp:
template = fp.read()
res = filled_out_template_str(template, **substitutions)
return res
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
append=True):
'''Search in file 'filename' for a line starting with 'prefix' and replace
the line by 'new_line'.
If a line starting with 'prefix' not exists 'new_line' will be appended.
If the file not exists, it will be created.
Return False if new_line was appended, else True (i.e. if the prefix was
found within of the file).
'''
same_line_exists, line_updated = False, False
filename = os.path.expanduser(filename)
if os.path.isfile(filename):
backup = filename + '~'
shutil.move(filename, backup)
# with open(filename, 'w') as dest, open(backup, 'r') as source:
with open(filename, 'w') as dest:
with open(backup, 'r') as source:
# try update..
for line in source:
if line == new_line:
same_line_exists = True
if line.startswith(prefix):
dest.write(new_line + '\n')
line_updated = True
else:
dest.write(line)
# ..or append
if not (same_line_exists or line_updated) and append:
dest.write(new_line + '\n')
if not keep_backup:
os.remove(backup)
else:
with open(filename, 'w') as dest:
dest.write(new_line + '\n')
return same_line_exists or line_updated
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
update_or_append_line=update_or_append_line):
'''Comment line out by putting a comment sign in front of the line.
If the file does not contain the line, the files content will not be
changed (but the file will be touched in every case).
'''
update_or_append_line(filename, prefix=line, new_line=comment+line,
append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
keep_backup=True,
update_or_append_line=update_or_append_line):
'''Remove the comment of an commented out line and make the line "active".
If such an commented out line not exists it would be appended.
'''
uncommented = update_or_append_line(filename, prefix=comment+prefix,
new_line=new_line,
keep_backup=keep_backup, append=False)
if not uncommented:
update_or_append_line(filename, prefix, new_line,
keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
    '''Return a copy of `input` with every str component encoded from unicode
    to utf-8.

    Dicts and lists are converted recursively.  On Python-3 every str is
    already unicode and is returned unchanged, so this is effectively the
    identity function there.
    '''
    if isinstance(input, dict):
        try:
            # python-2.6
            return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
                        for key, value
                        in input.iteritems())
        except AttributeError:
            # since python-2.7 cf. http://stackoverflow.com/a/1747827
            # [the ugly eval('...') is required for a valid syntax on
            # python-2.6, cf. http://stackoverflow.com/a/25049535]
            return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
                        for key, value
                        in input.items()}''')
    elif isinstance(input, list):
        return [convert_unicode_2_utf8(element) for element in input]
    # elif order relevant: python2 vs. python3
    # cf. http://stackoverflow.com/a/19877309
    elif isinstance(input, str):
        return input
    else:
        try:
            # `unicode` only exists on Python-2; eval keeps the name out of
            # Python-3's compile step
            if eval('''isinstance(input, unicode)'''):
                return input.encode('utf-8')
        except NameError:
            # unicode does not exist in python-3.x
            pass
        return input
def write_json(data, filename, gzip_mode=False):
'''Write the python data structure as a json-Object to filename.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'wt') as fh:
json.dump(obj=data, fp=fh, sort_keys=True)
except AttributeError:
# Python-2.6
fh = open_file(filename, 'wt')
json.dump(obj=data, fp=fh, sort_keys=True)
fh.close()
def create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def flat_list(list_of_lists):
'''Return a simple list out of a list of lists.'''
return [item for sublist in list_of_lists for item in sublist]
def text_with_newlines(text, line_length=78, newline='\n'):
'''Return text with a `newline` inserted after each `line_length` char.
Return `text` unchanged if line_length == 0.
'''
if line_length > 0:
if len(text) <= line_length:
return text
else:
return newline.join([text[idx:idx+line_length]
for idx
in range(0, len(text), line_length)])
else:
return text
def func_has_arg(func, arg):
'''Return True if an argument `arg` exists for function `func`, else False.
'''
return arg in inspect.getargspec(func).args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
'''A memoize decorator for class properties.
Return a cached property that is calculated by function `func` on first
access.
'''
def hook_for(that):
try:
orig_del = that.__del__
except AttributeError:
orig_del = None
def del_hook(*args, **kwargs):
del that._cache[id(that)]
del that._del_hook_cache[id(that)]
if orig_del is not None:
orig_del(that, *args, **kwargs)
try:
if orig_del is not None:
that.__del__ = del_hook
except AttributeError:
# that.__del__ is a class property and cannot be changed by instance
orig_del = None
return del_hook
def add_to_del_hook_cache(that):
if with_del_hook:
try:
that._del_hook_cache[id(that)] = hook_for(that)
except AttributeError:
# when that._del_hook_cache not exists, it means it is not a
# class property. Then, we don't need a del_hook().
pass
@functools.wraps(func)
def get(self):
try:
return self._cache[id(self)][func]
except AttributeError:
self._cache = {id(self): {}, }
add_to_del_hook_cache(self)
except KeyError:
try:
self._cache[id(self)]
except KeyError:
self._cache[id(self)] = {}
add_to_del_hook_cache(self)
val = self._cache[id(self)][func] = func(self)
return val
return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
field_names_without_defaults = []
defaults = []
for name in field_names:
list_ = name.split('=', 1)
if len(list_) > 1:
name, default = list_
defaults.append(eval(default))
elif len(defaults) != 0:
raise ValueError('non-keyword arg after keyword arg in field_names')
field_names_without_defaults.append(name)
_class = collections.namedtuple(typename, field_names_without_defaults,
**kwargs)
_class.__new__.__defaults__ = tuple(defaults)
if lazy_vals is not None:
# namedtuple instances are tuples and so they are immutable. We cannot
# add an instance property _cache. So we create one global _cache dict
# and one _del_hook_cache dict as class properties for storing the lazy
# vals and the del-hooks and enable the del_hook-functionality by
# adding a __del__ attribute function wich calls the del-hook.
_class._cache = {}
_class._del_hook_cache = {}
def noop(): pass
_class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
for attr_name, func in lazy_vals.items():
setattr(_class, attr_name,
lazy_val(func, with_del_hook=True))
return _class
# TODO unit test
class StructContext(object):
'''An instance of this is a file like object which enables access of an
(data) struct.
'''
def __init__(self, data_struct):
self.data_struct = data_struct
self.offset = 0
def __enter__(self):
self.seek(0)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.seek(0)
def seek(self, offset):
self.offset = offset
def read(self, fmt):
data = struct.unpack_from(fmt, self.data_struct, self.offset)
self.offset += struct.calcsize(fmt)
if len(data) == 1:
return data[0]
return data
@lazy_val
def length(self):
return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
'''timeout context.
Usage example:
>>> with timeout(0.1) as t:
... while True:
... if t.timed_out:
... break
... print('.')
... time.sleep(0.02)
.
.
.
.
.
For more usage, see https://stackoverflow.com/a/15190306
'''
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
self.die_after = time.time() + self.seconds
return self
def __exit__(self, type, value, traceback):
pass
@property
def timed_out(self):
return time.time() > self.die_after
if __name__ == '__main__':
import doctest
doctest.testmod()
# Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
assert Repo.__new__.__defaults__ == (None, '~/repos')
r = Repo(url='https://github.com/theno/fabsetup.git')
assert r.__repr__() == 'Repo(' \
'url=\'https://github.com/theno/fabsetup.git\', ' \
'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | write_json | python | def write_json(data, filename, gzip_mode=False):
'''Write the python data structure as a json-Object to filename.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'wt') as fh:
json.dump(obj=data, fp=fh, sort_keys=True)
except AttributeError:
# Python-2.6
fh = open_file(filename, 'wt')
json.dump(obj=data, fp=fh, sort_keys=True)
fh.close() | Write the python data structure as a json-Object to filename. | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L441-L455 | null | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
'''Return the first line of the (callers) docstring.'''
return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
'''Return first paragraph of multiline_str as a oneliner.
When without_trailing_dot is True, the last char of the first paragraph
will be removed, if it is a dot ('.').
Examples:
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str))
first line second line
>>> multiline_str = 'first \\n second \\n \\n next paragraph '
>>> print(first_paragraph(multiline_str))
first second
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=3))
fir
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=78))
first line second line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str))
first line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str, without_trailing_dot=False))
first line.
>>> multiline_str = ''
>>> print(first_paragraph(multiline_str))
<BLANKLINE>
'''
stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
paragraph = stripped.split('\n\n')[0]
res = paragraph.replace('\n', ' ')
if without_trailing_dot:
res = res.rsplit('.', 1)[0]
if maxlength:
res = res[0:maxlength]
return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold))
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise(exc)
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
    '''Decorator, print the full name of the decorated function.

    May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
    or with named arguments ``color``, ``bold``, or ``prefix``
    (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
    '''
    # output settings come from kwargs; fall back to defaults
    color = kwargs.get('color', default_color)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '')

    def real_decorator(func):
        '''real decorator function'''
        @wraps(func)
        def wrapper(*wargs, **wkwargs):
            '''the wrapper function'''
            try:
                # Python-3: fully qualified name incl. module
                heading = func.__module__ + '.' + func.__qualname__
            except AttributeError:
                # no __qualname__ (e.g. Python-2): plain name only
                heading = func.__name__
            print(color(prefix + heading + tail, bold))
            return func(*wargs, **wkwargs)
        return wrapper

    if args and not kwargs:
        # plain usage without parentheses: args[0] is the decorated function
        return real_decorator(args[0])
    return real_decorator
def _get_input():
    '''Read one line from stdin, working on both Python-2 and Python-3.'''
    try:
        input_func = raw_input  # Python-2.*
    except NameError:
        input_func = input      # Python-3.*
    return input_func()
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
    """Ask a yes/no question and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no", or None (which means an answer
    of the user is required).

    The "answer" return value is True for "yes" or False for "no".
    """
    # accepted spellings and their boolean meaning
    valid = {"yes": True, "y": True, "ye": True, '1': True,
             "no": False, "n": False, '0': False, }
    if default not in (None, "yes", "no"):
        raise ValueError("invalid default answer: '%s'" % default)
    # capitalized letter marks the default choice
    prompt = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}[default]
    while True:
        sys.stdout.write(question + prompt)
        choice = _get_input().lower()
        if choice == '' and default is not None:
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
    """Ask a question for input and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.

    The "answer" return value is a str.
    """
    if default is None or default == '':
        prompt = ' '
    elif type(default) == str:
        # flo() formats with this function's locals, so the variable must
        # keep the name `default`
        prompt = flo(' [{default}] ')
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(color(question + prompt))
        answer = _get_input()
        if answer != '':
            return answer
        if default is not None:
            # bare <Enter> accepts the default
            return default
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
    '''Return str template with applied substitutions.

    Only ``{{name}}`` placeholders are substituted; single curly braces pass
    through unchanged, and ``[[[name]]]`` survives as literal ``{{name}}``.

    Example:

    >>> template = 'Asyl for {{name}} {{surname}}!'
    >>> filled_out_template_str(template, name='Edward', surname='Snowden')
    'Asyl for Edward Snowden!'

    >>> template = '[[[foo]]] was substituted by {{foo}}'
    >>> filled_out_template_str(template, foo='bar')
    '{{foo}} was substituted by bar'

    >>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
    >>> filled_out_template_str(template, curly='remains unchanged')
    'names wrapped by {single} {curly} {braces} remains unchanged'
    '''
    # escape every brace, then turn the (doubled) former double braces back
    # into str.format() placeholders
    for old, new in (('{', '{{'), ('}', '}}'), ('{{{{', '{'), ('}}}}', '}')):
        template = template.replace(old, new)
    template = template.format(**substitutions)
    # unescape the remaining braces and materialize the [[[...]]] escapes
    for old, new in (('{{', '{'), ('}}', '}'), ('[[[', '{{'), (']]]', '}}')):
        template = template.replace(old, new)
    return template
# TODO: unit tests
def filled_out_template(filename, **substitutions):
    '''Return content of file filename with applied substitutions.'''
    with open(filename, 'r') as fp:
        template_text = fp.read()
    return filled_out_template_str(template_text, **substitutions)
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
                          append=True):
    '''Search in file 'filename' for a line starting with 'prefix' and replace
    the line by 'new_line'.

    If a line starting with 'prefix' not exists 'new_line' will be appended.
    If the file not exists, it will be created.

    :param filename: path of the file to edit; a leading '~' is expanded
    :param prefix: a line qualifies for replacement if it starts with this
    :param new_line: replacement line (written with a trailing newline)
    :param keep_backup: if True, the original file is kept as '<filename>~'
    :param append: if False, never append `new_line` when no line matches

    Return False if new_line was appended, else True (i.e. if the prefix was
    found within of the file).
    '''
    same_line_exists, line_updated = False, False
    filename = os.path.expanduser(filename)
    if os.path.isfile(filename):
        # rewrite strategy: move the original aside as backup, then stream
        # it back line by line into a fresh file
        backup = filename + '~'
        shutil.move(filename, backup)
        # with open(filename, 'w') as dest, open(backup, 'r') as source:
        with open(filename, 'w') as dest:
            with open(backup, 'r') as source:
                # try update..
                for line in source:
                    # NOTE(review): `line` still carries its trailing '\n'
                    # here, so this equality only triggers when `new_line`
                    # contains one as well -- TODO confirm intended
                    if line == new_line:
                        same_line_exists = True
                    if line.startswith(prefix):
                        # replace matching line; later matches replace again
                        dest.write(new_line + '\n')
                        line_updated = True
                    else:
                        dest.write(line)
                # ..or append
                if not (same_line_exists or line_updated) and append:
                    dest.write(new_line + '\n')
        if not keep_backup:
            os.remove(backup)
    else:
        # file does not exist yet: create it containing only the new line
        with open(filename, 'w') as dest:
            dest.write(new_line + '\n')
    return same_line_exists or line_updated
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
                     update_or_append_line=update_or_append_line):
    '''Comment line out by putting a comment sign in front of the line.

    If the file does not contain the line, the files content will not be
    changed (but the file will be touched in every case).
    '''
    # append=False: only deactivate an existing line, never add one
    update_or_append_line(filename,
                          prefix=line,
                          new_line=comment + line,
                          append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
                                       keep_backup=True,
                                       update_or_append_line=update_or_append_line):
    '''Remove the comment of an commented out line and make the line "active".

    If such an commented out line not exists it would be appended.
    '''
    # first pass: try to replace the commented-out variant only
    was_uncommented = update_or_append_line(filename,
                                           prefix=comment + prefix,
                                           new_line=new_line,
                                           keep_backup=keep_backup,
                                           append=False)
    if not was_uncommented:
        # second pass: regular update-or-append on the active variant
        update_or_append_line(filename, prefix, new_line,
                              keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
    '''Return a copy of `input` with every str component encoded from unicode to
    utf-8.

    Dicts and lists are converted recursively; any other value is returned
    unchanged (on Python-3, where `unicode` does not exist, the function is
    effectively a deep identity).
    '''
    if isinstance(input, dict):
        try:
            # python-2.6
            return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
                        for key, value
                        in input.iteritems())
        except AttributeError:
            # since python-2.7 cf. http://stackoverflow.com/a/1747827
            # [the ugly eval('...') is required for a valid syntax on
            # python-2.6, cf. http://stackoverflow.com/a/25049535]
            return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
                            for key, value
                            in input.items()}''')
    elif isinstance(input, list):
        return [convert_unicode_2_utf8(element) for element in input]
    # elif order relevant: python2 vs. python3
    # cf. http://stackoverflow.com/a/19877309
    elif isinstance(input, str):
        return input
    else:
        try:
            # eval keeps the `unicode` reference out of Python-3 bytecode
            if eval('''isinstance(input, unicode)'''):
                return input.encode('utf-8')
        except NameError:
            # unicode does not exist in python-3.x
            pass
        return input
def load_json(filename, gzip_mode=False):
    '''Return the json-file data, with all strings utf-8 encoded.'''
    opener = gzip.open if gzip_mode else open
    try:
        with opener(filename, 'rt') as fh:
            data = convert_unicode_2_utf8(json.load(fh))
            return data
    except AttributeError:
        # Python-2.6: gzip file objects are no context managers
        fh = opener(filename, 'rt')
        data = json.load(fh)
        fh.close()
        return convert_unicode_2_utf8(data)
def create_dir_if_not_exists(path):
    '''Create directory `path` (including parents) if it does not exist yet.

    Fixes a TOCTOU race in the previous exists()-then-makedirs() version:
    another process could create `path` between the check and the creation,
    making makedirs() raise.  Now we just try to create it and only ignore
    the error if the directory exists afterwards.  (os.makedirs(...,
    exist_ok=True) is avoided to stay Python-2 compatible, like the rest of
    this module.)
    '''
    try:
        os.makedirs(path)
    except OSError:
        # re-raise real errors (permissions, path is a file, ...)
        if not os.path.isdir(path):
            raise
def flat_list(list_of_lists):
    '''Return a simple list out of a list of lists.'''
    flattened = []
    for sublist in list_of_lists:
        flattened.extend(sublist)
    return flattened
def text_with_newlines(text, line_length=78, newline='\n'):
    '''Return text with a `newline` inserted after each `line_length` char.

    Return `text` unchanged if line_length == 0.
    '''
    # non-positive line_length, or text that already fits: nothing to do
    if line_length <= 0 or len(text) <= line_length:
        return text
    chunks = (text[pos:pos + line_length]
              for pos in range(0, len(text), line_length))
    return newline.join(chunks)
def func_has_arg(func, arg):
    '''Return True if an argument `arg` exists for function `func`, else False.
    '''
    # inspect.getargspec() was deprecated since Python-3.0 and removed in
    # Python-3.11; use getfullargspec() where available and fall back to
    # getargspec() on old interpreters (Python-2).
    try:
        argspec = inspect.getfullargspec(func)
    except AttributeError:
        argspec = inspect.getargspec(func)
    return arg in argspec.args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
    '''A memoize decorator for class properties.

    Return a cached property that is calculated by function `func` on first
    access.

    Cached values live in a dict attribute `_cache`, keyed first by the
    instance's id() and then by `func`.  With `with_del_hook=True` and a
    class-level `_del_hook_cache`, a hook is registered that removes the
    instance's cache entries when the instance is deleted (used by the
    namedtuple() factory in this module, whose immutable instances share
    one class-level cache).
    '''
    def hook_for(that):
        # build a __del__ replacement that cleans up the cache entries and
        # then delegates to the original __del__ (if there was one)
        try:
            orig_del = that.__del__
        except AttributeError:
            orig_del = None

        def del_hook(*args, **kwargs):
            del that._cache[id(that)]
            del that._del_hook_cache[id(that)]
            if orig_del is not None:
                orig_del(that, *args, **kwargs)
        try:
            if orig_del is not None:
                that.__del__ = del_hook
        except AttributeError:
            # that.__del__ is a class property and cannot be changed by instance
            orig_del = None
        return del_hook

    def add_to_del_hook_cache(that):
        if with_del_hook:
            try:
                that._del_hook_cache[id(that)] = hook_for(that)
            except AttributeError:
                # when that._del_hook_cache not exists, it means it is not a
                # class property. Then, we don't need a del_hook().
                pass

    @functools.wraps(func)
    def get(self):
        try:
            return self._cache[id(self)][func]
        except AttributeError:
            # very first access on an instance without any _cache yet
            self._cache = {id(self): {}, }
            add_to_del_hook_cache(self)
        except KeyError:
            try:
                self._cache[id(self)]
            except KeyError:
                # a (class-level) _cache exists, but not for this instance
                self._cache[id(self)] = {}
                add_to_del_hook_cache(self)
        # compute once, store, and return
        val = self._cache[id(self)][func] = func(self)
        return val
    return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
    '''Drop-in replacement for collections.namedtuple() which additionally
    supports default values ("name=<default>" entries in `field_names`) and
    cached lazy attributes (`lazy_vals`: dict mapping attribute name to a
    one-argument function).

    NOTE(review): default values are parsed with eval(); only call this
    with trusted, literal `field_names` strings.
    '''
    if isinstance(field_names, str):
        # allow "a, b=1" style strings like collections.namedtuple does
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    field_names_without_defaults = []
    defaults = []
    for name in field_names:
        list_ = name.split('=', 1)
        if len(list_) > 1:
            # field with default, e.g. "basedir='~/repos'"
            name, default = list_
            defaults.append(eval(default))
        elif len(defaults) != 0:
            # same restriction as in a def statement
            raise ValueError('non-keyword arg after keyword arg in field_names')
        field_names_without_defaults.append(name)
    _class = collections.namedtuple(typename, field_names_without_defaults,
                                    **kwargs)
    # right-aligned defaults, the same mechanism functions use
    _class.__new__.__defaults__ = tuple(defaults)
    if lazy_vals is not None:
        # namedtuple instances are tuples and so they are immutable. We cannot
        # add an instance property _cache. So we create one global _cache dict
        # and one _del_hook_cache dict as class properties for storing the lazy
        # vals and the del-hooks and enable the del_hook-functionality by
        # adding a __del__ attribute function which calls the del-hook.
        _class._cache = {}
        _class._del_hook_cache = {}

        def noop(): pass
        _class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
        for attr_name, func in lazy_vals.items():
            setattr(_class, attr_name,
                    lazy_val(func, with_del_hook=True))
    return _class
# TODO unit test
class StructContext(object):
    '''An instance of this is a file like object which enables access of an
    (data) struct.

    Entering (and leaving) the context resets the read position to the
    beginning of the struct.
    '''

    def __init__(self, data_struct):
        self.data_struct = data_struct
        self.offset = 0

    def __enter__(self):
        self.seek(0)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.seek(0)

    def seek(self, offset):
        '''Set the current read position to `offset`.'''
        self.offset = offset

    def read(self, fmt):
        '''Unpack struct-format `fmt` at the current offset and advance
        past it.  A single value is returned unwrapped, several values as
        a tuple.
        '''
        unpacked = struct.unpack_from(fmt, self.data_struct, self.offset)
        self.offset += struct.calcsize(fmt)
        return unpacked[0] if len(unpacked) == 1 else unpacked

    @lazy_val
    def length(self):
        # total size of the underlying struct, computed once
        return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
    '''Context manager holding a deadline.

    After `seconds` have elapsed (counted from entering the context), the
    `timed_out` property switches to True.  Typical usage is polling
    `timed_out` inside a loop; see https://stackoverflow.com/a/15190306
    for more examples.
    '''

    def __init__(self, seconds):
        self.seconds = seconds

    def __enter__(self):
        # the deadline starts counting on context entry, not on creation
        self.die_after = self.seconds + time.time()
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        pass

    @property
    def timed_out(self):
        '''True iff the deadline set on __enter__ has passed.'''
        return time.time() > self.die_after
if __name__ == '__main__':
    # run all doctests of this module
    import doctest
    doctest.testmod()
    # smoke test for the defaults-aware namedtuple() factory defined above
    # Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    assert Repo.__new__.__defaults__ == (None, '~/repos')
    r = Repo(url='https://github.com/theno/fabsetup.git')
    assert r.__repr__() == 'Repo(' \
        'url=\'https://github.com/theno/fabsetup.git\', ' \
        'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | text_with_newlines | python | def text_with_newlines(text, line_length=78, newline='\n'):
'''Return text with a `newline` inserted after each `line_length` char.
Return `text` unchanged if line_length == 0.
'''
if line_length > 0:
if len(text) <= line_length:
return text
else:
return newline.join([text[idx:idx+line_length]
for idx
in range(0, len(text), line_length)])
else:
return text | Return text with a `newline` inserted after each `line_length` char.
Return `text` unchanged if line_length == 0. | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L468-L481 | null | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
'''Return the first line of the (callers) docstring.'''
return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
'''Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
'''
def inner(text, bold=False):
'''Inner color function.'''
code = color_code
if bold:
code = flo("1;{code}")
return flo('\033[{code}m{text}\033[0m')
return inner
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
'''Return first paragraph of multiline_str as a oneliner.
When without_trailing_dot is True, the last char of the first paragraph
will be removed, if it is a dot ('.').
Examples:
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str))
first line second line
>>> multiline_str = 'first \\n second \\n \\n next paragraph '
>>> print(first_paragraph(multiline_str))
first second
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=3))
fir
>>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
>>> print(first_paragraph(multiline_str, maxlength=78))
first line second line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str))
first line
>>> multiline_str = 'first line.'
>>> print(first_paragraph(multiline_str, without_trailing_dot=False))
first line.
>>> multiline_str = ''
>>> print(first_paragraph(multiline_str))
<BLANKLINE>
'''
stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
paragraph = stripped.split('\n\n')[0]
res = paragraph.replace('\n', ' ')
if without_trailing_dot:
res = res.rsplit('.', 1)[0]
if maxlength:
res = res[0:maxlength]
return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold))
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise(exc)
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
'''Decorator, print the full name of the decorated function.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, or ``prefix``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
'''
color = kwargs.get('color', default_color)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
first_line = ''
try:
first_line = func.__module__ + '.' + func.__qualname__
except AttributeError as exc:
first_line = func.__name__
print(color(prefix + first_line + tail, bold))
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
def _get_input():
try:
return raw_input() # Python-2.*
except NameError:
return input() # Python-3.*
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (which means an answer
of the user is required).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, '1': True,
"no": False, "n": False, '0': False, }
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = _get_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
"""Ask a question for input and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
The "answer" return value is a str.
"""
if default is None or default == '':
prompt = ' '
elif type(default) == str:
prompt = flo(' [{default}] ')
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(color(question + prompt))
choice = _get_input()
if default is not None and choice == '':
return default
if choice != '':
return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
'''Return str template with applied substitutions.
Example:
>>> template = 'Asyl for {{name}} {{surname}}!'
>>> filled_out_template_str(template, name='Edward', surname='Snowden')
'Asyl for Edward Snowden!'
>>> template = '[[[foo]]] was substituted by {{foo}}'
>>> filled_out_template_str(template, foo='bar')
'{{foo}} was substituted by bar'
>>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
>>> filled_out_template_str(template, curly='remains unchanged')
'names wrapped by {single} {curly} {braces} remains unchanged'
'''
template = template.replace('{', '{{')
template = template.replace('}', '}}')
template = template.replace('{{{{', '{')
template = template.replace('}}}}', '}')
template = template.format(**substitutions)
template = template.replace('{{', '{')
template = template.replace('}}', '}')
template = template.replace('[[[', '{{')
template = template.replace(']]]', '}}')
return template
# TODO: unit tests
def filled_out_template(filename, **substitutions):
'''Return content of file filename with applied substitutions.'''
res = None
with open(filename, 'r') as fp:
template = fp.read()
res = filled_out_template_str(template, **substitutions)
return res
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
append=True):
'''Search in file 'filename' for a line starting with 'prefix' and replace
the line by 'new_line'.
If a line starting with 'prefix' not exists 'new_line' will be appended.
If the file not exists, it will be created.
Return False if new_line was appended, else True (i.e. if the prefix was
found within of the file).
'''
same_line_exists, line_updated = False, False
filename = os.path.expanduser(filename)
if os.path.isfile(filename):
backup = filename + '~'
shutil.move(filename, backup)
# with open(filename, 'w') as dest, open(backup, 'r') as source:
with open(filename, 'w') as dest:
with open(backup, 'r') as source:
# try update..
for line in source:
if line == new_line:
same_line_exists = True
if line.startswith(prefix):
dest.write(new_line + '\n')
line_updated = True
else:
dest.write(line)
# ..or append
if not (same_line_exists or line_updated) and append:
dest.write(new_line + '\n')
if not keep_backup:
os.remove(backup)
else:
with open(filename, 'w') as dest:
dest.write(new_line + '\n')
return same_line_exists or line_updated
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
update_or_append_line=update_or_append_line):
'''Comment line out by putting a comment sign in front of the line.
If the file does not contain the line, the files content will not be
changed (but the file will be touched in every case).
'''
update_or_append_line(filename, prefix=line, new_line=comment+line,
append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
keep_backup=True,
update_or_append_line=update_or_append_line):
'''Remove the comment of an commented out line and make the line "active".
If such an commented out line not exists it would be appended.
'''
uncommented = update_or_append_line(filename, prefix=comment+prefix,
new_line=new_line,
keep_backup=keep_backup, append=False)
if not uncommented:
update_or_append_line(filename, prefix, new_line,
keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
'''Return a copy of `input` with every str component encoded from unicode to
utf-8.
'''
if isinstance(input, dict):
try:
# python-2.6
return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
for key, value
in input.iteritems())
except AttributeError:
# since python-2.7 cf. http://stackoverflow.com/a/1747827
# [the ugly eval('...') is required for a valid syntax on
# python-2.6, cf. http://stackoverflow.com/a/25049535]
return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
for key, value
in input.items()}''')
elif isinstance(input, list):
return [convert_unicode_2_utf8(element) for element in input]
# elif order relevant: python2 vs. python3
# cf. http://stackoverflow.com/a/19877309
elif isinstance(input, str):
return input
else:
try:
if eval('''isinstance(input, unicode)'''):
return input.encode('utf-8')
except NameError:
# unicode does not exist in python-3.x
pass
return input
def load_json(filename, gzip_mode=False):
'''Return the json-file data, with all strings utf-8 encoded.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'rt') as fh:
data = json.load(fh)
data = convert_unicode_2_utf8(data)
return data
except AttributeError:
# Python-2.6
fh = open_file(filename, 'rt')
data = json.load(fh)
fh.close()
data = convert_unicode_2_utf8(data)
return data
def write_json(data, filename, gzip_mode=False):
'''Write the python data structure as a json-Object to filename.'''
open_file = open
if gzip_mode:
open_file = gzip.open
try:
with open_file(filename, 'wt') as fh:
json.dump(obj=data, fp=fh, sort_keys=True)
except AttributeError:
# Python-2.6
fh = open_file(filename, 'wt')
json.dump(obj=data, fp=fh, sort_keys=True)
fh.close()
def create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def flat_list(list_of_lists):
'''Return a simple list out of a list of lists.'''
return [item for sublist in list_of_lists for item in sublist]
def func_has_arg(func, arg):
'''Return True if an argument `arg` exists for function `func`, else False.
'''
return arg in inspect.getargspec(func).args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
def lazy_val(func, with_del_hook=False):
'''A memoize decorator for class properties.
Return a cached property that is calculated by function `func` on first
access.
'''
def hook_for(that):
try:
orig_del = that.__del__
except AttributeError:
orig_del = None
def del_hook(*args, **kwargs):
del that._cache[id(that)]
del that._del_hook_cache[id(that)]
if orig_del is not None:
orig_del(that, *args, **kwargs)
try:
if orig_del is not None:
that.__del__ = del_hook
except AttributeError:
# that.__del__ is a class property and cannot be changed by instance
orig_del = None
return del_hook
def add_to_del_hook_cache(that):
if with_del_hook:
try:
that._del_hook_cache[id(that)] = hook_for(that)
except AttributeError:
# when that._del_hook_cache not exists, it means it is not a
# class property. Then, we don't need a del_hook().
pass
@functools.wraps(func)
def get(self):
try:
return self._cache[id(self)][func]
except AttributeError:
self._cache = {id(self): {}, }
add_to_del_hook_cache(self)
except KeyError:
try:
self._cache[id(self)]
except KeyError:
self._cache[id(self)] = {}
add_to_del_hook_cache(self)
val = self._cache[id(self)][func] = func(self)
return val
return property(get)
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
field_names_without_defaults = []
defaults = []
for name in field_names:
list_ = name.split('=', 1)
if len(list_) > 1:
name, default = list_
defaults.append(eval(default))
elif len(defaults) != 0:
raise ValueError('non-keyword arg after keyword arg in field_names')
field_names_without_defaults.append(name)
_class = collections.namedtuple(typename, field_names_without_defaults,
**kwargs)
_class.__new__.__defaults__ = tuple(defaults)
if lazy_vals is not None:
# namedtuple instances are tuples and so they are immutable. We cannot
# add an instance property _cache. So we create one global _cache dict
# and one _del_hook_cache dict as class properties for storing the lazy
# vals and the del-hooks and enable the del_hook-functionality by
# adding a __del__ attribute function wich calls the del-hook.
_class._cache = {}
_class._del_hook_cache = {}
def noop(): pass
_class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
for attr_name, func in lazy_vals.items():
setattr(_class, attr_name,
lazy_val(func, with_del_hook=True))
return _class
# TODO unit test
class StructContext(object):
'''An instance of this is a file like object which enables access of an
(data) struct.
'''
def __init__(self, data_struct):
self.data_struct = data_struct
self.offset = 0
def __enter__(self):
self.seek(0)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.seek(0)
def seek(self, offset):
self.offset = offset
def read(self, fmt):
data = struct.unpack_from(fmt, self.data_struct, self.offset)
self.offset += struct.calcsize(fmt)
if len(data) == 1:
return data[0]
return data
@lazy_val
def length(self):
return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
'''timeout context.
Usage example:
>>> with timeout(0.1) as t:
... while True:
... if t.timed_out:
... break
... print('.')
... time.sleep(0.02)
.
.
.
.
.
For more usage, see https://stackoverflow.com/a/15190306
'''
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
self.die_after = time.time() + self.seconds
return self
def __exit__(self, type, value, traceback):
pass
@property
def timed_out(self):
return time.time() > self.die_after
if __name__ == '__main__':
import doctest
doctest.testmod()
# Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
assert Repo.__new__.__defaults__ == (None, '~/repos')
r = Repo(url='https://github.com/theno/fabsetup.git')
assert r.__repr__() == 'Repo(' \
'url=\'https://github.com/theno/fabsetup.git\', ' \
'name=None, basedir=\'~/repos\')'
|
theno/utlz | utlz/__init__.py | lazy_val | python | def lazy_val(func, with_del_hook=False):
'''A memoize decorator for class properties.
Return a cached property that is calculated by function `func` on first
access.
'''
def hook_for(that):
try:
orig_del = that.__del__
except AttributeError:
orig_del = None
def del_hook(*args, **kwargs):
del that._cache[id(that)]
del that._del_hook_cache[id(that)]
if orig_del is not None:
orig_del(that, *args, **kwargs)
try:
if orig_del is not None:
that.__del__ = del_hook
except AttributeError:
# that.__del__ is a class property and cannot be changed by instance
orig_del = None
return del_hook
def add_to_del_hook_cache(that):
if with_del_hook:
try:
that._del_hook_cache[id(that)] = hook_for(that)
except AttributeError:
# when that._del_hook_cache not exists, it means it is not a
# class property. Then, we don't need a del_hook().
pass
@functools.wraps(func)
def get(self):
try:
return self._cache[id(self)][func]
except AttributeError:
self._cache = {id(self): {}, }
add_to_del_hook_cache(self)
except KeyError:
try:
self._cache[id(self)]
except KeyError:
self._cache[id(self)] = {}
add_to_del_hook_cache(self)
val = self._cache[id(self)][func] = func(self)
return val
return property(get) | A memoize decorator for class properties.
Return a cached property that is calculated by function `func` on first
access. | train | https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/utlz/__init__.py#L492-L544 | null | # -*- coding: utf-8 -*-
import collections
import functools
import gzip
import json
import inspect
import os.path
import shutil
import sys
import struct
import time
from functools import wraps
from utlz._version import __version__
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals)
# does not work if called from another package (with other globals)
# TODO: unit tests
def doc1():
'''Return the first line of the (callers) docstring.'''
return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
# TODO: unit tests
def _wrap_with(color_code):
    '''Color wrapper.

    Example:

        >>> blue = _wrap_with('34')
        >>> print(blue('text'))
        \033[34mtext\033[0m
    '''
    def colorize(text, bold=False):
        '''Wrap `text` in the ANSI escape sequence of this color.'''
        code = color_code
        if bold:
            code = '1;' + code
        return '\033[' + code + 'm' + text + '\033[0m'
    return colorize
# Module-level convenience colorizers, one per standard ANSI foreground
# color code (30-37); `default_color` (code 0) resets to the terminal default.
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
# TODO: unit tests
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
    '''Return first paragraph of multiline_str as a oneliner.

    When without_trailing_dot is True, the last char of the first paragraph
    will be removed, if it is a dot ('.').

    Examples:
        >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
        >>> print(first_paragraph(multiline_str))
        first line second line

        >>> multiline_str = 'first \\n second \\n \\n next paragraph '
        >>> print(first_paragraph(multiline_str))
        first second

        >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
        >>> print(first_paragraph(multiline_str, maxlength=3))
        fir

        >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
        >>> print(first_paragraph(multiline_str, maxlength=78))
        first line second line

        >>> multiline_str = 'first line.'
        >>> print(first_paragraph(multiline_str))
        first line

        >>> multiline_str = 'version 2.0 rocks\\n\\nnext paragraph'
        >>> print(first_paragraph(multiline_str))
        version 2.0 rocks

        >>> multiline_str = 'first line.'
        >>> print(first_paragraph(multiline_str, without_trailing_dot=False))
        first line.

        >>> multiline_str = ''
        >>> print(first_paragraph(multiline_str))
        <BLANKLINE>
    '''
    # normalize: strip every line; paragraphs are separated by blank lines
    stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
    paragraph = stripped.split('\n\n')[0]
    res = paragraph.replace('\n', ' ')
    if without_trailing_dot and res.endswith('.'):
        # only drop a dot at the very end; the previous rsplit('.', 1)
        # approach wrongly truncated at the *last* dot anywhere in the
        # paragraph (e.g. 'version 2.0 rocks' became 'version 2')
        res = res[:-1]
    if maxlength:
        res = res[0:maxlength]
    return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
# TODO: unit tests
def print_doc1(*args, **kwargs):
    '''Print the first paragraph of the docstring of the decorated function.

    The paragraph will be printed as a oneliner.

    May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
    or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
    (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).

    Keyword Args:
        color: wrapper function for the output, e.g. ``red`` (default: blue)
        bold (bool): print the paragraph in bold (default: False)
        prefix (str): printed in front of the paragraph (default: '')
        tail (str): printed after the paragraph (default: newline)
    '''
    # output settings from kwargs or take defaults
    color = kwargs.get('color', blue)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '\n')
    def real_decorator(func):
        '''real decorator function'''
        @wraps(func)
        def wrapper(*args, **kwargs):
            '''the wrapper function'''
            try:
                prgf = first_paragraph(func.__doc__)
                print(color(prefix + prgf + tail, bold))
            except AttributeError as exc:
                # func.__doc__ is None -> there is no docstring to print
                name = func.__name__
                print(red(flo('{name}() has no docstring')))
                raise(exc)
            return func(*args, **kwargs)
        return wrapper
    invoked = bool(not args or kwargs)
    if not invoked:
        # used as bare `@print_doc1`: args[0] is the decorated function;
        # invoke decorator function which returns the wrapper function
        return real_decorator(func=args[0])
    return real_decorator
# TODO: unit tests
def print_full_name(*args, **kwargs):
    '''Decorator, print the full name of the decorated function.

    May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
    or with named arguments ``color``, ``bold``, or ``prefix``
    (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).

    Keyword Args:
        color: wrapper function for the output (default: default_color)
        bold (bool): print in bold (default: False)
        prefix (str): printed in front of the name (default: '')
        tail (str): printed after the name (default: '')
    '''
    color = kwargs.get('color', default_color)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '')
    def real_decorator(func):
        '''real decorator function'''
        @wraps(func)
        def wrapper(*args, **kwargs):
            '''the wrapper function'''
            first_line = ''
            try:
                # __qualname__ only exists on Python 3
                first_line = func.__module__ + '.' + func.__qualname__
            except AttributeError as exc:
                first_line = func.__name__
            print(color(prefix + first_line + tail, bold))
            return func(*args, **kwargs)
        return wrapper
    invoked = bool(not args or kwargs)
    if not invoked:
        # invoke decorator function which returns the wrapper function
        return real_decorator(func=args[0])
    return real_decorator
def _get_input():
    '''Read one line from stdin, portable across Python 2 and 3.'''
    try:
        reader = raw_input  # Python-2.*
    except NameError:
        reader = input  # Python-3.*
    return reader()
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
    """Ask a yes/no question and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no", or None (which means an answer
    of the user is required).

    The "answer" return value is True for "yes" or False for "no".

    Raises:
        ValueError: if `default` is not "yes", "no" or None.
    """
    # all accepted spellings for each answer
    valid = {"yes": True, "y": True, "ye": True, '1': True,
             "no": False, "n": False, '0': False, }
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = _get_input().lower()
        if default is not None and choice == '':
            # plain <Enter> selects the default answer
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
    """Ask a question for input and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.

    The "answer" return value is a str.

    Raises:
        ValueError: if `default` is neither None nor a str.
    """
    if default is None or default == '':
        prompt = ' '
    elif type(default) == str:
        prompt = flo(' [{default}] ')
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(color(question + prompt))
        choice = _get_input()
        if default is not None and choice == '':
            # plain <Enter> selects the default answer
            return default
        if choice != '':
            return choice
# TODO: unit tests
def filled_out_template_str(template, **substitutions):
    '''Return str template with applied substitutions.

    Example:
        >>> template = 'Asyl for {{name}} {{surname}}!'
        >>> filled_out_template_str(template, name='Edward', surname='Snowden')
        'Asyl for Edward Snowden!'

        >>> template = '[[[foo]]] was substituted by {{foo}}'
        >>> filled_out_template_str(template, foo='bar')
        '{{foo}} was substituted by bar'

        >>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
        >>> filled_out_template_str(template, curly='remains unchanged')
        'names wrapped by {single} {curly} {braces} remains unchanged'
    '''
    # escape every literal brace by doubling it; the '{{name}}' placeholders
    # thereby become '{{{{name}}}}', which are then reduced to single-brace
    # fields that str.format() can fill in
    masked = template.replace('{', '{{').replace('}', '}}')
    masked = masked.replace('{{{{', '{').replace('}}}}', '}')
    filled = masked.format(**substitutions)
    # un-escape the remaining doubled braces (the original literal ones) ...
    filled = filled.replace('{{', '{').replace('}}', '}')
    # ... and turn the '[[[name]]]' markers into literal '{{name}}' text
    filled = filled.replace('[[[', '{{').replace(']]]', '}}')
    return filled
# TODO: unit tests
def filled_out_template(filename, **substitutions):
    '''Return content of file filename with applied substitutions.'''
    with open(filename, 'r') as fp:
        template = fp.read()
    return filled_out_template_str(template, **substitutions)
# cf. http://stackoverflow.com/a/126389
# TODO: unit tests
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
                          append=True):
    '''Search in file 'filename' for a line starting with 'prefix' and replace
    the line by 'new_line'.

    If a line starting with 'prefix' not exists 'new_line' will be appended.
    If the file not exists, it will be created.

    A backup copy 'filename~' of the original file is created and kept
    unless keep_backup is False.

    Return False if new_line was appended, else True (i.e. if the prefix was
    found within of the file).
    '''
    same_line_exists, line_updated = False, False
    filename = os.path.expanduser(filename)
    if os.path.isfile(filename):
        # move the original aside and rewrite it line by line
        backup = filename + '~'
        shutil.move(filename, backup)
        # with open(filename, 'w') as dest, open(backup, 'r') as source:
        with open(filename, 'w') as dest:
            with open(backup, 'r') as source:
                # try update..
                for line in source:
                    if line == new_line:
                        same_line_exists = True
                    if line.startswith(prefix):
                        dest.write(new_line + '\n')
                        line_updated = True
                    else:
                        dest.write(line)
                # ..or append
                if not (same_line_exists or line_updated) and append:
                    dest.write(new_line + '\n')
        if not keep_backup:
            os.remove(backup)
    else:
        # file does not exist yet: create it with just the new line
        with open(filename, 'w') as dest:
            dest.write(new_line + '\n')
    return same_line_exists or line_updated
# TODO: unit tests
def comment_out_line(filename, line, comment='#',
                     update_or_append_line=update_or_append_line):
    '''Comment line out by putting a comment sign in front of the line.

    If the file does not contain the line, the files content will not be
    changed (but the file will be touched in every case).
    '''
    commented_line = comment + line
    update_or_append_line(filename, prefix=line, new_line=commented_line,
                          append=False)
# TODO: unit tests
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
                                       keep_backup=True,
                                       update_or_append_line=update_or_append_line):
    '''Remove the comment of an commented out line and make the line "active".

    If such an commented out line not exists it would be appended.
    '''
    # first, try to replace a commented-out occurrence in place
    uncommented = update_or_append_line(filename, prefix=comment + prefix,
                                        new_line=new_line,
                                        keep_backup=keep_backup, append=False)
    if uncommented:
        return
    # no commented-out line found: update an active one, or append
    update_or_append_line(filename, prefix, new_line,
                          keep_backup=keep_backup, append=True)
# idea comes from http://stackoverflow.com/a/13105359
# TODO: unit tests
def convert_unicode_2_utf8(input):
    '''Return a copy of `input` with every str component encoded from unicode to
    utf-8.

    Dicts and lists are converted recursively.  On Python 3 (where the
    `unicode` type does not exist) strings are returned unchanged.
    '''
    if isinstance(input, dict):
        try:
            # python-2.6
            return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
                        for key, value
                        in input.iteritems())
        except AttributeError:
            # since python-2.7 cf. http://stackoverflow.com/a/1747827
            # [the ugly eval('...') is required for a valid syntax on
            # python-2.6, cf. http://stackoverflow.com/a/25049535]
            return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value)
                        for key, value
                        in input.items()}''')
    elif isinstance(input, list):
        return [convert_unicode_2_utf8(element) for element in input]
    # elif order relevant: python2 vs. python3
    # cf. http://stackoverflow.com/a/19877309
    elif isinstance(input, str):
        return input
    else:
        try:
            # `unicode` is only a valid name on Python 2, hence the eval
            if eval('''isinstance(input, unicode)'''):
                return input.encode('utf-8')
        except NameError:
            # unicode does not exist in python-3.x
            pass
        return input
def load_json(filename, gzip_mode=False):
    '''Return the json-file data, with all strings utf-8 encoded.

    When gzip_mode is True the file is opened with gzip.open() instead
    of the plain open().
    '''
    open_file = open
    if gzip_mode:
        open_file = gzip.open
    try:
        with open_file(filename, 'rt') as fh:
            data = json.load(fh)
            data = convert_unicode_2_utf8(data)
            return data
    except AttributeError:
        # Python-2.6: file objects from gzip.open() lack context manager
        # support, so fall back to explicit open/close
        fh = open_file(filename, 'rt')
        data = json.load(fh)
        fh.close()
        data = convert_unicode_2_utf8(data)
        return data
def write_json(data, filename, gzip_mode=False):
    '''Write the python data structure as a json-Object to filename.

    When gzip_mode is True the file is written with gzip.open() instead
    of the plain open().
    '''
    open_file = open
    if gzip_mode:
        open_file = gzip.open
    try:
        with open_file(filename, 'wt') as fh:
            json.dump(obj=data, fp=fh, sort_keys=True)
    except AttributeError:
        # Python-2.6: file objects from gzip.open() lack context manager
        # support, so fall back to explicit open/close
        fh = open_file(filename, 'wt')
        json.dump(obj=data, fp=fh, sort_keys=True)
        fh.close()
def create_dir_if_not_exists(path):
    '''Create directory `path` (including parents) if it does not exist.

    Safe against the race where another process creates the directory
    between check and creation: makedirs() is attempted unconditionally
    and the OSError is only re-raised if the path still is not a
    directory afterwards (the bare exists()-then-makedirs() approach
    could crash on concurrent creation).
    '''
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise
def flat_list(list_of_lists):
    '''Return a simple list out of a list of lists.'''
    flattened = []
    for sublist in list_of_lists:
        flattened.extend(sublist)
    return flattened
def text_with_newlines(text, line_length=78, newline='\n'):
    '''Return text with a `newline` inserted after each `line_length` char.

    Return `text` unchanged if line_length == 0.
    '''
    if line_length <= 0:
        return text
    if len(text) <= line_length:
        # nothing to wrap
        return text
    chunks = (text[pos:pos + line_length]
              for pos in range(0, len(text), line_length))
    return newline.join(chunks)
def func_has_arg(func, arg):
    '''Return True if an argument `arg` exists for function `func`, else False.
    '''
    try:
        # inspect.getargspec() was deprecated since 3.0 and removed in
        # Python 3.11; getfullargspec() is the supported replacement
        argspec = inspect.getfullargspec(func)
    except AttributeError:
        # Python 2 only knows getargspec()
        argspec = inspect.getargspec(func)
    return arg in argspec.args
# originally written by Giampaolo Rodolà and Ken Seehof
# https://code.activestate.com/recipes/576563-cached-property/#c3
# namedtuple with defaults and lazy_vals
def namedtuple(typename, field_names, lazy_vals=None, **kwargs):
    '''Return a namedtuple class that supports default values and lazy values.

    Defaults are given inline in `field_names` as 'name=default' items;
    `lazy_vals` is a dict {attr_name: function} of attributes computed on
    first access (via lazy_val).
    '''
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    field_names_without_defaults = []
    defaults = []
    for name in field_names:
        list_ = name.split('=', 1)
        if len(list_) > 1:
            name, default = list_
            # NOTE(review): the default expression is eval()'d -- only pass
            # trusted, literal default values in `field_names`
            defaults.append(eval(default))
        elif len(defaults) != 0:
            raise ValueError('non-keyword arg after keyword arg in field_names')
        field_names_without_defaults.append(name)
    _class = collections.namedtuple(typename, field_names_without_defaults,
                                    **kwargs)
    _class.__new__.__defaults__ = tuple(defaults)
    if lazy_vals is not None:
        # namedtuple instances are tuples and so they are immutable. We cannot
        # add an instance property _cache. So we create one global _cache dict
        # and one _del_hook_cache dict as class properties for storing the lazy
        # vals and the del-hooks and enable the del_hook-functionality by
        # adding a __del__ attribute function wich calls the del-hook.
        _class._cache = {}
        _class._del_hook_cache = {}
        def noop(): pass
        _class.__del__ = lambda self: self._del_hook_cache.get(id(self), noop)()
        for attr_name, func in lazy_vals.items():
            setattr(_class, attr_name,
                    lazy_val(func, with_del_hook=True))
    return _class
# TODO unit test
class StructContext(object):
    '''An instance of this is a file like object which enables access of an
    (data) struct.

    Usable as a context manager; entering and leaving resets the read
    offset to 0.
    '''
    def __init__(self, data_struct):
        # data_struct: a bytes-like buffer readable by struct.unpack_from()
        self.data_struct = data_struct
        # current read position, in bytes
        self.offset = 0

    def __enter__(self):
        self.seek(0)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.seek(0)

    def seek(self, offset):
        '''Set the read position to `offset` (in bytes).'''
        self.offset = offset

    def read(self, fmt):
        '''Unpack struct format `fmt` at the current offset and advance it.

        Return the single value for one-element formats, else the tuple.
        '''
        data = struct.unpack_from(fmt, self.data_struct, self.offset)
        self.offset += struct.calcsize(fmt)
        if len(data) == 1:
            return data[0]
        return data

    @lazy_val
    def length(self):
        # number of bytes in the underlying buffer, computed once on access
        return len(self.data_struct)
# https://stackoverflow.com/a/15190306
# TODO: unit tests
class timeout(object):
    '''timeout context.

    Usage example:

        >>> with timeout(0.1) as t:
        ...     while True:
        ...         if t.timed_out:
        ...             break
        ...         print('.')
        ...         time.sleep(0.02)
        .
        .
        .
        .
        .

    For more usage, see https://stackoverflow.com/a/15190306
    '''
    def __init__(self, seconds):
        # the time budget; the clock only starts ticking on __enter__
        self.seconds = seconds

    def __enter__(self):
        self.die_after = time.time() + self.seconds
        return self

    def __exit__(self, type, value, traceback):
        # nothing to release; `timed_out` is polled cooperatively
        pass

    @property
    def timed_out(self):
        '''True once the deadline set on __enter__ has passed.'''
        return self.die_after < time.time()
if __name__ == '__main__':
    # run all doctests of this module ...
    import doctest
    doctest.testmod()
    # ... plus a small smoke test of namedtuple()'s inline-default handling
    # Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    assert Repo.__new__.__defaults__ == (None, '~/repos')
    r = Repo(url='https://github.com/theno/fabsetup.git')
    assert r.__repr__() == 'Repo(' \
        'url=\'https://github.com/theno/fabsetup.git\', ' \
        'name=None, basedir=\'~/repos\')'
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.open | python | def open(self):
try:
self.device.open()
except ConnectTimeoutError as cte:
raise ConnectionException(cte.message)
self.device.timeout = self.timeout
self.device._conn._session.transport.set_keepalive(self.keepalive)
if hasattr(self.device, "cu"):
# make sure to remove the cu attr from previous session
# ValueError: requested attribute name cu already exists
del self.device.cu
self.device.bind(cu=Config)
if self.config_lock:
self._lock() | Open the connection wit the device. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L104-L118 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ignore_warning (boolean): not generate warning exceptions
        * ssh_config_file (string): path of an SSH config file to use
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    self.config_replace = False
    self.locked = False
    # Get optional arguments
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    # key-based auth when a key file is given, else password auth
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def close(self):
    """Terminate the session, releasing the configuration lock if held."""
    if self.config_lock:
        self._unlock()
    self.device.close()
def _lock(self):
    """Lock the config DB (no-op when already locked by this session)."""
    if self.locked:
        return
    self.device.cu.lock()
    self.locked = True
def _unlock(self):
    """Unlock the config DB (no-op when not locked by this session)."""
    if not self.locked:
        return
    self.device.cu.unlock()
    self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """
    This allows you to construct an arbitrary RPC call to retrieve common stuff. For example:

    Configuration:  get: "<get-configuration/>"
    Interface information:  get: "<get-interface-information/>"
    A particular interface information:
          get: "<get-interface-information/>"
          child: "<interface-name>ge-0/0/0</interface-name>"

    Returns the RPC reply serialized as an XML string.
    """
    rpc = etree.fromstring(get)
    if child:
        # nest the child element (e.g. a name filter) inside the RPC
        rpc.append(etree.fromstring(child))
    response = self.device.execute(rpc)
    return etree.tostring(response)
def is_alive(self):
    """Return {'is_alive': bool} describing whether the session is usable."""
    # evaluate the state of the underlying SSH connection
    # and also the NETCONF status from PyEZ
    return {
        'is_alive': self.device._conn._session.transport.is_active() and self.device.connected
    }
@staticmethod
def _is_json_format(config):
    """Return True if `config` parses as JSON, else False."""
    try:
        json.loads(config)
    except (TypeError, ValueError):
        return False
    else:
        return True
def _detect_config_format(self, config):
    """Guess the format of `config`: 'xml', 'set', 'json' or 'text'."""
    # first words that identify set-style configuration commands
    set_action_matches = (
        'set',
        'activate',
        'deactivate',
        'annotate',
        'copy',
        'delete',
        'insert',
        'protect',
        'rename',
        'unprotect',
    )
    stripped = config.strip()
    if stripped.startswith('<'):
        return 'xml'
    if stripped.split(' ')[0] in set_action_matches:
        return 'set'
    if self._is_json_format(config):
        return 'json'
    return 'text'
def _load_candidate(self, filename, config, overwrite):
    '''Load a candidate configuration from `filename` or from the `config`
    string; `overwrite` selects replace (True) vs merge (False) semantics.

    Raises ReplaceConfigException or MergeConfigException (depending on
    self.config_replace) when the device rejects the configuration.
    '''
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already aquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load a candidate configuration that will *replace* the running config.

    (The previous docstring said "merge", but overwrite=True selects
    replace semantics -- it was swapped with load_merge_candidate's.)
    """
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load a candidate configuration to be *merged* into the running config.

    (The previous docstring said "replace", but overwrite=False selects
    merge semantics -- it was swapped with load_replace_candidate's.)
    """
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the diff between candidate and running config ('' if none)."""
    diff = self.device.cu.diff()
    return '' if diff is None else diff.strip()
def commit_config(self):
    """Commit the candidate configuration."""
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    if not self.config_lock:
        # lock was taken ad hoc by _load_candidate: release it now
        self._unlock()
def discard_config(self):
    """Discard the candidate changes (rollback 0)."""
    self.device.cu.rollback(rb_id=0)
    if not self.config_lock:
        # lock was taken ad hoc by _load_candidate: release it now
        self._unlock()
def rollback(self):
    """Rollback to previous commit."""
    self.device.cu.rollback(rb_id=1)
    # a commit is required to make the rolled-back config active
    self.commit_config()
def get_facts(self):
    """Return facts of the device (vendor, model, serial, os, uptime, ...)."""
    output = self.device.facts
    # uptime may be unavailable; normalize to -1 in that case
    uptime = self.device.uptime or -1
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    interface_list = interfaces.keys()
    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(output['model']),
        'serial_number': py23_compat.text_type(output['serialnumber']),
        'os_version': py23_compat.text_type(output['version']),
        'hostname': py23_compat.text_type(output['hostname']),
        'fqdn': py23_compat.text_type(output['fqdn']),
        'uptime': uptime,
        'interface_list': interface_list
    }
def get_interfaces(self):
    """Return interfaces details, keyed by interface name.

    Speed is normalized to Mbps; unknown values become -1.
    """
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
        # speed is reported as e.g. '1000mbps' or '10Gbps'; split the
        # number from the unit and convert Gbps to Mbps
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return per-interface counters; missing values are reported as -1."""
    table = junos_views.junos_iface_counter_table(self.device)
    table.get()
    interface_counters = {}
    for ifname, stats in table.items():
        normalized = {}
        for counter_name, counter_value in stats:
            normalized[counter_name] = counter_value if counter_value is not None else -1
        interface_counters[ifname] = normalized
    return interface_counters
def get_environment(self):
    """Return environment details: power, fans, temperature, cpu, memory."""
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # placeholders; corrected from the PEM table below if available
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        # NOTE(review): this inner loop compares *every* threshold entry
        # against the current sensor, with later iterations overwriting
        # earlier is_alert/is_critical values -- looks suspicious but is
        # kept as-is; confirm against the threshold table semantics.
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # some platforms report e.g. '2048 MB'; keep the digits only
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, bacause of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
    """
    Function to derive address family from a junos table name.

    :params table: The name of the routing table
    :returns: address family
    """
    # known table-name fragments; anything else passes through unchanged
    address_family_mapping = {
        'inet': 'ipv4',
        'inet6': 'ipv6',
        'inetflow': 'flow'
    }
    family = table.split('.')[-2]
    return address_family_mapping.get(family, family)
def _parse_route_stats(self, neighbor):
    '''Build the per-address-family prefix statistics for a BGP neighbor.

    Returns a dict {family: {received/accepted/sent_prefixes}}, with -1
    placeholders when the session is down.
    '''
    data = {
        'ipv4': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        },
        'ipv6': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        }
    }
    if not neighbor['is_up']:
        return data
    elif isinstance(neighbor['tables'], list):
        if isinstance(neighbor['sent_prefixes'], int):
            # We expect sent_prefixes to be a list, but sometimes it
            # is of type int. Therefore convert attribute to list
            neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
        for idx, table in enumerate(neighbor['tables']):
            family = self._get_address_family(table)
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
            if 'in sync' in neighbor['send-state'][idx]:
                # sent counters only exist for tables that are in sync
                data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
            else:
                data[family]['sent_prefixes'] = 0
    else:
        # single table: the stats are scalars, not lists
        family = self._get_address_family(neighbor['tables'])
        data[family] = {}
        data[family]['received_prefixes'] = neighbor['received_prefixes']
        data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
        data[family]['sent_prefixes'] = neighbor['sent_prefixes']
    return data
@staticmethod
def _parse_value(value):
    """Normalize a table value: str -> text_type, None -> u'', else as-is."""
    if value is None:
        return u''
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    return value
def get_bgp_neighbors(self):
    """Return BGP neighbors details, grouped by routing instance."""
    bgp_neighbor_data = {}
    # every peer starts from these defaults; table values override them
    default_neighbor_details = {
        'local_as': 0,
        'remote_as': 0,
        'remote_id': '',
        'is_up': False,
        'is_enabled': False,
        'description': '',
        'uptime': 0,
        'address_family': {}
    }
    keys = default_neighbor_details.keys()
    uptime_table = junos_views.junos_bgp_uptime_table(self.device)
    bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
    uptime_table_lookup = {}

    def _get_uptime_table(instance):
        # memoized per-instance fetch of the uptime table
        if instance not in uptime_table_lookup:
            uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
        return uptime_table_lookup[instance]

    def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
        '''
        Make sure to execute a simple request whenever using
        junos > 13. This is a helper used to avoid code redundancy
        and reuse the function also when iterating through the list
        BGP neighbors under a specific routing instance,
        also when the device is capable to return the routing
        instance name at the BGP neighbor level.
        '''
        for bgp_neighbor in neighbor_data:
            peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # not instance, means newer Junos version,
                # as we request everything in a single request
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance is explicitly requests,
                # thus it's an old Junos, so we retrieve the BGP neighbors
                # under a certain routing instance
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            if instance_name not in bgp_neighbor_data:
                bgp_neighbor_data[instance_name] = {}
            if 'router_id' not in bgp_neighbor_data[instance_name]:
                # we only need to set this once
                bgp_neighbor_data[instance_name]['router_id'] = \
                    py23_compat.text_type(neighbor_details.get('local_id', ''))
            peer = {
                key: self._parse_value(value)
                for key, value in neighbor_details.items()
                if key in keys
            }
            peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
            peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
            peer['address_family'] = self._parse_route_stats(neighbor_details)
            if 'peers' not in bgp_neighbor_data[instance_name]:
                bgp_neighbor_data[instance_name]['peers'] = {}
            bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
            if not uptime_table_items:
                uptime_table_items = _get_uptime_table(instance)
            for neighbor, uptime in uptime_table_items:
                if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                    bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]
    # Commenting out the following sections, till Junos
    # will provide a way to identify the routing instance name
    # from the details of the BGP neighbor
    # currently, there are Junos 15 version having a field called `peer_fwd_rti`
    # but unfortunately, this is not consistent.
    # Junos 17 might have this fixed, but this needs to be revisited later.
    # In the definition below, `old_junos` means a version that does not provide
    # the forwarding RTI information.
    #
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15

    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device).get()
    for instance, instance_data in instances.items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        bgp_neighbor_data[instance] = {'peers': {}}
        instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
        uptime_table_items = uptime_table.get(instance=instance).items()
        _get_bgp_neighbors_core(instance_neighbors,
                                instance=instance,
                                uptime_table_items=uptime_table_items)
    # If the OS provides the `peer_fwd_rti` or any way to identify the
    # rotuing instance name (see above), the performances of this getter
    # can be significantly improved, as we won't execute one request
    # for each an every RT.
    # However, this improvement would only be beneficial for multi-VRF envs.
    #
    # else:
    #     instance_neighbors = bgp_neighbors_table.get().items()
    #     _get_bgp_neighbors_core(instance_neighbors)
    # drop instances that ended up without any peers
    bgp_tmp_dict = {}
    for k, v in bgp_neighbor_data.items():
        if bgp_neighbor_data[k]['peers']:
            bgp_tmp_dict[k] = v
    return bgp_tmp_dict
def get_lldp_neighbors(self):
    """Return the LLDP neighbors, grouped by local interface name."""
    lldp_view = junos_views.junos_lldp_table(self.device)
    try:
        lldp_view.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    neighbors = {}
    for local_port, fields in lldp_view.items():
        # each table entry is a list of (field, value) pairs
        entry = {field: py23_compat.text_type(value) for field, value in fields}
        neighbors.setdefault(local_port, []).append(entry)
    return neighbors
def get_lldp_neighbors_detail(self, interface=''):
    """Detailed view of the LLDP neighbors.

    :param interface: optional interface name; when set, only that
        interface's neighbors are returned (previously this parameter
        was silently ignored).
    :return: dict keyed by local interface, each value a list of
        neighbor detail dicts; ``{}`` when the RPC fails.
    """
    lldp_neighbors = {}
    lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    # reuse the result of the .get() above instead of issuing a second,
    # identical RPC just to read the keys
    interfaces = lldp_table.keys()
    if interface:
        # honour the interface filter from the NAPALM base API
        interfaces = [iface for iface in interfaces if iface == interface]
    # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
    # and SRX Series is get-lldp-interface-neighbors-information,
    # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
    # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
    # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
    # hoisted out of the loop: the personality never changes per interface
    old_style = self.device.facts.get('personality') in ('MX', 'M', 'T')
    if old_style:
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
    else:
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
    for iface in interfaces:
        if old_style:
            lldp_table.get(interface_device=iface)
        else:
            lldp_table.get(interface_name=iface)
        for item in lldp_table:
            lldp_neighbors.setdefault(iface, []).append({
                'parent_interface': item.parent_interface,
                'remote_port': item.remote_port,
                'remote_chassis_id': napalm_base.helpers.convert(
                    napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                'remote_port_description': napalm_base.helpers.convert(
                    py23_compat.text_type, item.remote_port_description),
                'remote_system_name': item.remote_system_name,
                'remote_system_description': item.remote_system_description,
                'remote_system_capab': item.remote_system_capab,
                'remote_system_enable_capab': item.remote_system_enable_capab
            })
    return lldp_neighbors
def cli(self, commands):
    """Execute raw CLI commands and returns their output.

    :param commands: list of CLI command strings; each may carry Junos
        display pipes (e.g. ``| match foo | count``).
    :return: dict mapping each original command string to its output.
    :raises TypeError: if ``commands`` is not a list.
    """
    cli_output = {}

    # The helpers below emulate the Junos display pipes locally, for the
    # case where the device itself did not apply them.

    def _count(txt, none):  # Second arg for consistency only. noqa
        '''
        Return the exact output, as Junos displays
        e.g.:
        > show system processes extensive | match root | count
        Count: 113 lines
        '''
        count = len(txt.splitlines())
        return 'Count: {count} lines'.format(count=count)

    def _trim(txt, length):
        '''
        Trim specified number of columns from start of line.
        '''
        try:
            newlines = []
            for line in txt.splitlines():
                newlines.append(line[int(length):])
            return '\n'.join(newlines)
        except ValueError:
            # non-numeric length: return the text untouched
            return txt

    def _except(txt, pattern):
        '''
        Show only text that does not match a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        unmatched = [
            line for line in txt.splitlines()
            if not re.search(rgx, line, re.I)
        ]
        return '\n'.join(unmatched)

    def _last(txt, length):
        '''
        Display end of output only.
        '''
        try:
            return '\n'.join(
                txt.splitlines()[(-1)*int(length):]
            )
        except ValueError:
            # non-numeric length: return the text untouched
            return txt

    def _match(txt, pattern):
        '''
        Show only text that matches a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        matched = [
            line for line in txt.splitlines()
            if re.search(rgx, line, re.I)
        ]
        return '\n'.join(matched)

    def _find(txt, pattern):
        '''
        Search for first occurrence of pattern.
        '''
        rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
        match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
        if match:
            return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
        else:
            return '\nPattern not found'

    def _process_pipe(cmd, txt):
        '''
        Process CLI output from Juniper device that
        doesn't allow piping the output.
        '''
        # NOTE(review): when the device already returned text, it is
        # passed through as-is and the local pipe emulation below is
        # skipped entirely — confirm whether `txt is None` can actually
        # reach the emulation path with usable data.
        if txt is not None:
            return txt
        _OF_MAP = OrderedDict()
        _OF_MAP['except'] = _except
        _OF_MAP['match'] = _match
        _OF_MAP['last'] = _last
        _OF_MAP['trim'] = _trim
        _OF_MAP['count'] = _count
        _OF_MAP['find'] = _find
        # the operations order matter in this case!
        exploded_cmd = cmd.split('|')
        pipe_oper_args = {}
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            pipe_args = ''.join(exploded_pipe[1:2])
            # will not throw error when there's no arg
            pipe_oper_args[pipe_oper] = pipe_args
        for oper in _OF_MAP.keys():
            # to make sure the operation sequence is correct
            if oper not in pipe_oper_args.keys():
                continue
            txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
        return txt

    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    _PIPE_BLACKLIST = ['save']
    # Preprocessing to avoid forbidden commands
    for command in commands:
        # strip blacklisted pipe operations (e.g. `| save`) before
        # sending the command to the device
        exploded_cmd = command.split('|')
        command_safe_parts = []
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            if pipe_oper in _PIPE_BLACKLIST:
                continue
            pipe_args = ''.join(exploded_pipe[1:2])
            safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                              args=pipe_args)
            command_safe_parts.append(safe_pipe)
        safe_command = exploded_cmd[0] if not command_safe_parts else\
            '{base} | {pipes}'.format(base=exploded_cmd[0],
                                      pipes=' | '.join(command_safe_parts))
        raw_txt = self.device.cli(safe_command, warning=False)
        # key the output by the ORIGINAL (unsanitized) command string
        cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
            _process_pipe(command, raw_txt))
    return cli_output
def get_bgp_config(self, group='', neighbor=''):
    """Return BGP configuration.

    :param group: optional BGP group name; restricts the output to that group.
    :param neighbor: optional neighbor IP; only honoured when ``group`` is set.
    :return: dict keyed by group name, each value holding the group's
        fields, its ``prefix_limit`` tree, and a ``neighbors`` sub-dict.
    """
    def update_dict(d, u):  # for deep dictionary update
        # recursively merges mapping `u` into `d`
        for k, v in u.items():
            if isinstance(d, collections.Mapping):
                if isinstance(v, collections.Mapping):
                    r = update_dict(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            else:
                d = {k: u[k]}
        return d

    def build_prefix_limit(**args):
        """
        Transform the lements of a dictionary into nested dictionaries.

        Example:
            {
                'inet_unicast_limit': 500,
                'inet_unicast_teardown_threshold': 95,
                'inet_unicast_teardown_timeout': 5
            }

        becomes:
            {
                'inet': {
                    'unicast': {
                        'limit': 500,
                        'teardown': {
                            'threshold': 95,
                            'timeout': 5
                        }
                    }
                }
            }
        """
        prefix_limit = {}
        for key, value in args.items():
            # split the flat key on '_' and build the nesting inside-out
            key_levels = key.split('_')
            length = len(key_levels)-1
            temp_dict = {
                key_levels[length]: value
            }
            for index in reversed(range(length)):
                level = key_levels[index]
                temp_dict = {level: temp_dict}
            update_dict(prefix_limit, temp_dict)
        return prefix_limit

    # field -> expected datatype maps; used both to seed defaults and to
    # coerce raw values coming back from the device
    _COMMON_FIELDS_DATATYPE_ = {
        'description': py23_compat.text_type,
        'local_address': py23_compat.text_type,
        'local_as': int,
        'remote_as': int,
        'import_policy': py23_compat.text_type,
        'export_policy': py23_compat.text_type,
        'inet_unicast_limit_prefix_limit': int,
        'inet_unicast_teardown_threshold_prefix_limit': int,
        'inet_unicast_teardown_timeout_prefix_limit': int,
        'inet_unicast_novalidate_prefix_limit': int,
        'inet_flow_limit_prefix_limit': int,
        'inet_flow_teardown_threshold_prefix_limit': int,
        'inet_flow_teardown_timeout_prefix_limit': int,
        'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
        'inet6_unicast_limit_prefix_limit': int,
        'inet6_unicast_teardown_threshold_prefix_limit': int,
        'inet6_unicast_teardown_timeout_prefix_limit': int,
        'inet6_unicast_novalidate_prefix_limit': int,
        'inet6_flow_limit_prefix_limit': int,
        'inet6_flow_teardown_threshold_prefix_limit': int,
        'inet6_flow_teardown_timeout_prefix_limit': int,
        'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
    }

    _PEER_FIELDS_DATATYPE_MAP_ = {
        'authentication_key': py23_compat.text_type,
        'route_reflector_client': bool,
        'nhs': bool
    }
    _PEER_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    _GROUP_FIELDS_DATATYPE_MAP_ = {
        'type': py23_compat.text_type,
        'apply_groups': list,
        'remove_private_as': bool,
        'multipath': bool,
        'multihop_ttl': int
    }
    _GROUP_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    # per-datatype default values used when the device returns nothing
    _DATATYPE_DEFAULT_ = {
        py23_compat.text_type: '',
        int: 0,
        bool: False,
        list: []
    }

    bgp_config = {}

    if group:
        bgp = junos_views.junos_bgp_config_group_table(self.device)
        bgp.get(group=group)
    else:
        bgp = junos_views.junos_bgp_config_table(self.device)
        bgp.get()
        neighbor = ''  # if no group is set, no neighbor should be set either
    bgp_items = bgp.items()

    if neighbor:
        neighbor_ip = napalm_base.helpers.ip(neighbor)

    for bgp_group in bgp_items:
        bgp_group_name = bgp_group[0]
        bgp_group_details = bgp_group[1]
        # seed the group entry with defaults for every non-prefix-limit field
        bgp_config[bgp_group_name] = {
            field: _DATATYPE_DEFAULT_.get(datatype)
            for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
            if '_prefix_limit' not in field
        }
        for elem in bgp_group_details:
            # first pass: non-prefix-limit fields only
            if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                continue
            datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
            default = _DATATYPE_DEFAULT_.get(datatype)
            key = elem[0]
            value = elem[1]
            if key in ['export_policy', 'import_policy']:
                if isinstance(value, list):
                    value = ' '.join(value)
            if key == 'local_address':
                value = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, value, value)
            if key == 'neighbors':
                # neighbors are processed separately below
                bgp_group_peers = value
                continue
            bgp_config[bgp_group_name].update({
                key: napalm_base.helpers.convert(datatype, value, default)
            })
        prefix_limit_fields = {}
        for elem in bgp_group_details:
            # second pass: collect the flat prefix-limit fields
            if '_prefix_limit' in elem[0] and elem[1] is not None:
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                prefix_limit_fields.update({
                    elem[0].replace('_prefix_limit', ''):
                        napalm_base.helpers.convert(datatype, elem[1], default)
                })
        bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
        if 'multihop' in bgp_config[bgp_group_name].keys():
            # Delete 'multihop' key from the output
            del bgp_config[bgp_group_name]['multihop']
            if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                # Set ttl to default value 64
                bgp_config[bgp_group_name]['multihop_ttl'] = 64

        bgp_config[bgp_group_name]['neighbors'] = {}
        for bgp_group_neighbor in bgp_group_peers.items():
            bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
            if neighbor and bgp_peer_address != neighbor:
                continue  # if filters applied, jump over all other neighbors
            bgp_group_details = bgp_group_neighbor[1]
            # seed peer entry with defaults for non-prefix-limit fields
            bgp_peer_details = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                bgp_peer_details.update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
                # NOTE(review): AS normalization re-runs on every field
                # iteration, and the 'cluster' check below only fires when
                # 'cluster' happens to be the current `key` — order-sensitive
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                if key == 'cluster':
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # the group is a route-reflector cluster: mark the peer too
                bgp_peer_details['route_reflector_client'] = True
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
            if neighbor and bgp_peer_address == neighbor_ip:
                break  # found the desired neighbor
        if 'cluster' in bgp_config[bgp_group_name].keys():
            # we do not want cluster in the output
            del bgp_config[bgp_group_name]['cluster']

    return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    :param neighbor_address: optional peer IP to filter on.
    :return: dict keyed by routing-instance name, then by remote AS,
        each leaf a list of per-neighbor detail dicts.
    """
    bgp_neighbors = {}

    # template of the per-neighbor output; deep-copied for every peer
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }

    # maps Junos BGP option tokens to NAPALM boolean field names
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }

    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors.

        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.
        '''
        for bgp_neighbor in neighbor_data:
            remote_as = int(bgp_neighbor[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # no instance given: fall back to the peer's forwarding RTI.
                # NOTE(review): `instance` is then reused for subsequent
                # peers in this loop — confirm that is intended
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            options = neighbor_details.pop('options', '')
            if isinstance(options, str):
                # translate option tokens to boolean flags
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            # Junos renders addresses as "ip+port"; split into both fields,
            # defaulting the port to 179 when absent
            local_address = neighbor_details.pop('local_address', '')
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            messages_queued_out = 0
            # sum queued-out messages across all RIB queues
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            # aggregate the prefix counters across all RIBs of this peer
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)

    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)

    # if old_junos:
    # one RPC per routing instance (see _bgp_iter_core docstring for why)
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dicts."""
    # could use ArpTable
    # from jnpr.junos.op.phyport import ArpTable
    # and simply use it
    # but
    # we need:
    #   - filters
    #   - group by VLAN ID
    #   - hostname & TTE fields as well
    arp_view = junos_views.junos_arp_table(self.device)
    arp_view.get()
    arp_table = []
    for _, fields in arp_view.items():
        entry = dict(fields)
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        entry['ip'] = napalm_base.helpers.ip(entry.get('ip'))
        arp_table.append(entry)
    return arp_table
def get_ntp_peers(self):
    """Return the NTP peers configured on the device."""
    peers_view = junos_views.junos_ntp_peers_config_table(self.device)
    peers_view.get()
    configured_peers = peers_view.items()
    # empty config -> empty dict, same shape as the populated case
    if not configured_peers:
        return {}
    return {napalm_base.helpers.ip(peer[0]): {} for peer in configured_peers}
def get_ntp_servers(self):
    """Return the NTP servers configured on the device."""
    servers_view = junos_views.junos_ntp_servers_config_table(self.device)
    servers_view.get()
    configured_servers = servers_view.items()
    # empty config -> empty dict, same shape as the populated case
    if not configured_servers:
        return {}
    return {napalm_base.helpers.ip(server[0]): {} for server in configured_servers}
def get_ntp_stats(self):
    """Return NTP stats (associations).

    :return: list of dicts, one per NTP association, parsed from the
        raw ``show ntp associations no-resolve`` CLI output.
    """
    # NTP Peers does not have XML RPC defined
    # thus we need to retrieve raw text and parse...
    # :(
    ntp_stats = []
    # raw-string literal: the original plain string relied on invalid
    # escape sequences (e.g. '\s'), deprecated in Python 3; also compile
    # once instead of re-matching the pattern per line
    REGEX = re.compile(
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$',
        re.I
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    ntp_assoc_output_lines = ntp_assoc_output.splitlines()
    for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:  # skip the 3 header lines
        line_search = REGEX.search(ntp_assoc_output_line)
        if not line_search:
            continue  # pattern not found
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),  # '*' marks the sync peer
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            continue  # jump to next line
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses, nested by interface / family / address."""
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }
    interfaces_ip = {}
    ip_view = junos_views.junos_ip_interfaces_table(self.device)
    ip_view.get()
    for ip_network, raw_fields in ip_view.items():
        ip_address = ip_network.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, ip_address, ip_address)
        try:
            fields = dict(raw_fields)
            family_raw = fields.get('family')
            interface = py23_compat.text_type(fields.get('interface'))
        except ValueError:
            continue
        # fall back to the family's full prefix length when none is given
        prefix = napalm_base.helpers.convert(int,
                                             ip_network.split('/')[-1],
                                             _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        if not family or not interface:
            continue
        addr_entry = interfaces_ip.setdefault(interface, {}) \
                                  .setdefault(family, {}) \
                                  .setdefault(address, {})
        addr_entry[u'prefix_length'] = prefix
    return interfaces_ip
def get_mac_address_table(self):
    """Return the MAC address table."""
    # pick the table view matching the platform flavour
    if self.device.facts.get('personality', '') in ['SWITCH']:  # for EX & QFX devices
        if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']:  # for L2NG devices
            mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
        else:
            mac_table = junos_views.junos_mac_address_table_switch(self.device)
    else:
        mac_table = junos_views.junos_mac_address_table(self.device)
    mac_table.get()
    defaults = {
        'mac': u'',
        'interface': u'',
        'vlan': 0,
        'static': False,
        'active': True,
        'moves': 0,
        'last_move': 0.0
    }
    mac_address_table = []
    for _, raw_fields in mac_table.items():
        entry = dict(defaults)
        entry.update(dict(raw_fields))
        mac = entry.get('mac')
        if mac == '*':
            # JUNOS returns '*' for Type = Flood
            continue
        entry['mac'] = napalm_base.helpers.mac(mac)
        mac_address_table.append(entry)
    return mac_address_table
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol.

    :param destination: destination prefix to query (string, required).
    :param protocol: optional protocol filter ('connected' is translated
        to Junos' 'direct').
    :return: dict mapping 'destination/prefixlen' to a list of route
        detail dicts, each with common fields plus 'protocol_attributes'.
    :raises TypeError: if ``destination`` is not a string.
    :raises CommandTimeoutException: when the query is too broad.
    :raises CommandErrorException: for unknown protocols / RPC errors.
    """
    routes = {}

    if not isinstance(destination, py23_compat.string_types):
        raise TypeError('Please specify a valid destination!')

    if protocol and isinstance(destination, py23_compat.string_types):
        protocol = protocol.lower()

    if protocol == 'connected':
        protocol = 'direct'  # this is how is called on JunOS

    _COMMON_PROTOCOL_FIELDS_ = [
        'destination',
        'prefix_length',
        'protocol',
        'current_active',
        'last_active',
        'age',
        'next_hop',
        'outgoing_interface',
        'selected_next_hop',
        'preference',
        'inactive_reason',
        'routing_table'
    ]  # identifies the list of fileds common for all protocols

    _BOOLEAN_FIELDS_ = [
        'current_active',
        'selected_next_hop',
        'last_active'
    ]  # fields expected to have boolean values

    _PROTOCOL_SPECIFIC_FIELDS_ = {
        'bgp': [
            'local_as',
            'remote_as',
            'as_path',
            'communities',
            'local_preference',
            'preference2',
            'remote_address',
            'metric',
            'metric2'
        ],
        'isis': [
            'level',
            'metric',
            'local_as'
        ]
    }

    routes_table = junos_views.junos_protocol_route_table(self.device)

    rt_kargs = {
        'destination': destination
    }
    if protocol and isinstance(destination, py23_compat.string_types):
        rt_kargs['protocol'] = protocol

    try:
        routes_table.get(**rt_kargs)
    except RpcTimeoutError:
        # on devices with milions of routes
        # in case the destination is too generic (e.g.: 10/8)
        # will take very very long to determine all routes and
        # moreover will return a huge list
        raise CommandTimeoutException(
            'Too many routes returned! Please try with a longer prefix or a specific protocol!'
        )
    except RpcError as rpce:
        if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
            raise CommandErrorException(
                'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
        raise CommandErrorException(rpce)
    except Exception as err:
        raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))

    routes_items = routes_table.items()

    for route in routes_items:
        d = {}
        # next_hop = route[0]
        d = {elem[0]: elem[1] for elem in route[1]}
        destination = napalm_base.helpers.ip(d.pop('destination', ''))
        prefix_length = d.pop('prefix_length', 32)
        destination = '{d}/{p}'.format(
            d=destination,
            p=prefix_length
        )
        # missing boolean fields default to False
        d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
        as_path = d.get('as_path')
        if as_path is not None:
            # strip the Junos decorations ("AS path:", trailing 'I')
            d['as_path'] = as_path.split(' I ')[0]\
                .replace('AS path:', '')\
                .replace('I', '')\
                .strip()
            # to be sure that contains only AS Numbers
        if d.get('inactive_reason') is None:
            d['inactive_reason'] = u''
        route_protocol = d.get('protocol').lower()
        if protocol and protocol != route_protocol:
            continue
        communities = d.get('communities')
        if communities is not None and type(communities) is not list:
            # normalize a single community to a one-element list
            d['communities'] = [communities]
        d_keys = list(d.keys())
        # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
        all_protocol_attributes = {
            key: d.pop(key)
            for key in d_keys
            if key not in _COMMON_PROTOCOL_FIELDS_
        }
        protocol_attributes = {
            key: value for key, value in all_protocol_attributes.items()
            if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
        }
        d['protocol_attributes'] = protocol_attributes
        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)

    return routes
def get_snmp_information(self):
    """Return the SNMP configuration."""
    snmp_config = junos_views.junos_snmp_config_table(self.device)
    snmp_config.get()
    snmp_items = snmp_config.items()
    if not snmp_items:
        return {}
    # flatten the top-level fields, replacing falsy values with ''
    snmp_information = {}
    for field, value in snmp_items[0][1]:
        snmp_information[py23_compat.text_type(field)] = value if value else ''
    snmp_information['community'] = {}
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information
    for community_name, community_fields in communities_table.items():
        details = {
            'acl': ''
        }
        for field, value in community_fields:
            if field == 'mode':
                # map the Junos authorization mode to the NAPALM vocabulary
                value = C.SNMP_AUTHORIZATION_MODE_MAP.get(value)
            details[py23_compat.text_type(field)] = py23_compat.text_type(value)
        snmp_information['community'][py23_compat.text_type(community_name)] = details
    return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes."""
    probes = {}
    config_view = junos_views.junos_rpm_probes_config_table(self.device)
    config_view.get()
    for raw_test_name, raw_fields in config_view.items():
        test_name = py23_compat.text_type(raw_test_name)
        details = dict(raw_fields)
        # probe_name has no fallback on purpose: a test without a probe
        # name is malformed
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('probe_name'))
        target = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('target', ''))
        test_interval = napalm_base.helpers.convert(int, details.pop('test_interval', '0'))
        probe_count = napalm_base.helpers.convert(int, details.pop('probe_count', '0'))
        probe_type = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('probe_type', ''))
        source = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('source_address', ''))
        probes.setdefault(probe_name, {})[test_name] = {
            'probe_type': probe_type,
            'target': target,
            'source': source,
            'probe_count': probe_count,
            'test_interval': test_interval
        }
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes."""
    probes_results = {}
    results_view = junos_views.junos_rpm_probes_results_table(self.device)
    results_view.get()
    for raw_probe_name, raw_fields in results_view.items():
        probe_name = py23_compat.text_type(raw_probe_name)
        test_results = dict(raw_fields)
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        for param_name, param_value in test_results.items():
            if isinstance(param_value, float):
                # convert from useconds to mseconds
                test_results[param_name] = param_value * 1e-3
        test_name = test_results.pop('test_name', '')
        if test_results.get('source', u'') is None:
            # normalize an explicit None source to an empty string
            test_results['source'] = u''
        probes_results.setdefault(probe_name, {})[test_name] = test_results
    return probes_results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results.

    :param destination: target host/IP (required).
    :param source: optional source address.
    :param ttl: optional max TTL.
    :param timeout: optional per-probe wait, in seconds.
    :param vrf: optional routing-instance name.
    :return: on success, {'success': {ttl: {'probes': {...}}}};
        on failure, {'error': '<message>'}.
    """
    traceroute_result = {}

    # calling from RPC does not work properly :(
    # but defined junos_route_instance_table just in case

    # build the CLI command from the optional arguments
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''

    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)

    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )

    traceroute_rpc = E('command', traceroute_command)
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    traceroute_results = rpc_reply.find('.//traceroute-results')

    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')

    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}

    traceroute_result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            # '*' when the probe got no reply
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }

    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute a ping from the device and return the parsed results.

    :param destination: host or IP address to ping.
    :param source: optional source address for the echo requests.
    :param ttl: maximum time-to-live of the probes.
    :param timeout: seconds to wait for a reply after the last packet.
    :param size: payload size in bytes.
    :param count: number of echo requests to send.
    :param vrf: routing instance in which to run the ping.
    :return: ``{'success': {...}}`` with per-probe results and aggregated RTT
        statistics (milliseconds), or ``{'error': <message>}`` on failure.
    """
    ping_dict = {}
    # Build each optional CLI fragment only when the argument was supplied.
    source_str = ' source {source}'.format(source=source) if source else ''
    maxttl_str = ' ttl {ttl}'.format(ttl=ttl) if ttl else ''
    timeout_str = ' wait {timeout}'.format(timeout=timeout) if timeout else ''
    size_str = ' size {size}'.format(size=size) if size else ''
    count_str = ' count {count}'.format(count=count) if count else ''
    vrf_str = ' routing-instance {vrf}'.format(vrf=vrf) if vrf else ''
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    # make direct RPC call via NETCONF
    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we got at least one ICMP reply.
    # BUGFIX: compare with `!=`, not `is not` -- identity comparison between
    # ints relies on CPython's small-int caching and emits a SyntaxWarning
    # on modern interpreters.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # Device reports RTT in microseconds; convert to milliseconds.
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the configuration of the users.

    :return: dict keyed by username; each value has 'level' (a Cisco-style
        privilege level derived from the Junos login class), 'password'
        and 'sshkeys' (list of configured SSH public keys).
    """
    users = {}
    # Map Junos login classes onto Cisco-like privilege levels; unknown
    # classes fall back to 0.
    _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _DEFAULT_USER_DETAILS = {
        'level': 0,
        'password': '',
        'sshkeys': []
    }
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    users_items = users_table.items()
    for user_entry in users_items:
        username = user_entry[0]
        user_details = _DEFAULT_USER_DETAILS.copy()
        # Overlay only non-empty fields returned by the device.
        user_details.update({
            d[0]: d[1] for d in user_entry[1] if d[1]
        })
        # 'class' is consumed for the privilege-level mapping and must not
        # appear in the returned dict.
        user_class = user_details.pop('class', '')
        user_details = {
            key: py23_compat.text_type(user_details[key])
            for key in user_details.keys()
        }
        level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
        user_details.update({
            'level': level
        })
        # Collect whichever SSH key types are present into a single list,
        # removing the per-type entries.
        user_details['sshkeys'] = [
            user_details.pop(key)
            for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if user_details.get(key, '')
        ]
        users[username] = user_details
    return users
def get_optics(self):
    """Return optics (transceiver DOM) information.

    Combines two operational tables: the generic optics table (which has no
    lane information and reports incorrect values for multi-lane optics) and
    the 40G/100G per-lane table, preferring the latter when an interface
    appears in both.  avg/min/max power values are defaulted to 0.0 because
    the device does not report them.
    """
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])
    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])
    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration in text format.

    :param retrieve: which configuration(s) to fetch: 'candidate',
        'running' or 'all'.  Junos has no separate startup config, so
        'startup' is always an empty string.
    """
    result = {
        'startup': '',
        'running': '',
        'candidate': ''
    }
    fetch_options = {
        'format': 'text',
        'database': 'candidate'
    }
    if retrieve in ('candidate', 'all'):
        cfg = self.device.rpc.get_config(filter_xml=None, options=fetch_options)
        result['candidate'] = py23_compat.text_type(cfg.text)
    if retrieve in ('running', 'all'):
        # same RPC, but against the committed database
        fetch_options['database'] = 'committed'
        cfg = self.device.rpc.get_config(filter_xml=None, options=fetch_options)
        result['running'] = py23_compat.text_type(cfg.text)
    return result
def get_network_instances(self, name=''):
    """Return the configured network instances (routing instances / VRFs).

    A synthetic 'default' instance is added (when not explicitly
    configured) containing every interface not assigned to a VRF.

    :param name: optional instance name; when given, only that instance
        is returned ({} if it does not exist).
    """
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            # missing instance-type means the master (default) instance
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        if not isinstance(ri_interfaces, list):
            # a single interface comes back as a scalar; normalise to a list
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # Everything not claimed by a VRF belongs to the default instance.
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver._lock | python | def _lock(self):
if not self.locked:
self.device.cu.lock()
self.locked = True | Lock the config DB. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L126-L130 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    self.config_replace = False
    self.locked = False
    # Optional arguments, with their defaults.
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    # Build the PyEZ Device once; the private key is only passed when set.
    device_params = dict(user=username,
                         password=password,
                         port=self.port,
                         ssh_config=self.ssh_config_file)
    if self.key_file:
        device_params['ssh_private_key_file'] = self.key_file
    self.device = Device(hostname, **device_params)
    self.profile = ["junos"]
def open(self):
    """Open the NETCONF connection with the device.

    Also configures the session timeout/keepalive, binds the configuration
    utility and optionally locks the candidate DB.

    :raises ConnectionException: when the connection times out.
    """
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        raise ConnectionException(cte.message)
    self.device.timeout = self.timeout
    # keep the underlying SSH transport alive across long-running RPCs
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Terminate the NETCONF session, releasing the config lock if we hold it."""
    if self.config_lock:
        # hand the candidate-DB lock back before dropping the session
        self._unlock()
    self.device.close()
def _unlock(self):
    """Release the candidate configuration DB lock, if currently held."""
    if not self.locked:
        return
    self.device.cu.unlock()
    self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """Execute an arbitrary RPC assembled from raw XML snippets.

    Examples:
        Configuration:          get="<get-configuration/>"
        Interface information:  get="<get-interface-information/>"
        A specific interface:   get="<get-interface-information/>",
                                child="<interface-name>ge-0/0/0</interface-name>"
    """
    request = etree.fromstring(get)
    if child:
        # optional child element narrowing the request
        request.append(etree.fromstring(child))
    reply = self.device.execute(request)
    return etree.tostring(reply)
def is_alive(self):
    """Report whether both the SSH transport and the NETCONF session are up."""
    transport_active = self.device._conn._session.transport.is_active()
    return {
        'is_alive': transport_active and self.device.connected
    }
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
fmt = 'text'
set_action_matches = [
'set',
'activate',
'deactivate',
'annotate',
'copy',
'delete',
'insert',
'protect',
'rename',
'unprotect',
]
if config.strip().startswith('<'):
return 'xml'
elif config.strip().split(' ')[0] in set_action_matches:
return 'set'
elif self._is_json_format(config):
return 'json'
return fmt
def _load_candidate(self, filename, config, overwrite):
    """Load configuration into the candidate DB.

    :param filename: path to a configuration file; takes precedence over
        *config* when not None.
    :param config: configuration as a string (text, set, XML or JSON).
    :param overwrite: True to replace the entire configuration, False to
        merge into it.
    :raises ReplaceConfigException: on load error during a replace operation.
    :raises MergeConfigException: on load error during a merge operation.
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already acquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            # PyEZ expects an lxml element, not a string, for XML payloads
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load a candidate configuration that fully replaces the running one."""
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load a candidate configuration to be merged into the running one."""
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the diff between candidate and running config ('' when none)."""
    diff = self.device.cu.diff()
    return '' if diff is None else diff.strip()
def commit_config(self):
    """Commit the candidate configuration."""
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    if not self.config_lock:
        # lock was taken on demand in _load_candidate; give it back now
        self._unlock()
def discard_config(self):
    """Drop the candidate changes by rolling back to the running config."""
    self.device.cu.rollback(rb_id=0)
    if not self.config_lock:
        # release the on-demand lock taken in _load_candidate
        self._unlock()
def rollback(self):
    """Restore the previously committed configuration and commit it."""
    self.device.cu.rollback(rb_id=1)
    self.commit_config()
def get_facts(self):
    """Return general device facts (vendor, model, serial, OS, uptime...)."""
    facts = self.device.facts
    iface_table = junos_views.junos_iface_table(self.device)
    iface_table.get()
    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(facts['model']),
        'serial_number': py23_compat.text_type(facts['serialnumber']),
        'os_version': py23_compat.text_type(facts['version']),
        'hostname': py23_compat.text_type(facts['hostname']),
        'fqdn': py23_compat.text_type(facts['fqdn']),
        # -1 when the uptime cannot be determined
        'uptime': self.device.uptime or -1,
        'interface_list': iface_table.keys(),
    }
def get_interfaces(self):
    """Return interfaces details, keyed by interface name.

    Each entry contains is_up/is_enabled/description/last_flapped/
    mac_address/speed, with speed normalised to Mbit/s (-1 when it cannot
    be parsed from the device output).
    """
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
        # Junos reports speed like '1000mbps' or '10Gbps'; pull out the
        # number and (optional) unit.
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            # normalise Gbit/s to Mbit/s
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return traffic and error counters per interface (-1 for missing values)."""
    counters_table = junos_views.junos_iface_counter_table(self.device)
    counters_table.get()
    result = {}
    for iface, counters in counters_table.items():
        result[iface] = {
            counter_name: counter_value if counter_value is not None else -1
            for counter_name, counter_value in counters
        }
    return result
def get_environment(self):
    """Return environment details: fans, temperature, power, CPU and memory.

    Power capacity/output default to -1.0 and are corrected afterwards from
    the PEM table on platforms that support it.  Sensor statuses are
    booleans: True only when the device reports 'OK'.
    """
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # Junos does not provide these here; corrected from the PEM
            # table below when available.
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            # NOTE(review): the alarm flags are re-initialised on every
            # threshold row, so the last row in the thresholds table
            # effectively determines is_alert/is_critical -- confirm this
            # matches the intended per-sensor threshold lookup.
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # some platforms append units to memory-dram-size; keep digits only
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, because of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
def _parse_route_stats(self, neighbor):
data = {
'ipv4': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
},
'ipv6': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
}
}
if not neighbor['is_up']:
return data
elif isinstance(neighbor['tables'], list):
if isinstance(neighbor['sent_prefixes'], int):
# We expect sent_prefixes to be a list, but sometimes it
# is of type int. Therefore convert attribute to list
neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
for idx, table in enumerate(neighbor['tables']):
family = self._get_address_family(table)
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
if 'in sync' in neighbor['send-state'][idx]:
data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
else:
data[family]['sent_prefixes'] = 0
else:
family = self._get_address_family(neighbor['tables'])
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes']
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
data[family]['sent_prefixes'] = neighbor['sent_prefixes']
return data
@staticmethod
def _parse_value(value):
    """Normalise a raw table value: strings become unicode, None becomes ''."""
    if value is None:
        return u''
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    return value
def get_bgp_neighbors(self):
    """Return BGP neighbors details, grouped by routing instance.

    :return: dict keyed by instance name ('global' for master), each with
        'router_id' and 'peers' (keyed by peer IP).  Instances without any
        peers are dropped from the result.
    """
    bgp_neighbor_data = {}
    default_neighbor_details = {
        'local_as': 0,
        'remote_as': 0,
        'remote_id': '',
        'is_up': False,
        'is_enabled': False,
        'description': '',
        'uptime': 0,
        'address_family': {}
    }
    keys = default_neighbor_details.keys()
    uptime_table = junos_views.junos_bgp_uptime_table(self.device)
    bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
    uptime_table_lookup = {}

    def _get_uptime_table(instance):
        # Cache the uptime table per instance to avoid repeated RPCs.
        if instance not in uptime_table_lookup:
            uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
        return uptime_table_lookup[instance]

    def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
        '''
        Make sure to execute a simple request whenever using
        junos > 13. This is a helper used to avoid code redundancy
        and reuse the function also when iterating through the list
        BGP neighbors under a specific routing instance,
        also when the device is capable to return the routing
        instance name at the BGP neighbor level.
        '''
        for bgp_neighbor in neighbor_data:
            # key looks like '<peer-ip>+<port>'; keep the address part only
            peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # not instance, means newer Junos version,
                # as we request everything in a single request
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance is explicitly requests,
                # thus it's an old Junos, so we retrieve the BGP neighbors
                # under a certain routing instance
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            if instance_name not in bgp_neighbor_data:
                bgp_neighbor_data[instance_name] = {}
            if 'router_id' not in bgp_neighbor_data[instance_name]:
                # we only need to set this once
                bgp_neighbor_data[instance_name]['router_id'] = \
                    py23_compat.text_type(neighbor_details.get('local_id', ''))
            # keep only the fields defined in the NAPALM neighbor schema
            peer = {
                key: self._parse_value(value)
                for key, value in neighbor_details.items()
                if key in keys
            }
            peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
            peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
            peer['address_family'] = self._parse_route_stats(neighbor_details)
            if 'peers' not in bgp_neighbor_data[instance_name]:
                bgp_neighbor_data[instance_name]['peers'] = {}
            bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
            if not uptime_table_items:
                uptime_table_items = _get_uptime_table(instance)
            for neighbor, uptime in uptime_table_items:
                if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                    bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]
    # Commenting out the following sections, till Junos
    # will provide a way to identify the routing instance name
    # from the details of the BGP neighbor
    # currently, there are Junos 15 version having a field called `peer_fwd_rti`
    # but unfortunately, this is not consistent.
    # Junos 17 might have this fixed, but this needs to be revisited later.
    # In the definition below, `old_junos` means a version that does not provide
    # the forwarding RTI information.
    #
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device).get()
    for instance, instance_data in instances.items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        bgp_neighbor_data[instance] = {'peers': {}}
        instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
        uptime_table_items = uptime_table.get(instance=instance).items()
        _get_bgp_neighbors_core(instance_neighbors,
                                instance=instance,
                                uptime_table_items=uptime_table_items)
    # If the OS provides the `peer_fwd_rti` or any way to identify the
    # rotuing instance name (see above), the performances of this getter
    # can be significantly improved, as we won't execute one request
    # for each an every RT.
    # However, this improvement would only be beneficial for multi-VRF envs.
    #
    # else:
    #     instance_neighbors = bgp_neighbors_table.get().items()
    #     _get_bgp_neighbors_core(instance_neighbors)
    # Drop instances that ended up without any peers.
    bgp_tmp_dict = {}
    for k, v in bgp_neighbor_data.items():
        if bgp_neighbor_data[k]['peers']:
            bgp_tmp_dict[k] = v
    return bgp_tmp_dict
def get_lldp_neighbors(self):
    """Return LLDP neighbors as {interface: [neighbor dicts]}.

    Returns {} when the LLDP information cannot be retrieved (error is
    logged rather than raised).
    """
    lldp_table = junos_views.junos_lldp_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    neighbors = {}
    for iface, fields in lldp_table.items():
        entry = {field_name: py23_compat.text_type(field_value)
                 for field_name, field_value in fields}
        neighbors.setdefault(iface, []).append(entry)
    return neighbors
def get_lldp_neighbors_detail(self, interface=''):
    """Detailed view of the LLDP neighbors.

    :param interface: optional interface name; when set, only the neighbors
        on that interface are queried and returned.  (Previously this
        argument was silently ignored: the loop variable shadowed it.)
    :return: dict mapping local interface name to a list of neighbor
        detail dicts.
    """
    lldp_neighbors = {}
    lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    # BUGFIX: reuse the table already fetched above instead of issuing the
    # same RPC a second time, and honour the `interface` filter argument.
    interfaces = lldp_table.keys()
    if interface:
        interfaces = [interface]
    # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
    # and SRX Series is get-lldp-interface-neighbors-information,
    # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
    # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
    # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
    old_style_rpc = self.device.facts.get('personality') in ('MX', 'M', 'T')
    if old_style_rpc:
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
    else:
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
    for iface in interfaces:
        if old_style_rpc:
            lldp_table.get(interface_device=iface)
        else:
            lldp_table.get(interface_name=iface)
        for item in lldp_table:
            lldp_neighbors.setdefault(iface, []).append({
                'parent_interface': item.parent_interface,
                'remote_port': item.remote_port,
                'remote_chassis_id': napalm_base.helpers.convert(
                    napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                'remote_port_description': napalm_base.helpers.convert(
                    py23_compat.text_type, item.remote_port_description),
                'remote_system_name': item.remote_system_name,
                'remote_system_description': item.remote_system_description,
                'remote_system_capab': item.remote_system_capab,
                'remote_system_enable_capab': item.remote_system_enable_capab
            })
    return lldp_neighbors
def cli(self, commands):
    """Execute raw CLI commands and return their output.

    Pipe operators that the device cannot execute are emulated locally by
    the nested ``_*`` helpers; the 'save' pipe is stripped for safety.

    :param commands: list of CLI command strings.
    :raises TypeError: when *commands* is not a list.
    :return: dict mapping each original command to its textual output.
    """
    cli_output = {}

    def _count(txt, none):  # Second arg for consistency only. noqa
        '''
        Return the exact output, as Junos displays
        e.g.:
        > show system processes extensive | match root | count
        Count: 113 lines
        '''
        count = len(txt.splitlines())
        return 'Count: {count} lines'.format(count=count)

    def _trim(txt, length):
        '''
        Trim specified number of columns from start of line.
        '''
        try:
            newlines = []
            for line in txt.splitlines():
                newlines.append(line[int(length):])
            return '\n'.join(newlines)
        except ValueError:
            # non-numeric length: return the text unmodified
            return txt

    def _except(txt, pattern):
        '''
        Show only text that does not match a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        unmatched = [
            line for line in txt.splitlines()
            if not re.search(rgx, line, re.I)
        ]
        return '\n'.join(unmatched)

    def _last(txt, length):
        '''
        Display end of output only.
        '''
        try:
            return '\n'.join(
                txt.splitlines()[(-1)*int(length):]
            )
        except ValueError:
            # non-numeric length: return the text unmodified
            return txt

    def _match(txt, pattern):
        '''
        Show only text that matches a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        matched = [
            line for line in txt.splitlines()
            if re.search(rgx, line, re.I)
        ]
        return '\n'.join(matched)

    def _find(txt, pattern):
        '''
        Search for first occurrence of pattern.
        '''
        rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
        match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
        if match:
            return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
        else:
            return '\nPattern not found'

    def _process_pipe(cmd, txt):
        '''
        Process CLI output from Juniper device that
        doesn't allow piping the output.
        '''
        # NOTE(review): non-None output is returned as-is (i.e. the device
        # already applied the pipes); local emulation below only runs when
        # txt is None -- confirm this is the intended contract.
        if txt is not None:
            return txt
        _OF_MAP = OrderedDict()
        _OF_MAP['except'] = _except
        _OF_MAP['match'] = _match
        _OF_MAP['last'] = _last
        _OF_MAP['trim'] = _trim
        _OF_MAP['count'] = _count
        _OF_MAP['find'] = _find
        # the operations order matter in this case!
        exploded_cmd = cmd.split('|')
        pipe_oper_args = {}
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            pipe_args = ''.join(exploded_pipe[1:2])
            # will not throw error when there's no arg
            pipe_oper_args[pipe_oper] = pipe_args
        for oper in _OF_MAP.keys():
            # to make sure the operation sequence is correct
            if oper not in pipe_oper_args.keys():
                continue
            txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
        return txt

    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    _PIPE_BLACKLIST = ['save']
    # Preprocessing to avoid forbidden commands
    for command in commands:
        exploded_cmd = command.split('|')
        command_safe_parts = []
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            if pipe_oper in _PIPE_BLACKLIST:
                continue
            pipe_args = ''.join(exploded_pipe[1:2])
            safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                              args=pipe_args)
            command_safe_parts.append(safe_pipe)
        safe_command = exploded_cmd[0] if not command_safe_parts else\
            '{base} | {pipes}'.format(base=exploded_cmd[0],
                                      pipes=' | '.join(command_safe_parts))
        raw_txt = self.device.cli(safe_command, warning=False)
        cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
            _process_pipe(command, raw_txt))
    return cli_output
def get_bgp_config(self, group='', neighbor=''):
"""Return BGP configuration, keyed by group name, with per-neighbor details nested under 'neighbors'."""
def update_dict(d, u):  # for deep dictionary update
# NOTE(review): ``collections.Mapping`` is deprecated since Python 3.3
# and removed in 3.10; the portable spelling is ``collections.abc.Mapping``
# (with a Python 2 fallback) -- confirm supported Python versions before fixing.
for k, v in u.items():
if isinstance(d, collections.Mapping):
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
else:
d = {k: u[k]}
return d
def build_prefix_limit(**args):
"""
Transform the lements of a dictionary into nested dictionaries.
Example:
{
'inet_unicast_limit': 500,
'inet_unicast_teardown_threshold': 95,
'inet_unicast_teardown_timeout': 5
}
becomes:
{
'inet': {
'unicast': {
'limit': 500,
'teardown': {
'threshold': 95,
'timeout': 5
}
}
}
}
"""
prefix_limit = {}
for key, value in args.items():
# split 'inet_unicast_limit' -> ['inet', 'unicast', 'limit'] and
# nest from the innermost level outwards
key_levels = key.split('_')
length = len(key_levels)-1
temp_dict = {
key_levels[length]: value
}
for index in reversed(range(length)):
level = key_levels[index]
temp_dict = {level: temp_dict}
update_dict(prefix_limit, temp_dict)
return prefix_limit
# Field -> expected datatype maps; used both to seed defaults and to
# coerce values coming back from the PyEZ tables.
_COMMON_FIELDS_DATATYPE_ = {
'description': py23_compat.text_type,
'local_address': py23_compat.text_type,
'local_as': int,
'remote_as': int,
'import_policy': py23_compat.text_type,
'export_policy': py23_compat.text_type,
'inet_unicast_limit_prefix_limit': int,
'inet_unicast_teardown_threshold_prefix_limit': int,
'inet_unicast_teardown_timeout_prefix_limit': int,
'inet_unicast_novalidate_prefix_limit': int,
'inet_flow_limit_prefix_limit': int,
'inet_flow_teardown_threshold_prefix_limit': int,
'inet_flow_teardown_timeout_prefix_limit': int,
'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
'inet6_unicast_limit_prefix_limit': int,
'inet6_unicast_teardown_threshold_prefix_limit': int,
'inet6_unicast_teardown_timeout_prefix_limit': int,
'inet6_unicast_novalidate_prefix_limit': int,
'inet6_flow_limit_prefix_limit': int,
'inet6_flow_teardown_threshold_prefix_limit': int,
'inet6_flow_teardown_timeout_prefix_limit': int,
'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
}
_PEER_FIELDS_DATATYPE_MAP_ = {
'authentication_key': py23_compat.text_type,
'route_reflector_client': bool,
'nhs': bool
}
_PEER_FIELDS_DATATYPE_MAP_.update(
_COMMON_FIELDS_DATATYPE_
)
_GROUP_FIELDS_DATATYPE_MAP_ = {
'type': py23_compat.text_type,
'apply_groups': list,
'remove_private_as': bool,
'multipath': bool,
'multihop_ttl': int
}
_GROUP_FIELDS_DATATYPE_MAP_.update(
_COMMON_FIELDS_DATATYPE_
)
# Default value emitted for each datatype when the device returns nothing.
_DATATYPE_DEFAULT_ = {
py23_compat.text_type: '',
int: 0,
bool: False,
list: []
}
bgp_config = {}
if group:
bgp = junos_views.junos_bgp_config_group_table(self.device)
bgp.get(group=group)
else:
bgp = junos_views.junos_bgp_config_table(self.device)
bgp.get()
neighbor = ''  # if no group is set, no neighbor should be set either
bgp_items = bgp.items()
if neighbor:
neighbor_ip = napalm_base.helpers.ip(neighbor)
for bgp_group in bgp_items:
bgp_group_name = bgp_group[0]
bgp_group_details = bgp_group[1]
# seed every non-prefix-limit group field with its type default
bgp_config[bgp_group_name] = {
field: _DATATYPE_DEFAULT_.get(datatype)
for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
if '_prefix_limit' not in field
}
# first pass: plain (non-prefix-limit) group fields
for elem in bgp_group_details:
if not('_prefix_limit' not in elem[0] and elem[1] is not None):
continue
datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
default = _DATATYPE_DEFAULT_.get(datatype)
key = elem[0]
value = elem[1]
if key in ['export_policy', 'import_policy']:
# multiple policies come back as a list; flatten to one string
if isinstance(value, list):
value = ' '.join(value)
if key == 'local_address':
value = napalm_base.helpers.convert(
napalm_base.helpers.ip, value, value)
if key == 'neighbors':
# remember the nested neighbors table for the per-peer loop below
bgp_group_peers = value
continue
bgp_config[bgp_group_name].update({
key: napalm_base.helpers.convert(datatype, value, default)
})
# second pass: collect *_prefix_limit fields and nest them
prefix_limit_fields = {}
for elem in bgp_group_details:
if '_prefix_limit' in elem[0] and elem[1] is not None:
datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
default = _DATATYPE_DEFAULT_.get(datatype)
prefix_limit_fields.update({
elem[0].replace('_prefix_limit', ''):
napalm_base.helpers.convert(datatype, elem[1], default)
})
bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
if 'multihop' in bgp_config[bgp_group_name].keys():
# Delete 'multihop' key from the output
del bgp_config[bgp_group_name]['multihop']
if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
# Set ttl to default value 64
bgp_config[bgp_group_name]['multihop_ttl'] = 64
bgp_config[bgp_group_name]['neighbors'] = {}
for bgp_group_neighbor in bgp_group_peers.items():
bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
if neighbor and bgp_peer_address != neighbor:
continue  # if filters applied, jump over all other neighbors
bgp_group_details = bgp_group_neighbor[1]
# seed every non-prefix-limit peer field with its type default
bgp_peer_details = {
field: _DATATYPE_DEFAULT_.get(datatype)
for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
if '_prefix_limit' not in field
}
for elem in bgp_group_details:
if not('_prefix_limit' not in elem[0] and elem[1] is not None):
continue
datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
default = _DATATYPE_DEFAULT_.get(datatype)
key = elem[0]
value = elem[1]
if key in ['export_policy', 'import_policy']:
if isinstance(value, list):
value = ' '.join(value)
if key == 'local_address':
value = napalm_base.helpers.convert(
napalm_base.helpers.ip, value, value)
bgp_peer_details.update({
key: napalm_base.helpers.convert(datatype, value, default)
})
# NOTE(review): in the upstream source the as_number normalisation
# and the 'cluster' check below sit INSIDE the field loop, re-using
# the loop variable ``key``; indentation in this extract is ambiguous,
# so verify nesting against upstream before any refactor here.
bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
bgp_peer_details['local_as'])
bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
bgp_peer_details['remote_as'])
if key == 'cluster':
bgp_peer_details['route_reflector_client'] = True
# we do not want cluster in the output
del bgp_peer_details['cluster']
if 'cluster' in bgp_config[bgp_group_name].keys():
# a cluster configured at group level implies RR client peers
bgp_peer_details['route_reflector_client'] = True
prefix_limit_fields = {}
for elem in bgp_group_details:
if '_prefix_limit' in elem[0] and elem[1] is not None:
datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
default = _DATATYPE_DEFAULT_.get(datatype)
prefix_limit_fields.update({
elem[0].replace('_prefix_limit', ''):
napalm_base.helpers.convert(datatype, elem[1], default)
})
bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
if neighbor and bgp_peer_address == neighbor_ip:
break  # found the desired neighbor
if 'cluster' in bgp_config[bgp_group_name].keys():
# we do not want cluster in the output
del bgp_config[bgp_group_name]['cluster']
return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
"""Detailed view of the BGP neighbors operational data, keyed by routing instance then remote AS."""
bgp_neighbors = {}
# Template copied (deepcopy) for every neighbor, then overlaid with
# whatever fields the device actually returned.
default_neighbor_details = {
'up': False,
'local_as': 0,
'remote_as': 0,
'router_id': u'',
'local_address': u'',
'routing_table': u'',
'local_address_configured': False,
'local_port': 0,
'remote_address': u'',
'remote_port': 0,
'multihop': False,
'multipath': False,
'remove_private_as': False,
'import_policy': u'',
'export_policy': u'',
'input_messages': -1,
'output_messages': -1,
'input_updates': -1,
'output_updates': -1,
'messages_queued_out': -1,
'connection_state': u'',
'previous_connection_state': u'',
'last_event': u'',
'suppress_4byte_as': False,
'local_as_prepend': False,
'holdtime': 0,
'configured_holdtime': 0,
'keepalive': 0,
'configured_keepalive': 0,
'active_prefix_count': -1,
'received_prefix_count': -1,
'accepted_prefix_count': -1,
'suppressed_prefix_count': -1,
'advertised_prefix_count': -1,
'flap_count': 0
}
# Junos "bgp-option" tokens -> boolean output fields.
OPTION_KEY_MAP = {
'RemovePrivateAS': 'remove_private_as',
'Multipath': 'multipath',
'Multihop': 'multihop',
'AddressFamily': 'local_address_configured'
# 'AuthKey' : 'authentication_key_set'
# but other vendors do not specify if auth key is set
# other options:
# Preference, HoldTime, Ttl, LogUpDown, Refresh
}
def _bgp_iter_core(neighbor_data, instance=None):
'''
Iterate over a list of neighbors.
For older junos, the routing instance is not specified inside the
BGP neighbors XML, therefore we need to use a super sub-optimal structure
as in get_bgp_neighbors: iterate through the list of network instances
then execute one request for each and every routing instance.
For newer junos, this is not necessary as the routing instance is available
and we can get everything solve in a single request.
'''
for bgp_neighbor in neighbor_data:
remote_as = int(bgp_neighbor[0])
neighbor_details = deepcopy(default_neighbor_details)
neighbor_details.update(
{elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
)
# NOTE(review): when called with instance=None, ``instance`` is
# assigned from the first neighbor's peer_fwd_rti and then keeps
# that value for the remaining iterations -- verify that this is
# intended for the newer-Junos single-request path.
if not instance:
peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
instance = peer_fwd_rti
else:
peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
instance_name = 'global' if instance == 'master' else instance
options = neighbor_details.pop('options', '')
if isinstance(options, str):
# e.g. "Multihop Preference LocalAddress" -> per-token booleans
options_list = options.split()
for option in options_list:
key = OPTION_KEY_MAP.get(option)
if key is not None:
neighbor_details[key] = True
four_byte_as = neighbor_details.pop('4byte_as', 0)
# local/peer addresses come back as "ip+port"
local_address = neighbor_details.pop('local_address', '')
local_details = local_address.split('+')
neighbor_details['local_address'] = napalm_base.helpers.convert(
napalm_base.helpers.ip, local_details[0], local_details[0])
if len(local_details) == 2:
neighbor_details['local_port'] = int(local_details[1])
else:
neighbor_details['local_port'] = 179
neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
peer_address = neighbor_details.pop('peer_address', '')
remote_details = peer_address.split('+')
neighbor_details['remote_address'] = napalm_base.helpers.convert(
napalm_base.helpers.ip, remote_details[0], remote_details[0])
if len(remote_details) == 2:
neighbor_details['remote_port'] = int(remote_details[1])
else:
neighbor_details['remote_port'] = 179
neighbor_details['routing_table'] = instance_name
neighbor_details['local_as'] = napalm_base.helpers.as_number(
neighbor_details['local_as'])
neighbor_details['remote_as'] = napalm_base.helpers.as_number(
neighbor_details['remote_as'])
neighbors_rib = neighbor_details.pop('rib')
neighbors_queue = neighbor_details.pop('queue')
# sum the queued-out counters across all queues
messages_queued_out = 0
for queue_entry in neighbors_queue.items():
messages_queued_out += queue_entry[1][0][1]
neighbor_details['messages_queued_out'] = messages_queued_out
if instance_name not in bgp_neighbors.keys():
bgp_neighbors[instance_name] = {}
if remote_as not in bgp_neighbors[instance_name].keys():
bgp_neighbors[instance_name][remote_as] = []
neighbor_rib_stats = neighbors_rib.items()
if not neighbor_rib_stats:
bgp_neighbors[instance_name][remote_as].append(neighbor_details)
continue  # no RIBs available, pass default details
# aggregate prefix counters across every RIB the peer negotiated
neighbor_rib_details = {
'active_prefix_count': 0,
'received_prefix_count': 0,
'accepted_prefix_count': 0,
'suppressed_prefix_count': 0,
'advertised_prefix_count': 0
}
for rib_entry in neighbor_rib_stats:
for elem in rib_entry[1]:
if elem[1] is None:
neighbor_rib_details[elem[0]] += 0
else:
neighbor_rib_details[elem[0]] += elem[1]
neighbor_details.update(neighbor_rib_details)
bgp_neighbors[instance_name][remote_as].append(neighbor_details)
# old_junos = napalm_base.helpers.convert(
#     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
# if old_junos:
# one request per routing instance (older-Junos compatible path)
instances = junos_views.junos_route_instance_table(self.device)
for instance, instance_data in instances.get().items():
if instance.startswith('__'):
# junos internal instances
continue
neighbor_data = bgp_neighbors_table.get(instance=instance,
neighbor_address=str(neighbor_address)).items()
_bgp_iter_core(neighbor_data, instance=instance)
# else:
#     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
#     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
#     _bgp_iter_core(neighbor_data)
return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dictionaries."""
    # The stock ArpTable from jnpr.junos.op.phyport is deliberately not
    # used: the custom view additionally exposes filters, grouping by
    # VLAN ID, and the hostname/TTE fields.
    entries = []
    arp_view = junos_views.junos_arp_table(self.device)
    arp_view.get()
    for _, raw_fields in arp_view.items():
        entry = dict(raw_fields)
        # normalise MAC and IP representations
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        entry['ip'] = napalm_base.helpers.ip(entry.get('ip'))
        entries.append(entry)
    return entries
def get_ntp_peers(self):
    """Return the NTP peers configured on the device, keyed by normalised IP."""
    peers_view = junos_views.junos_ntp_peers_config_table(self.device)
    peers_view.get()
    configured_peers = peers_view.items()
    if not configured_peers:
        return {}
    # The spec reserves the inner dict for future attributes; it is empty.
    return {napalm_base.helpers.ip(address): {} for address, _ in configured_peers}
def get_ntp_servers(self):
    """Return the NTP servers configured on the device, keyed by normalised IP."""
    servers_view = junos_views.junos_ntp_servers_config_table(self.device)
    servers_view.get()
    configured_servers = servers_view.items()
    if not configured_servers:
        return {}
    # The spec reserves the inner dict for future attributes; it is empty.
    return {napalm_base.helpers.ip(address): {} for address, _ in configured_servers}
def get_ntp_stats(self):
    """Return NTP stats (associations)."""
    # "show ntp associations" has no XML RPC defined on Junos, so we
    # must scrape the raw CLI output and parse it with a regex. :(
    ntp_stats = []
    # FIX: use a raw string -- the previous plain literal relied on
    # invalid escape sequences such as '\s', which trigger
    # DeprecationWarning/SyntaxWarning on modern Python. Compiled once
    # (with the same re.I flag) since it is applied to every line.
    REGEX = re.compile(
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$',
        re.I
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    ntp_assoc_output_lines = ntp_assoc_output.splitlines()
    # skip the three table-header lines at the top of the output
    for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:
        line_search = REGEX.search(ntp_assoc_output_line)
        if not line_search:
            continue  # pattern not found
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            continue  # a field failed to convert -- skip this line
    return ntp_stats
def get_interfaces_ip(self):
"""Return the configured IP addresses, nested as interface -> family -> address -> prefix_length."""
interfaces_ip = {}
interface_table = junos_views.junos_ip_interfaces_table(self.device)
interface_table.get()
interface_table_items = interface_table.items()
# Junos address family -> output family key.
_FAMILY_VMAP_ = {
'inet': u'ipv4',
'inet6': u'ipv6'
# can add more mappings
}
# fallback prefix length when the device omits it from the network string
_FAMILY_MAX_PREFIXLEN = {
'inet': 32,
'inet6': 128
}
for interface_details in interface_table_items:
# table key is the "address/prefix" string
ip_network = interface_details[0]
ip_address = ip_network.split('/')[0]
address = napalm_base.helpers.convert(
napalm_base.helpers.ip, ip_address, ip_address)
try:
interface_details_dict = dict(interface_details[1])
family_raw = interface_details_dict.get('family')
interface = py23_compat.text_type(interface_details_dict.get('interface'))
except ValueError:
continue
prefix = napalm_base.helpers.convert(int,
ip_network.split('/')[-1],
_FAMILY_MAX_PREFIXLEN.get(family_raw))
family = _FAMILY_VMAP_.get(family_raw)
# skip unsupported families and entries without an interface name
if not family or not interface:
continue
if interface not in interfaces_ip.keys():
interfaces_ip[interface] = {}
if family not in interfaces_ip[interface].keys():
interfaces_ip[interface][family] = {}
if address not in interfaces_ip[interface][family].keys():
interfaces_ip[interface][family][address] = {}
interfaces_ip[interface][family][address][u'prefix_length'] = prefix
return interfaces_ip
def get_mac_address_table(self):
"""Return the MAC address table as a list of entry dictionaries."""
mac_address_table = []
# Pick the view matching the platform: EX/QFX switches use a different
# RPC, and L2NG-style switches yet another one.
if self.device.facts.get('personality', '') in ['SWITCH']:  # for EX & QFX devices
if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']:  # for L2NG devices
mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
else:
mac_table = junos_views.junos_mac_address_table_switch(self.device)
else:
mac_table = junos_views.junos_mac_address_table(self.device)
mac_table.get()
mac_table_items = mac_table.items()
# fields the view may omit are pre-seeded with these defaults
default_values = {
'mac': u'',
'interface': u'',
'vlan': 0,
'static': False,
'active': True,
'moves': 0,
'last_move': 0.0
}
for mac_table_entry in mac_table_items:
mac_entry = default_values.copy()
mac_entry.update(
{elem[0]: elem[1] for elem in mac_table_entry[1]}
)
mac = mac_entry.get('mac')
# JUNOS returns '*' for Type = Flood
if mac == '*':
continue
# normalise MAC formatting
mac_entry['mac'] = napalm_base.helpers.mac(mac)
mac_address_table.append(mac_entry)
return mac_address_table
def get_route_to(self, destination='', protocol=''):
"""Return route details to a specific destination, learned from a certain protocol."""
routes = {}
if not isinstance(destination, py23_compat.string_types):
raise TypeError('Please specify a valid destination!')
if protocol and isinstance(destination, py23_compat.string_types):
protocol = protocol.lower()
if protocol == 'connected':
protocol = 'direct'  # this is how is called on JunOS
_COMMON_PROTOCOL_FIELDS_ = [
'destination',
'prefix_length',
'protocol',
'current_active',
'last_active',
'age',
'next_hop',
'outgoing_interface',
'selected_next_hop',
'preference',
'inactive_reason',
'routing_table'
]  # identifies the list of fileds common for all protocols
_BOOLEAN_FIELDS_ = [
'current_active',
'selected_next_hop',
'last_active'
]  # fields expected to have boolean values
# anything not in the common list is kept only if listed here for
# the route's protocol
_PROTOCOL_SPECIFIC_FIELDS_ = {
'bgp': [
'local_as',
'remote_as',
'as_path',
'communities',
'local_preference',
'preference2',
'remote_address',
'metric',
'metric2'
],
'isis': [
'level',
'metric',
'local_as'
]
}
routes_table = junos_views.junos_protocol_route_table(self.device)
rt_kargs = {
'destination': destination
}
if protocol and isinstance(destination, py23_compat.string_types):
rt_kargs['protocol'] = protocol
try:
routes_table.get(**rt_kargs)
except RpcTimeoutError:
# on devices with milions of routes
# in case the destination is too generic (e.g.: 10/8)
# will take very very long to determine all routes and
# moreover will return a huge list
raise CommandTimeoutException(
'Too many routes returned! Please try with a longer prefix or a specific protocol!'
)
except RpcError as rpce:
if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
raise CommandErrorException(
'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
raise CommandErrorException(rpce)
except Exception as err:
raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
routes_items = routes_table.items()
for route in routes_items:
# NOTE(review): this initial ``d = {}`` is immediately overwritten
# by the comprehension below and could be removed.
d = {}
# next_hop = route[0]
d = {elem[0]: elem[1] for elem in route[1]}
# NOTE(review): the ``destination`` parameter is rebound here to the
# per-route "ip/prefix" key used in the output dictionary.
destination = napalm_base.helpers.ip(d.pop('destination', ''))
prefix_length = d.pop('prefix_length', 32)
destination = '{d}/{p}'.format(
d=destination,
p=prefix_length
)
# missing boolean fields default to False
d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
as_path = d.get('as_path')
if as_path is not None:
# strip the "AS path:" prefix and origin markers
d['as_path'] = as_path.split(' I ')[0]\
.replace('AS path:', '')\
.replace('I', '')\
.strip()
# to be sure that contains only AS Numbers
if d.get('inactive_reason') is None:
d['inactive_reason'] = u''
route_protocol = d.get('protocol').lower()
if protocol and protocol != route_protocol:
continue
communities = d.get('communities')
if communities is not None and type(communities) is not list:
d['communities'] = [communities]
d_keys = list(d.keys())
# fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
all_protocol_attributes = {
key: d.pop(key)
for key in d_keys
if key not in _COMMON_PROTOCOL_FIELDS_
}
protocol_attributes = {
key: value for key, value in all_protocol_attributes.items()
if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
}
d['protocol_attributes'] = protocol_attributes
if destination not in routes.keys():
routes[destination] = []
routes[destination].append(d)
return routes
def get_snmp_information(self):
"""Return the SNMP configuration, including per-community authorization details."""
snmp_information = {}
snmp_config = junos_views.junos_snmp_config_table(self.device)
snmp_config.get()
snmp_items = snmp_config.items()
if not snmp_items:
return snmp_information
# only one global SNMP config entry is expected; take the first row
snmp_information = {
py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
for ele in snmp_items[0][1]
}
snmp_information['community'] = {}
# nested table with one row per configured community
communities_table = snmp_information.pop('communities_table')
if not communities_table:
return snmp_information
for community in communities_table.items():
community_name = py23_compat.text_type(community[0])
community_details = {
'acl': ''
}
community_details.update({
py23_compat.text_type(ele[0]): py23_compat.text_type(
ele[1] if ele[0] != 'mode'
else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
for ele in community[1]
})
snmp_information['community'][community_name] = community_details
return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes, keyed by probe name then test name."""
    probes = {}
    config_view = junos_views.junos_rpm_probes_config_table(self.device)
    config_view.get()
    for raw_test_name, raw_fields in config_view.items():
        test_name = py23_compat.text_type(raw_test_name)
        fields = dict(raw_fields)
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, fields.pop('probe_name'))
        # group the tests under their owning probe
        probes.setdefault(probe_name, {})[test_name] = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, fields.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, fields.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, fields.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, fields.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, fields.pop('test_interval', '0')),
        }
    return probes
def get_probes_results(self):
"""Return the results of the RPM probes, keyed by probe name then test name."""
probes_results = {}
probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
probes_results_table.get()
probes_results_items = probes_results_table.items()
for probe_result in probes_results_items:
probe_name = py23_compat.text_type(probe_result[0])
test_results = {
p[0]: p[1] for p in probe_result[1]
}
test_results['last_test_loss'] = napalm_base.helpers.convert(
int, test_results.pop('last_test_loss'), 0)
# NOTE(review): this rescales EVERY float field, assuming all float
# fields are timing values in microseconds -- confirm no float
# percentage/ratio fields are present in the view.
# Mutating values in place is safe here: the key set is unchanged.
for test_param_name, test_param_value in test_results.items():
if isinstance(test_param_value, float):
test_results[test_param_name] = test_param_value * 1e-3
# convert from useconds to mseconds
test_name = test_results.pop('test_name', '')
source = test_results.get('source', u'')
if source is None:
test_results['source'] = u''
if probe_name not in probes_results.keys():
probes_results[probe_name] = {}
probes_results[probe_name][test_name] = test_results
return probes_results
def traceroute(self,
destination,
source=C.TRACEROUTE_SOURCE,
ttl=C.TRACEROUTE_TTL,
timeout=C.TRACEROUTE_TIMEOUT,
vrf=C.TRACEROUTE_VRF):
"""Execute traceroute and return results keyed by hop TTL and probe index."""
traceroute_result = {}
# calling form RPC does not work properly :(
# but defined junos_route_instance_table just in case
# Optional CLI fragments, appended only when the argument is truthy.
source_str = ''
maxttl_str = ''
wait_str = ''
vrf_str = ''
if source:
source_str = ' source {source}'.format(source=source)
if ttl:
maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
if timeout:
wait_str = ' wait {timeout}'.format(timeout=timeout)
if vrf:
vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
destination=destination,
source=source_str,
maxttl=maxttl_str,
wait=wait_str,
vrf=vrf_str
)
traceroute_rpc = E('command', traceroute_command)
rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
# make direct RPC call via NETCONF
traceroute_results = rpc_reply.find('.//traceroute-results')
# surface device-side failures as an 'error' entry instead of raising
traceroute_failure = napalm_base.helpers.find_txt(
traceroute_results, 'traceroute-failure', '')
error_message = napalm_base.helpers.find_txt(
traceroute_results, 'rpc-error/error-message', '')
if traceroute_failure and error_message:
return {'error': '{}: {}'.format(traceroute_failure, error_message)}
traceroute_result['success'] = {}
for hop in traceroute_results.findall('hop'):
ttl_value = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
if ttl_value not in traceroute_result['success']:
traceroute_result['success'][ttl_value] = {'probes': {}}
for probe in hop.findall('probe-result'):
probe_index = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
ip_address = napalm_base.helpers.convert(
napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
host_name = py23_compat.text_type(
napalm_base.helpers.find_txt(probe, 'host-name', '*'))
# device reports rtt in microseconds; convert to milliseconds
rtt = napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
traceroute_result['success'][ttl_value]['probes'][probe_index] = {
'ip_address': ip_address,
'host_name': host_name,
'rtt': rtt
}
return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """
    Execute ping on the device and return a dictionary with the results.

    Optional arguments are appended to the CLI command only when truthy;
    the command is issued as a direct RPC over NETCONF.
    """
    ping_dict = {}
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        # no summary at all: surface the device-side RPC error
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we got at least one ICMP reply.
    # BUG FIX: was ``packet_loss is not 100`` -- an identity comparison
    # with an int literal that only works via CPython's small-int cache
    # and raises SyntaxWarning on Python 3.8+; value comparison is the
    # correct test.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # device reports rtt values in microseconds; convert to ms
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
"""Return the configuration of the users, keyed by username."""
users = {}
# Junos login class -> Cisco-style privilege level expected by the API.
_JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
'super-user': 15,
'superuser': 15,
'operator': 5,
'read-only': 1,
'unauthorized': 0
}
_DEFAULT_USER_DETAILS = {
'level': 0,
'password': '',
'sshkeys': []
}
users_table = junos_views.junos_users_table(self.device)
users_table.get()
users_items = users_table.items()
for user_entry in users_items:
username = user_entry[0]
user_details = _DEFAULT_USER_DETAILS.copy()
# overlay only fields with truthy values from the device
user_details.update({
d[0]: d[1] for d in user_entry[1] if d[1]
})
user_class = user_details.pop('class', '')
# stringify every remaining field; 'level' and 'sshkeys' are
# re-assigned with their proper types right below
user_details = {
key: py23_compat.text_type(user_details[key])
for key in user_details.keys()
}
level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
user_details.update({
'level': level
})
# collect whichever key types are configured into a single list
user_details['sshkeys'] = [
user_details.pop(key)
for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
if user_details.get(key, '')
]
users[username] = user_details
return users
def get_optics(self):
"""Return optics information per interface, in the OpenConfig-like physical_channels layout."""
optics_table = junos_views.junos_intf_optics_table(self.device)
optics_table.get()
optics_items = optics_table.items()
# optics_items has no lane information, so we need to re-format data
# inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
# but the information for 40G/100G is incorrect at this point
# Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
optics_items_with_lane = []
for intf_optic_item in optics_items:
temp_list = list(intf_optic_item)
temp_list.insert(1, u"0")
new_intf_optic_item = tuple(temp_list)
optics_items_with_lane.append(new_intf_optic_item)
# Now optics_items_with_lane has all optics with lane 0 included
# Example: ('xe-0/0/0', u'0', [ optical_values ])
# Get optical information for 40G/100G optics
optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
optics_table40G.get()
optics_40Gitems = optics_table40G.items()
# Re-format data as before inserting lane value
new_optics_40Gitems = []
for item in optics_40Gitems:
# the 40G view is keyed by lane; the interface name is the first field
lane = item[0]
iface = item[1].pop(0)
new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
# New_optics_40Gitems contains 40G/100G optics only:
# ('et-0/0/49', u'0', [ optical_values ]),
# ('et-0/0/49', u'1', [ optical_values ]),
# ('et-0/0/49', u'2', [ optical_values ])
# Remove 40G/100G optics entries with wrong information returned
# from junos_intf_optics_table()
iface_40G = [item[0] for item in new_optics_40Gitems]
for intf_optic_item in optics_items_with_lane:
iface_name = intf_optic_item[0]
if iface_name not in iface_40G:
new_optics_40Gitems.append(intf_optic_item)
# New_optics_40Gitems contains all optics 10G/40G/100G with the lane
optics_detail = {}
for intf_optic_item in new_optics_40Gitems:
lane = intf_optic_item[1]
interface_name = py23_compat.text_type(intf_optic_item[0])
optics = dict(intf_optic_item[2])
if interface_name not in optics_detail:
optics_detail[interface_name] = {}
optics_detail[interface_name]['physical_channels'] = {}
optics_detail[interface_name]['physical_channels']['channel'] = []
# Defaulting avg, min, max values to 0.0 since device does not
# return these values
intf_optics = {
'index': int(lane),
'state': {
'input_power': {
'instant': (
float(optics['input_power'])
if optics['input_power'] not in
[None, C.OPTICS_NULL_LEVEL]
else 0.0),
'avg': 0.0,
'max': 0.0,
'min': 0.0
},
'output_power': {
'instant': (
float(optics['output_power'])
if optics['output_power'] not in
[None, C.OPTICS_NULL_LEVEL]
else 0.0),
'avg': 0.0,
'max': 0.0,
'min': 0.0
},
'laser_bias_current': {
'instant': (
float(optics['laser_bias_current'])
if optics['laser_bias_current'] not in
[None, C.OPTICS_NULL_LEVEL]
else 0.0),
'avg': 0.0,
'max': 0.0,
'min': 0.0
}
}
}
optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
return optics_detail
def get_config(self, retrieve='all'):
    """
    Return the device configuration as text.

    'startup' is always empty (Junos has no separate startup config);
    'candidate' and/or 'running' are populated depending on ``retrieve``.
    """
    def _fetch(database):
        # Retrieve one configuration database rendered as plain text.
        opts = {'format': 'text', 'database': database}
        reply = self.device.rpc.get_config(filter_xml=None, options=opts)
        return py23_compat.text_type(reply.text)

    result = {'startup': '', 'running': '', 'candidate': ''}
    if retrieve in ('candidate', 'all'):
        result['candidate'] = _fetch('candidate')
    if retrieve in ('running', 'all'):
        result['running'] = _fetch('committed')
    return result
def get_network_instances(self, name=''):
    """Return routing instances (VRFs) keyed by instance name.

    Interfaces not bound to any configured instance are grouped under
    a synthetic 'default' instance. If *name* is given, only that
    instance is returned (empty dict when it does not exist).
    """
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        # ri_entry[1] is a list of (field, value) tuples from the view
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            # no explicit instance-type configured -> treat as default
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        if not isinstance(ri_interfaces, list):
            # a single interface comes back as a scalar; normalise to list
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # Anything not claimed by an explicit instance belongs to 'default'
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver._unlock | python | def _unlock(self):
if self.locked:
self.device.cu.unlock()
self.locked = False | Unlock the config DB. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L132-L136 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    self.config_replace = False
    # Tracks whether we currently hold the configuration DB lock
    self.locked = False
    # Get optional arguments
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    # The PyEZ Device is constructed here but only connected in open()
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def open(self):
    """Open the connection with the device."""
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        raise ConnectionException(cte.message)
    self.device.timeout = self.timeout
    # Configure transport-level keepalives on the underlying SSH session
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    # Bind the PyEZ Config utility as device.cu for config operations
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Close the connection, releasing the config lock first if held."""
    if self.config_lock:
        self._unlock()
    self.device.close()
def _lock(self):
    """Lock the config DB (no-op if the lock is already held)."""
    if not self.locked:
        self.device.cu.lock()
        self.locked = True
def _rpc(self, get, child=None, **kwargs):
    """
    This allows you to construct an arbitrary RPC call to retrieve common stuff. For example:
    Configuration: get: "<get-configuration/>"
    Interface information: get: "<get-interface-information/>"
    A particular interface information:
        get: "<get-interface-information/>"
        child: "<interface-name>ge-0/0/0</interface-name>"

    Returns the RPC reply serialised as an XML string.
    """
    rpc = etree.fromstring(get)
    if child:
        # optional filter element appended under the RPC root
        rpc.append(etree.fromstring(child))
    response = self.device.execute(rpc)
    return etree.tostring(response)
def is_alive(self):
    """Return {'is_alive': bool} reflecting connection liveness."""
    # evaluate the state of the underlying SSH connection
    # and also the NETCONF status from PyEZ
    return {
        'is_alive': self.device._conn._session.transport.is_active() and self.device.connected
    }
@staticmethod
def _is_json_format(config):
    """Return True when *config* is parseable as JSON, else False."""
    try:
        json.loads(config)
    except (TypeError, ValueError):
        # not a string/bytes, or not valid JSON
        return False
    return True
def _detect_config_format(self, config):
    """Guess the format of a configuration payload.

    :return: one of 'xml', 'set', 'json' or 'text' (the fallback).
    """
    # First word of a set-style config is always one of these verbs
    set_actions = (
        'set',
        'activate',
        'deactivate',
        'annotate',
        'copy',
        'delete',
        'insert',
        'protect',
        'rename',
        'unprotect',
    )
    stripped = config.strip()
    if stripped.startswith('<'):
        return 'xml'
    if stripped.split(' ')[0] in set_actions:
        return 'set'
    if self._is_json_format(config):
        return 'json'
    return 'text'
def _load_candidate(self, filename, config, overwrite):
    """Load configuration into the candidate DB.

    :param filename: path to a config file, or None to use *config*.
    :param config: configuration text (used when *filename* is None).
    :param overwrite: True to replace the candidate, False to merge.
    :raises ReplaceConfigException/MergeConfigException: on load errors,
        depending on the current self.config_replace mode.
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already acquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            # PyEZ expects an lxml element for XML payloads
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Open the candidate config and replace (overwrite) it."""
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Open the candidate config and merge into it."""
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the diff between candidate and running config ('' if none)."""
    diff = self.device.cu.diff()
    return '' if diff is None else diff.strip()
def commit_config(self):
    """Commit configuration and release the lock if auto-locked."""
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    if not self.config_lock:
        self._unlock()
def discard_config(self):
    """Discard changes (rollback 0) and release the lock if auto-locked."""
    self.device.cu.rollback(rb_id=0)
    if not self.config_lock:
        self._unlock()
def rollback(self):
    """Rollback to previous commit and commit the rollback."""
    self.device.cu.rollback(rb_id=1)
    self.commit_config()
def get_facts(self):
    """Return facts of the device (vendor, model, serial, version, ...)."""
    output = self.device.facts
    # -1 signals "unknown uptime" per the NAPALM convention
    uptime = self.device.uptime or -1
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    interface_list = interfaces.keys()
    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(output['model']),
        'serial_number': py23_compat.text_type(output['serialnumber']),
        'os_version': py23_compat.text_type(output['version']),
        'hostname': py23_compat.text_type(output['hostname']),
        'fqdn': py23_compat.text_type(output['fqdn']),
        'uptime': uptime,
        'interface_list': interface_list
    }
def get_interfaces(self):
    """Return interfaces details keyed by interface name.

    Speed is normalised to Mbps; -1 means the speed could not be parsed.
    """
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
        # Device reports speed as e.g. '1000mbps' / '10Gbps'; split into
        # numeric value and unit.
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return per-interface counters; missing values are normalised to -1."""
    counter_table = junos_views.junos_iface_counter_table(self.device)
    counter_table.get()
    return {
        ifname: {field: (-1 if value is None else value) for field, value in fields}
        for ifname, fields in counter_table.items()
    }
def get_environment(self):
    """Return environment details (power, fans, temperature, cpu, memory)."""
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # -1.0 placeholders; corrected from the PEM table below when available
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        # NOTE(review): this inner loop re-scans all thresholds for every
        # sensor; only structured_temperature_data from the last iteration
        # is used for the alarm comparison below — confirm intended.
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # e.g. '2048 MB' — keep only the digits
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, bacause of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
    """
    Derive the address family from a Junos routing-table name.

    :params table: The name of the routing table (e.g. 'inet.0')
    :returns: address family ('ipv4', 'ipv6', 'flow', or the raw family
        token when unmapped)
    """
    family_map = {
        'inet': 'ipv4',
        'inet6': 'ipv6',
        'inetflow': 'flow'
    }
    raw_family = table.split('.')[-2]
    return family_map.get(raw_family, raw_family)
def _parse_route_stats(self, neighbor):
    """Build per-address-family prefix statistics for a BGP neighbor.

    Returns a dict keyed by family ('ipv4', 'ipv6', ...) with
    received/accepted/sent prefix counts; -1 means unavailable
    (e.g. when the session is down).
    """
    data = {
        'ipv4': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        },
        'ipv6': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        }
    }
    if not neighbor['is_up']:
        # session down: keep the -1 defaults
        return data
    elif isinstance(neighbor['tables'], list):
        if isinstance(neighbor['sent_prefixes'], int):
            # We expect sent_prefixes to be a list, but sometimes it
            # is of type int. Therefore convert attribute to list
            neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
        for idx, table in enumerate(neighbor['tables']):
            family = self._get_address_family(table)
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
            # sent counts only exist for tables that are 'in sync'
            if 'in sync' in neighbor['send-state'][idx]:
                data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
            else:
                data[family]['sent_prefixes'] = 0
    else:
        # single table: scalar fields instead of lists
        family = self._get_address_family(neighbor['tables'])
        data[family] = {}
        data[family]['received_prefixes'] = neighbor['received_prefixes']
        data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
        data[family]['sent_prefixes'] = neighbor['sent_prefixes']
    return data
@staticmethod
def _parse_value(value):
    """Normalise a raw value: strings to text_type, None to u''."""
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    elif value is None:
        return u''
    else:
        return value
def get_bgp_neighbors(self):
    """Return BGP neighbors details, grouped by routing instance."""
    bgp_neighbor_data = {}
    default_neighbor_details = {
        'local_as': 0,
        'remote_as': 0,
        'remote_id': '',
        'is_up': False,
        'is_enabled': False,
        'description': '',
        'uptime': 0,
        'address_family': {}
    }
    keys = default_neighbor_details.keys()
    uptime_table = junos_views.junos_bgp_uptime_table(self.device)
    bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
    uptime_table_lookup = {}

    def _get_uptime_table(instance):
        # memoised per-instance uptime lookup to avoid duplicate RPCs
        if instance not in uptime_table_lookup:
            uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
        return uptime_table_lookup[instance]

    def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
        '''
        Make sure to execute a simple request whenever using
        junos > 13. This is a helper used to avoid code redundancy
        and reuse the function also when iterating through the list
        BGP neighbors under a specific routing instance,
        also when the device is capable to return the routing
        instance name at the BGP neighbor level.
        '''
        for bgp_neighbor in neighbor_data:
            # neighbor key may carry a '+port' suffix; strip it
            peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # not instance, means newer Junos version,
                # as we request everything in a single request
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance is explicitly requests,
                # thus it's an old Junos, so we retrieve the BGP neighbors
                # under a certain routing instance
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            if instance_name not in bgp_neighbor_data:
                bgp_neighbor_data[instance_name] = {}
            if 'router_id' not in bgp_neighbor_data[instance_name]:
                # we only need to set this once
                bgp_neighbor_data[instance_name]['router_id'] = \
                    py23_compat.text_type(neighbor_details.get('local_id', ''))
            peer = {
                key: self._parse_value(value)
                for key, value in neighbor_details.items()
                if key in keys
            }
            peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
            peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
            peer['address_family'] = self._parse_route_stats(neighbor_details)
            if 'peers' not in bgp_neighbor_data[instance_name]:
                bgp_neighbor_data[instance_name]['peers'] = {}
            bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
            if not uptime_table_items:
                uptime_table_items = _get_uptime_table(instance)
            for neighbor, uptime in uptime_table_items:
                if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                    bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]

    # Commenting out the following sections, till Junos
    # will provide a way to identify the routing instance name
    # from the details of the BGP neighbor
    # currently, there are Junos 15 version having a field called `peer_fwd_rti`
    # but unfortunately, this is not consistent.
    # Junos 17 might have this fixed, but this needs to be revisited later.
    # In the definition below, `old_junos` means a version that does not provide
    # the forwarding RTI information.
    #
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device).get()
    for instance, instance_data in instances.items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        bgp_neighbor_data[instance] = {'peers': {}}
        instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
        uptime_table_items = uptime_table.get(instance=instance).items()
        _get_bgp_neighbors_core(instance_neighbors,
                                instance=instance,
                                uptime_table_items=uptime_table_items)
    # If the OS provides the `peer_fwd_rti` or any way to identify the
    # routing instance name (see above), the performances of this getter
    # can be significantly improved, as we won't execute one request
    # for each an every RT.
    # However, this improvement would only be beneficial for multi-VRF envs.
    #
    # else:
    #     instance_neighbors = bgp_neighbors_table.get().items()
    #     _get_bgp_neighbors_core(instance_neighbors)
    # Drop instances that ended up with no peers
    bgp_tmp_dict = {}
    for k, v in bgp_neighbor_data.items():
        if bgp_neighbor_data[k]['peers']:
            bgp_tmp_dict[k] = v
    return bgp_tmp_dict
def get_lldp_neighbors(self):
    """Return LLDP neighbors details keyed by local interface."""
    lldp = junos_views.junos_lldp_table(self.device)
    try:
        lldp.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    result = lldp.items()
    neighbors = {}
    for neigh in result:
        # multiple neighbors may be seen on the same local interface
        if neigh[0] not in neighbors.keys():
            neighbors[neigh[0]] = []
        neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
    return neighbors
def get_lldp_neighbors_detail(self, interface=''):
    """Detailed view of the LLDP neighbors.

    NOTE(review): the for-loop below rebinds the name ``interface``,
    shadowing the parameter — the *interface* filter argument appears
    to be ignored and all interfaces are always returned; confirm.
    """
    lldp_neighbors = {}
    lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    interfaces = lldp_table.get().keys()
    # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
    # and SRX Series is get-lldp-interface-neighbors-information,
    # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
    # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
    # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
    lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
    if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
    for interface in interfaces:
        if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
            lldp_table.get(interface_name=interface)
        else:
            lldp_table.get(interface_device=interface)
        for item in lldp_table:
            if interface not in lldp_neighbors.keys():
                lldp_neighbors[interface] = []
            lldp_neighbors[interface].append({
                'parent_interface': item.parent_interface,
                'remote_port': item.remote_port,
                'remote_chassis_id': napalm_base.helpers.convert(
                    napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                'remote_port_description': napalm_base.helpers.convert(
                    py23_compat.text_type, item.remote_port_description),
                'remote_system_name': item.remote_system_name,
                'remote_system_description': item.remote_system_description,
                'remote_system_capab': item.remote_system_capab,
                'remote_system_enable_capab': item.remote_system_enable_capab
            })
    return lldp_neighbors
def cli(self, commands):
    """Execute raw CLI commands and returns their output.

    :param commands: list of CLI command strings (pipes allowed; the
        'save' pipe is stripped for safety before sending to the device).
    :return: dict mapping each original command to its output text.
    :raises TypeError: when *commands* is not a list.
    """
    cli_output = {}

    def _count(txt, none):  # Second arg for consistency only. noqa
        '''
        Return the exact output, as Junos displays
        e.g.:
        > show system processes extensive | match root | count
        Count: 113 lines
        '''
        count = len(txt.splitlines())
        return 'Count: {count} lines'.format(count=count)

    def _trim(txt, length):
        '''
        Trim specified number of columns from start of line.
        '''
        try:
            newlines = []
            for line in txt.splitlines():
                newlines.append(line[int(length):])
            return '\n'.join(newlines)
        except ValueError:
            return txt

    def _except(txt, pattern):
        '''
        Show only text that does not match a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        unmatched = [
            line for line in txt.splitlines()
            if not re.search(rgx, line, re.I)
        ]
        return '\n'.join(unmatched)

    def _last(txt, length):
        '''
        Display end of output only.
        '''
        try:
            return '\n'.join(
                txt.splitlines()[(-1)*int(length):]
            )
        except ValueError:
            return txt

    def _match(txt, pattern):
        '''
        Show only text that matches a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        matched = [
            line for line in txt.splitlines()
            if re.search(rgx, line, re.I)
        ]
        return '\n'.join(matched)

    def _find(txt, pattern):
        '''
        Search for first occurrence of pattern.
        '''
        rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
        match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
        if match:
            return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
        else:
            return '\nPattern not found'

    def _process_pipe(cmd, txt):
        '''
        Process CLI output from Juniper device that
        doesn't allow piping the output.
        '''
        # NOTE(review): returns immediately whenever the device produced
        # output (presumably the device applied the pipes itself); the
        # local pipe emulation below only runs when txt is None, where
        # splitlines() would fail — confirm intended behavior.
        if txt is not None:
            return txt
        _OF_MAP = OrderedDict()
        _OF_MAP['except'] = _except
        _OF_MAP['match'] = _match
        _OF_MAP['last'] = _last
        _OF_MAP['trim'] = _trim
        _OF_MAP['count'] = _count
        _OF_MAP['find'] = _find
        # the operations order matter in this case!
        exploded_cmd = cmd.split('|')
        pipe_oper_args = {}
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            pipe_args = ''.join(exploded_pipe[1:2])
            # will not throw error when there's no arg
            pipe_oper_args[pipe_oper] = pipe_args
        for oper in _OF_MAP.keys():
            # to make sure the operation sequence is correct
            if oper not in pipe_oper_args.keys():
                continue
            txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
        return txt

    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    _PIPE_BLACKLIST = ['save']
    # Preprocessing to avoid forbidden commands
    for command in commands:
        exploded_cmd = command.split('|')
        command_safe_parts = []
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            if pipe_oper in _PIPE_BLACKLIST:
                continue
            pipe_args = ''.join(exploded_pipe[1:2])
            safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                              args=pipe_args)
            command_safe_parts.append(safe_pipe)
        safe_command = exploded_cmd[0] if not command_safe_parts else\
            '{base} | {pipes}'.format(base=exploded_cmd[0],
                                      pipes=' | '.join(command_safe_parts))
        raw_txt = self.device.cli(safe_command, warning=False)
        cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
            _process_pipe(command, raw_txt))
    return cli_output
def get_bgp_config(self, group='', neighbor=''):
    """Return BGP configuration, optionally filtered by group/neighbor."""
    def update_dict(d, u):  # for deep dictionary update
        for k, v in u.items():
            if isinstance(d, collections.Mapping):
                if isinstance(v, collections.Mapping):
                    r = update_dict(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            else:
                d = {k: u[k]}
        return d

    def build_prefix_limit(**args):
        """
        Transform the lements of a dictionary into nested dictionaries.

        Example:
            {
                'inet_unicast_limit': 500,
                'inet_unicast_teardown_threshold': 95,
                'inet_unicast_teardown_timeout': 5
            }
        becomes:
            {
                'inet': {
                    'unicast': {
                        'limit': 500,
                        'teardown': {
                            'threshold': 95,
                            'timeout': 5
                        }
                    }
                }
            }
        """
        prefix_limit = {}
        for key, value in args.items():
            key_levels = key.split('_')
            length = len(key_levels)-1
            temp_dict = {
                key_levels[length]: value
            }
            for index in reversed(range(length)):
                level = key_levels[index]
                temp_dict = {level: temp_dict}
            update_dict(prefix_limit, temp_dict)
        return prefix_limit

    _COMMON_FIELDS_DATATYPE_ = {
        'description': py23_compat.text_type,
        'local_address': py23_compat.text_type,
        'local_as': int,
        'remote_as': int,
        'import_policy': py23_compat.text_type,
        'export_policy': py23_compat.text_type,
        'inet_unicast_limit_prefix_limit': int,
        'inet_unicast_teardown_threshold_prefix_limit': int,
        'inet_unicast_teardown_timeout_prefix_limit': int,
        'inet_unicast_novalidate_prefix_limit': int,
        'inet_flow_limit_prefix_limit': int,
        'inet_flow_teardown_threshold_prefix_limit': int,
        'inet_flow_teardown_timeout_prefix_limit': int,
        'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
        'inet6_unicast_limit_prefix_limit': int,
        'inet6_unicast_teardown_threshold_prefix_limit': int,
        'inet6_unicast_teardown_timeout_prefix_limit': int,
        'inet6_unicast_novalidate_prefix_limit': int,
        'inet6_flow_limit_prefix_limit': int,
        'inet6_flow_teardown_threshold_prefix_limit': int,
        'inet6_flow_teardown_timeout_prefix_limit': int,
        'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
    }

    _PEER_FIELDS_DATATYPE_MAP_ = {
        'authentication_key': py23_compat.text_type,
        'route_reflector_client': bool,
        'nhs': bool
    }
    _PEER_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    _GROUP_FIELDS_DATATYPE_MAP_ = {
        'type': py23_compat.text_type,
        'apply_groups': list,
        'remove_private_as': bool,
        'multipath': bool,
        'multihop_ttl': int
    }
    _GROUP_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    _DATATYPE_DEFAULT_ = {
        py23_compat.text_type: '',
        int: 0,
        bool: False,
        list: []
    }

    bgp_config = {}

    if group:
        bgp = junos_views.junos_bgp_config_group_table(self.device)
        bgp.get(group=group)
    else:
        bgp = junos_views.junos_bgp_config_table(self.device)
        bgp.get()
        neighbor = ''  # if no group is set, no neighbor should be set either
    bgp_items = bgp.items()

    if neighbor:
        neighbor_ip = napalm_base.helpers.ip(neighbor)

    for bgp_group in bgp_items:
        bgp_group_name = bgp_group[0]
        bgp_group_details = bgp_group[1]
        # seed the group with defaults for all non-prefix-limit fields
        bgp_config[bgp_group_name] = {
            field: _DATATYPE_DEFAULT_.get(datatype)
            for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
            if '_prefix_limit' not in field
        }
        for elem in bgp_group_details:
            if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                continue
            datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
            default = _DATATYPE_DEFAULT_.get(datatype)
            key = elem[0]
            value = elem[1]
            if key in ['export_policy', 'import_policy']:
                if isinstance(value, list):
                    value = ' '.join(value)
            if key == 'local_address':
                value = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, value, value)
            if key == 'neighbors':
                # neighbors handled in their own loop below
                bgp_group_peers = value
                continue
            bgp_config[bgp_group_name].update({
                key: napalm_base.helpers.convert(datatype, value, default)
            })
        prefix_limit_fields = {}
        for elem in bgp_group_details:
            if '_prefix_limit' in elem[0] and elem[1] is not None:
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                prefix_limit_fields.update({
                    elem[0].replace('_prefix_limit', ''):
                        napalm_base.helpers.convert(datatype, elem[1], default)
                })
        bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
        if 'multihop' in bgp_config[bgp_group_name].keys():
            # Delete 'multihop' key from the output
            del bgp_config[bgp_group_name]['multihop']
            if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                # Set ttl to default value 64
                bgp_config[bgp_group_name]['multihop_ttl'] = 64

        bgp_config[bgp_group_name]['neighbors'] = {}
        for bgp_group_neighbor in bgp_group_peers.items():
            bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
            if neighbor and bgp_peer_address != neighbor:
                continue  # if filters applied, jump over all other neighbors
            bgp_group_details = bgp_group_neighbor[1]
            # seed the peer with defaults for all non-prefix-limit fields
            bgp_peer_details = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                bgp_peer_details.update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                # NOTE(review): this checks the loop variable `key` inside
                # the per-field loop, so it only fires on the iteration
                # where the 'cluster' field itself is seen — confirm intended.
                if key == 'cluster':
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
            if 'cluster' in bgp_config[bgp_group_name].keys():
                bgp_peer_details['route_reflector_client'] = True
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
            if neighbor and bgp_peer_address == neighbor_ip:
                break  # found the desired neighbor
        if 'cluster' in bgp_config[bgp_group_name].keys():
            # we do not want cluster in the output
            del bgp_config[bgp_group_name]['cluster']
    return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    :param neighbor_address: optional neighbor IP to restrict the query to.
    :return: dict keyed by routing-instance name, then by remote AS number,
        each value being a list of per-neighbor detail dicts.
    """
    bgp_neighbors = {}
    # Template merged into every neighbor so all expected API fields are
    # present even when the device omits them in its XML reply.
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }
    # Maps tokens from the neighbor's "options" string to boolean API fields.
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }

    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors.
        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.
        '''
        for bgp_neighbor in neighbor_data:
            remote_as = int(bgp_neighbor[0])
            # Deep copy so per-neighbor mutations never touch the template.
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # No instance passed in: take it from the neighbor's own
                # peer_fwd_rti leaf. NOTE(review): this rebinds the closure
                # variable `instance`, so only the FIRST neighbor's RTI is
                # used for subsequent iterations — only safe while callers
                # always pass `instance=` (as the code below does).
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            # "options" is a space-separated token list; translate known
            # tokens to boolean fields via OPTION_KEY_MAP.
            options = neighbor_details.pop('options', '')
            if isinstance(options, str):
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            # Addresses come as "ip+port"; split and default the port to 179.
            local_address = neighbor_details.pop('local_address', '')
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            # Sum queued-out messages across all queues of the neighbor.
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            messages_queued_out = 0
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            # Aggregate prefix counters across all RIBs of this neighbor.
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    # if old_junos:
    # One RPC per routing instance (old-junos compatible path).
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dicts.

    A custom view is used instead of jnpr.junos.op.phyport.ArpTable
    because we additionally need filtering, VLAN-ID grouping and the
    hostname / TTE fields.
    """
    arp_view = junos_views.junos_arp_table(self.device)
    arp_view.get()
    table = []
    for _, raw_fields in arp_view.items():
        entry = dict(raw_fields)
        # Normalise MAC and IP into the canonical napalm representations.
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        entry['ip'] = napalm_base.helpers.ip(entry.get('ip'))
        table.append(entry)
    return table
def get_ntp_peers(self):
    """Return the NTP peers configured on the device."""
    peers_view = junos_views.junos_ntp_peers_config_table(self.device)
    peers_view.get()
    configured = peers_view.items()
    if not configured:
        return {}
    result = {}
    for entry in configured:
        # Keys are the normalised peer addresses; values carry no detail.
        result[napalm_base.helpers.ip(entry[0])] = {}
    return result
def get_ntp_servers(self):
    """Return the NTP servers configured on the device."""
    servers_view = junos_views.junos_ntp_servers_config_table(self.device)
    servers_view.get()
    configured = servers_view.items()
    if not configured:
        return {}
    result = {}
    for entry in configured:
        # Keys are the normalised server addresses; values carry no detail.
        result[napalm_base.helpers.ip(entry[0])] = {}
    return result
def get_ntp_stats(self):
    """Return NTP stats (associations).

    NTP associations have no structured XML RPC on JunOS, so the CLI
    text of 'show ntp associations' is fetched and parsed line by line.

    :return: list of dicts, one per association line that matched.
    """
    ntp_stats = []
    # FIX: use raw strings for the pattern — the original plain strings
    # contained sequences like '\s' that are invalid escapes and raise
    # warnings (eventually errors) on modern Python. Also compile once
    # instead of re-parsing the pattern on every line.
    regex = re.compile(
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$',
        re.I
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    ntp_assoc_output_lines = ntp_assoc_output.splitlines()
    # Skip the three header lines of the CLI table.
    for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:
        line_search = regex.search(ntp_assoc_output_line)
        if not line_search:
            continue  # pattern not found
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            continue  # a malformed line must not abort the whole parse
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses.

    :return: dict keyed interface -> family ('ipv4'/'ipv6') -> address,
        each address mapping to {'prefix_length': int}.
    """
    interfaces_ip = {}
    interface_table = junos_views.junos_ip_interfaces_table(self.device)
    interface_table.get()
    interface_table_items = interface_table.items()
    # Junos family names mapped to the napalm API family names.
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    # Fallback prefix lengths when the entry has no explicit mask.
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }
    for interface_details in interface_table_items:
        # Table keys are "address/prefix" strings.
        ip_network = interface_details[0]
        ip_address = ip_network.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, ip_address, ip_address)
        try:
            interface_details_dict = dict(interface_details[1])
            family_raw = interface_details_dict.get('family')
            interface = py23_compat.text_type(interface_details_dict.get('interface'))
        except ValueError:
            continue
        prefix = napalm_base.helpers.convert(int,
                                             ip_network.split('/')[-1],
                                             _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        # Skip entries with an unknown family or no interface name.
        if not family or not interface:
            continue
        if interface not in interfaces_ip.keys():
            interfaces_ip[interface] = {}
        if family not in interfaces_ip[interface].keys():
            interfaces_ip[interface][family] = {}
        if address not in interfaces_ip[interface][family].keys():
            interfaces_ip[interface][family][address] = {}
        interfaces_ip[interface][family][address][u'prefix_length'] = prefix
    return interfaces_ip
def get_mac_address_table(self):
    """Return the MAC address (Ethernet-switching) table."""
    # Pick the view matching the platform: EX/QFX switches expose a
    # different RPC than routers, and L2NG-style switches yet another.
    if self.device.facts.get('personality', '') in ['SWITCH']:
        if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']:
            mac_view = junos_views.junos_mac_address_table_switch_l2ng(self.device)
        else:
            mac_view = junos_views.junos_mac_address_table_switch(self.device)
    else:
        mac_view = junos_views.junos_mac_address_table(self.device)
    mac_view.get()
    defaults = {
        'mac': u'',
        'interface': u'',
        'vlan': 0,
        'static': False,
        'active': True,
        'moves': 0,
        'last_move': 0.0
    }
    table = []
    for _, raw_fields in mac_view.items():
        entry = defaults.copy()
        entry.update(dict(raw_fields))
        mac_addr = entry.get('mac')
        # JUNOS reports '*' for Type = Flood entries — not real MACs.
        if mac_addr == '*':
            continue
        entry['mac'] = napalm_base.helpers.mac(mac_addr)
        table.append(entry)
    return table
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol.

    :param destination: prefix/host to look up (must be a string).
    :param protocol: optional protocol filter ('bgp', 'isis', 'connected', ...).
    :raises TypeError: when destination is not a string.
    :raises CommandTimeoutException: when the RPC times out (prefix too broad).
    :raises CommandErrorException: on any other RPC failure.
    """
    routes = {}
    if not isinstance(destination, py23_compat.string_types):
        raise TypeError('Please specify a valid destination!')
    if protocol and isinstance(destination, py23_compat.string_types):
        protocol = protocol.lower()
    if protocol == 'connected':
        protocol = 'direct'  # this is how is called on JunOS
    _COMMON_PROTOCOL_FIELDS_ = [
        'destination',
        'prefix_length',
        'protocol',
        'current_active',
        'last_active',
        'age',
        'next_hop',
        'outgoing_interface',
        'selected_next_hop',
        'preference',
        'inactive_reason',
        'routing_table'
    ]  # identifies the list of fields common for all protocols
    _BOOLEAN_FIELDS_ = [
        'current_active',
        'selected_next_hop',
        'last_active'
    ]  # fields expected to have boolean values
    # Per-protocol attribute whitelist for the 'protocol_attributes' dict.
    _PROTOCOL_SPECIFIC_FIELDS_ = {
        'bgp': [
            'local_as',
            'remote_as',
            'as_path',
            'communities',
            'local_preference',
            'preference2',
            'remote_address',
            'metric',
            'metric2'
        ],
        'isis': [
            'level',
            'metric',
            'local_as'
        ]
    }
    routes_table = junos_views.junos_protocol_route_table(self.device)
    rt_kargs = {
        'destination': destination
    }
    if protocol and isinstance(destination, py23_compat.string_types):
        rt_kargs['protocol'] = protocol
    try:
        routes_table.get(**rt_kargs)
    except RpcTimeoutError:
        # on devices with milions of routes
        # in case the destination is too generic (e.g.: 10/8)
        # will take very very long to determine all routes and
        # moreover will return a huge list
        raise CommandTimeoutException(
            'Too many routes returned! Please try with a longer prefix or a specific protocol!'
        )
    except RpcError as rpce:
        if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
            raise CommandErrorException(
                'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
        raise CommandErrorException(rpce)
    except Exception as err:
        raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
    routes_items = routes_table.items()
    for route in routes_items:
        d = {}
        # next_hop = route[0]
        d = {elem[0]: elem[1] for elem in route[1]}
        # Rebuild the "address/prefix" key from its two leaves.
        destination = napalm_base.helpers.ip(d.pop('destination', ''))
        prefix_length = d.pop('prefix_length', 32)
        destination = '{d}/{p}'.format(
            d=destination,
            p=prefix_length
        )
        # Missing boolean leaves default to False.
        d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
        as_path = d.get('as_path')
        if as_path is not None:
            d['as_path'] = as_path.split(' I ')[0]\
                .replace('AS path:', '')\
                .replace('I', '')\
                .strip()
            # to be sure that contains only AS Numbers
        if d.get('inactive_reason') is None:
            d['inactive_reason'] = u''
        route_protocol = d.get('protocol').lower()
        if protocol and protocol != route_protocol:
            continue
        # A single community comes back as a scalar; normalise to a list.
        communities = d.get('communities')
        if communities is not None and type(communities) is not list:
            d['communities'] = [communities]
        d_keys = list(d.keys())
        # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
        all_protocol_attributes = {
            key: d.pop(key)
            for key in d_keys
            if key not in _COMMON_PROTOCOL_FIELDS_
        }
        protocol_attributes = {
            key: value for key, value in all_protocol_attributes.items()
            if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
        }
        d['protocol_attributes'] = protocol_attributes
        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)
    return routes
def get_snmp_information(self):
    """Return the SNMP configuration.

    :return: dict of top-level SNMP fields plus a 'community' sub-dict
        keyed by community name; empty dict when SNMP is not configured.
    """
    snmp_information = {}
    snmp_config = junos_views.junos_snmp_config_table(self.device)
    snmp_config.get()
    snmp_items = snmp_config.items()
    if not snmp_items:
        return snmp_information
    # The view returns a single top-level item; flatten its field pairs.
    snmp_information = {
        py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
        for ele in snmp_items[0][1]
    }
    snmp_information['community'] = {}
    # The nested communities table is popped out and expanded below.
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information
    for community in communities_table.items():
        community_name = py23_compat.text_type(community[0])
        community_details = {
            'acl': ''
        }
        # 'mode' values are translated to the napalm authorization names.
        community_details.update({
            py23_compat.text_type(ele[0]): py23_compat.text_type(
                ele[1] if ele[0] != 'mode'
                else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
            for ele in community[1]
        })
        snmp_information['community'][community_name] = community_details
    return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes.

    :return: dict keyed by probe name, then by test name, each test
        carrying probe_type/target/source/probe_count/test_interval.
    """
    config = {}
    probes_view = junos_views.junos_rpm_probes_config_table(self.device)
    probes_view.get()
    for test_key, raw_fields in probes_view.items():
        test_name = py23_compat.text_type(test_key)
        fields = dict(raw_fields)
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, fields.pop('probe_name'))
        test_config = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, fields.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, fields.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, fields.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, fields.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, fields.pop('test_interval', '0'))
        }
        config.setdefault(probe_name, {})[test_name] = test_config
    return config
def get_probes_results(self):
    """Return the results of the RPM probes.

    :return: dict keyed by probe name, then by test name.
    """
    probes_results = {}
    probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
    probes_results_table.get()
    probes_results_items = probes_results_table.items()
    for probe_result in probes_results_items:
        probe_name = py23_compat.text_type(probe_result[0])
        test_results = {
            p[0]: p[1] for p in probe_result[1]
        }
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        # Only values are rewritten here, so mutating while iterating
        # items() is safe (the dict size does not change).
        for test_param_name, test_param_value in test_results.items():
            if isinstance(test_param_value, float):
                test_results[test_param_name] = test_param_value * 1e-3
                # convert from useconds to mseconds
        test_name = test_results.pop('test_name', '')
        source = test_results.get('source', u'')
        if source is None:
            test_results['source'] = u''
        if probe_name not in probes_results.keys():
            probes_results[probe_name] = {}
        probes_results[probe_name][test_name] = test_results
    return probes_results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results.

    Builds the CLI command from the optional arguments and sends it as a
    raw <command> RPC over NETCONF, then parses the XML reply.

    :return: {'success': {ttl: {'probes': {...}}}} or {'error': message}.
    """
    traceroute_result = {}
    # calling form RPC does not work properly :(
    # but defined junos_route_instance_table just in case
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )
    traceroute_rpc = E('command', traceroute_command)
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    traceroute_results = rpc_reply.find('.//traceroute-results')
    # Device-side failures are reported inline; surface them as 'error'.
    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')
    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}
    traceroute_result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            # Device reports RTT in microseconds; convert to milliseconds.
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }
    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute a ping towards *destination* and return the parsed results.

    The CLI command is built from the optional arguments and sent as a
    raw <command> RPC over NETCONF.

    :return: {'success': {...stats and per-probe results...}} on any
        reply, or {'error': message} on RPC error / 100% loss.
    """
    ping_dict = {}
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we got at least one ICMP reply.
    # FIX: compare by value — the original `packet_loss is not 100` is an
    # identity check against an int literal, which only works because of
    # CPython's small-int caching and raises SyntaxWarning on Python 3.8+.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # All RTT leaves are in microseconds; convert to milliseconds.
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the locally configured users and their properties.

    Junos login classes are mapped onto Cisco-style privilege levels.
    """
    _CLASS_TO_LEVEL = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    users_view = junos_views.junos_users_table(self.device)
    users_view.get()
    users = {}
    for username, raw_fields in users_view.items():
        details = {'level': 0, 'password': '', 'sshkeys': []}
        details.update({name: value for name, value in raw_fields if value})
        login_class = details.pop('class', '')
        # Stringify everything first, then overlay the typed fields.
        details = {
            field: py23_compat.text_type(details[field])
            for field in details.keys()
        }
        details['level'] = _CLASS_TO_LEVEL.get(login_class, 0)
        details['sshkeys'] = [
            details.pop(key_type)
            for key_type in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if details.get(key_type, '')
        ]
        users[username] = details
    return users
def get_optics(self):
    """Return optics information.

    Merges the generic optics view (which lacks lane information) with
    the dedicated 40G/100G per-lane view, preferring the latter for
    interfaces present in both.
    """
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])
    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        # First field pair carries the interface name; remove it from
        # the remaining optical values.
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])
    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration as text.

    :param retrieve: 'running', 'candidate' or 'all'. JunOS has no
        separate startup configuration, so 'startup' is always empty.
    """
    configs = {
        'startup': '',
        'running': '',
        'candidate': ''
    }
    get_opts = {
        'format': 'text',
        'database': 'candidate'
    }
    if retrieve in ('candidate', 'all'):
        cand = self.device.rpc.get_config(filter_xml=None, options=get_opts)
        configs['candidate'] = py23_compat.text_type(cand.text)
    if retrieve in ('running', 'all'):
        # The committed database is what JunOS considers "running".
        get_opts['database'] = 'committed'
        run = self.device.rpc.get_config(filter_xml=None, options=get_opts)
        configs['running'] = py23_compat.text_type(run.text)
    return configs
def get_network_instances(self, name=''):
    """Return the routing instances, OpenConfig-style.

    :param name: optional instance name filter.
    :return: dict keyed by instance name; interfaces not bound to any
        configured instance are grouped under the synthetic 'default'.
    """
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        # A single bound interface comes back as a scalar; normalise.
        ri_interfaces = ri_details['interfaces']
        if not isinstance(ri_interfaces, list):
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # Interfaces left over after removing all VRF-bound ones belong to
    # the default instance.
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.compare_config | python | def compare_config(self):
diff = self.device.cu.diff()
if diff is None:
return ''
else:
return diff.strip() | Compare candidate config with running. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L229-L236 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    # Set by load_replace/load_merge; decides which exception type a
    # failed load raises.
    self.config_replace = False
    # Tracks whether we currently hold the exclusive config lock.
    self.locked = False
    # Get optional arguments
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    # Key-based and password-based authentication need different
    # Device constructor arguments.
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def open(self):
    """Open the connection with the device.

    :raises ConnectionException: when the NETCONF session cannot be
        established within the connect timeout.
    """
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        raise ConnectionException(cte.message)
    self.device.timeout = self.timeout
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    self.device.bind(cu=Config)
    # Session-long exclusive lock, if requested via config_lock.
    if self.config_lock:
        self._lock()
def close(self):
    """Close the connection.

    Releases the session-long config lock (if one was taken via
    config_lock) before disconnecting.
    """
    if self.config_lock:
        self._unlock()
    self.device.close()
def _lock(self):
    """Acquire the exclusive configuration lock (idempotent)."""
    if self.locked:
        return
    self.device.cu.lock()
    self.locked = True
def _unlock(self):
    """Release the exclusive configuration lock (idempotent)."""
    if not self.locked:
        return
    self.device.cu.unlock()
    self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """Execute an arbitrary RPC and return its XML reply as a string.

    Examples:
        Configuration:          get="<get-configuration/>"
        Interface information:  get="<get-interface-information/>"
        One specific interface: get="<get-interface-information/>",
                                child="<interface-name>ge-0/0/0</interface-name>"
    """
    request = etree.fromstring(get)
    if child:
        # Nest the optional filter element inside the request.
        request.append(etree.fromstring(child))
    reply = self.device.execute(request)
    return etree.tostring(reply)
def is_alive(self):
    """Report whether both transport layers of the session are up."""
    # Check the raw SSH transport as well as PyEZ's NETCONF state.
    transport_up = self.device._conn._session.transport.is_active()
    return {'is_alive': transport_up and self.device.connected}
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
    """Classify a configuration blob as 'xml', 'set', 'json' or 'text'."""
    stripped = config.strip()
    # XML payloads always open with a tag.
    if stripped.startswith('<'):
        return 'xml'
    # A leading set-mode action word marks set-style configuration.
    set_actions = (
        'set',
        'activate',
        'deactivate',
        'annotate',
        'copy',
        'delete',
        'insert',
        'protect',
        'rename',
        'unprotect',
    )
    if stripped.split(' ')[0] in set_actions:
        return 'set'
    if self._is_json_format(config):
        return 'json'
    # Fallback: curly-brace / hierarchical text configuration.
    return 'text'
def _load_candidate(self, filename, config, overwrite):
    """Load a candidate configuration from a file or a string.

    :param filename: path to read the config from (wins over ``config``).
    :param config: configuration text/XML to load when no file is given.
    :param overwrite: True for a full replace, False for a merge.
    :raises ReplaceConfigException / MergeConfigException: on load error,
        depending on self.config_replace.
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already aquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        # The load format is auto-detected from the payload itself.
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load a candidate configuration that fully REPLACES the current one.

    (The previous docstring incorrectly said "merge" — this path loads
    with overwrite=True.)

    :param filename: path to a configuration file (wins over ``config``).
    :param config: configuration text/XML to load.
    """
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load a candidate configuration MERGED into the current one.

    (The previous docstring incorrectly said "replace" — this path loads
    with overwrite=False.)

    :param filename: path to a configuration file (wins over ``config``).
    :param config: configuration text/XML to load.
    """
    self.config_replace = False
    self._load_candidate(filename, config, False)
def commit_config(self):
    """Commit the candidate configuration."""
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    # A session-long lock (config_lock=True) stays held; a lock taken
    # ad-hoc by _load_candidate is released after the commit.
    if not self.config_lock:
        self._unlock()
def discard_config(self):
    """Discard the candidate changes (rollback 0).

    Releases the configuration lock unless it is session-wide.
    """
    self.device.cu.rollback(rb_id=0)
    if not self.config_lock:
        self._unlock()
def rollback(self):
    """Roll back to the previous committed configuration and commit it."""
    self.device.cu.rollback(rb_id=1)
    # committing makes rollback 1 the active configuration
    self.commit_config()
def get_facts(self):
    """Return facts of the device (vendor, model, version, interfaces...)."""
    output = self.device.facts
    # -1 signals that the uptime could not be determined
    uptime = self.device.uptime or -1
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    interface_list = interfaces.keys()
    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(output['model']),
        'serial_number': py23_compat.text_type(output['serialnumber']),
        'os_version': py23_compat.text_type(output['version']),
        'hostname': py23_compat.text_type(output['hostname']),
        'fqdn': py23_compat.text_type(output['fqdn']),
        'uptime': uptime,
        'interface_list': interface_list
    }
def get_interfaces(self):
    """Return interfaces details.

    Speed is normalised to Mbit/s; -1 means the value could not be
    parsed. ``last_flapped`` is -1.0 when unavailable.
    """
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # speed comes back as e.g. '1000mbps' or '10Gbps': split into
        # numeric value and unit suffix
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            # normalise gigabit values to Mbit/s
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return interfaces counters; unavailable values are reported as -1."""
    counter_table = junos_views.junos_iface_counter_table(self.device)
    counter_table.get()
    result = {}
    for iface_name, iface_stats in counter_table.items():
        result[iface_name] = {
            field: (value if value is not None else -1)
            for field, value in iface_stats
        }
    return result
def get_environment(self):
    """Return environment details (power, fans, temperature, CPU, memory).

    Builds the result from four PyEZ tables; PEM data (when supported)
    overrides the placeholder power capacity/output values.
    """
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # -1.0 placeholders; real values filled in from the PEM table below
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # some platforms report e.g. '2048 MB'; keep the digits only
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, bacause of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
def _parse_route_stats(self, neighbor):
    """Build the per-address-family prefix counters for a BGP neighbor.

    :param neighbor: dict of raw fields from the BGP neighbor table view.
    :returns: dict keyed by address family ('ipv4', 'ipv6', ...) with
        received/accepted/sent prefix counts; -1 means unavailable
        (e.g. session down).
    """
    data = {
        'ipv4': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        },
        'ipv6': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        }
    }
    if not neighbor['is_up']:
        # session down: no stats, keep the -1 defaults
        return data
    elif isinstance(neighbor['tables'], list):
        if isinstance(neighbor['sent_prefixes'], int):
            # We expect sent_prefixes to be a list, but sometimes it
            # is of type int. Therefore convert attribute to list
            neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
        for idx, table in enumerate(neighbor['tables']):
            family = self._get_address_family(table)
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
            # sent counts are only reported for RIBs that are in sync, so the
            # sent_prefixes list head is consumed only in that case
            if 'in sync' in neighbor['send-state'][idx]:
                data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
            else:
                data[family]['sent_prefixes'] = 0
    else:
        # single table: the raw fields are scalars, not lists
        family = self._get_address_family(neighbor['tables'])
        data[family] = {}
        data[family]['received_prefixes'] = neighbor['received_prefixes']
        data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
        data[family]['sent_prefixes'] = neighbor['sent_prefixes']
    return data
@staticmethod
def _parse_value(value):
    """Normalise a raw table value: None becomes u'', strings become
    unicode text, anything else is passed through unchanged."""
    if value is None:
        return u''
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    return value
def get_bgp_neighbors(self):
    """Return BGP neighbors details, grouped per routing instance.

    Iterates over every routing instance and queries the BGP neighbor
    and uptime tables for each; instances with no BGP peers are dropped
    from the result.
    """
    bgp_neighbor_data = {}
    default_neighbor_details = {
        'local_as': 0,
        'remote_as': 0,
        'remote_id': '',
        'is_up': False,
        'is_enabled': False,
        'description': '',
        'uptime': 0,
        'address_family': {}
    }
    keys = default_neighbor_details.keys()
    uptime_table = junos_views.junos_bgp_uptime_table(self.device)
    bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
    uptime_table_lookup = {}

    def _get_uptime_table(instance):
        # fetch the uptime table once per routing instance and memoise it
        if instance not in uptime_table_lookup:
            uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
        return uptime_table_lookup[instance]

    def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
        '''
        Make sure to execute a simple request whenever using
        junos > 13. This is a helper used to avoid code redundancy
        and reuse the function also when iterating through the list
        BGP neighbors under a specific routing instance,
        also when the device is capable to return the routing
        instance name at the BGP neighbor level.
        '''
        for bgp_neighbor in neighbor_data:
            # neighbor key may carry a '+port' suffix; strip it
            peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # not instance, means newer Junos version,
                # as we request everything in a single request
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance is explicitly requests,
                # thus it's an old Junos, so we retrieve the BGP neighbors
                # under a certain routing instance
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            if instance_name not in bgp_neighbor_data:
                bgp_neighbor_data[instance_name] = {}
            if 'router_id' not in bgp_neighbor_data[instance_name]:
                # we only need to set this once
                bgp_neighbor_data[instance_name]['router_id'] = \
                    py23_compat.text_type(neighbor_details.get('local_id', ''))
            peer = {
                key: self._parse_value(value)
                for key, value in neighbor_details.items()
                if key in keys
            }
            peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
            peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
            peer['address_family'] = self._parse_route_stats(neighbor_details)
            if 'peers' not in bgp_neighbor_data[instance_name]:
                bgp_neighbor_data[instance_name]['peers'] = {}
            bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
            if not uptime_table_items:
                uptime_table_items = _get_uptime_table(instance)
            for neighbor, uptime in uptime_table_items:
                if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                    bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]

    # Commenting out the following sections, till Junos
    # will provide a way to identify the routing instance name
    # from the details of the BGP neighbor
    # currently, there are Junos 15 version having a field called `peer_fwd_rti`
    # but unfortunately, this is not consistent.
    # Junos 17 might have this fixed, but this needs to be revisited later.
    # In the definition below, `old_junos` means a version that does not provide
    # the forwarding RTI information.
    #
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device).get()
    for instance, instance_data in instances.items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        bgp_neighbor_data[instance] = {'peers': {}}
        instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
        uptime_table_items = uptime_table.get(instance=instance).items()
        _get_bgp_neighbors_core(instance_neighbors,
                                instance=instance,
                                uptime_table_items=uptime_table_items)
    # If the OS provides the `peer_fwd_rti` or any way to identify the
    # rotuing instance name (see above), the performances of this getter
    # can be significantly improved, as we won't execute one request
    # for each an every RT.
    # However, this improvement would only be beneficial for multi-VRF envs.
    #
    # else:
    #     instance_neighbors = bgp_neighbors_table.get().items()
    #     _get_bgp_neighbors_core(instance_neighbors)
    bgp_tmp_dict = {}
    # keep only routing instances that actually have BGP peers
    for k, v in bgp_neighbor_data.items():
        if bgp_neighbor_data[k]['peers']:
            bgp_tmp_dict[k] = v
    return bgp_tmp_dict
def get_lldp_neighbors(self):
    """Return LLDP neighbors details, keyed by local interface name.

    Returns an empty dict when the LLDP RPC fails (e.g. LLDP disabled).
    """
    lldp = junos_views.junos_lldp_table(self.device)
    try:
        lldp.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    result = lldp.items()
    neighbors = {}
    # group the neighbor entries per local interface
    for neigh in result:
        if neigh[0] not in neighbors.keys():
            neighbors[neigh[0]] = []
        neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
    return neighbors
def get_lldp_neighbors_detail(self, interface=''):
    """Detailed view of the LLDP neighbors.

    NOTE(review): the ``interface`` argument is never used as a filter --
    it is shadowed by the loop variable below, so details for every
    interface are always returned. Confirm whether filtering is intended.
    """
    lldp_neighbors = {}
    lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    # NOTE(review): this issues a second RPC for the same table;
    # lldp_table.keys() would reuse the reply fetched above
    interfaces = lldp_table.get().keys()
    # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
    # and SRX Series is get-lldp-interface-neighbors-information,
    # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
    # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
    # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
    lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
    if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
    for interface in interfaces:
        # the two RPC variants take differently-named interface arguments
        if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
            lldp_table.get(interface_name=interface)
        else:
            lldp_table.get(interface_device=interface)
        for item in lldp_table:
            if interface not in lldp_neighbors.keys():
                lldp_neighbors[interface] = []
            lldp_neighbors[interface].append({
                'parent_interface': item.parent_interface,
                'remote_port': item.remote_port,
                'remote_chassis_id': napalm_base.helpers.convert(
                    napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                'remote_port_description': napalm_base.helpers.convert(
                    py23_compat.text_type, item.remote_port_description),
                'remote_system_name': item.remote_system_name,
                'remote_system_description': item.remote_system_description,
                'remote_system_capab': item.remote_system_capab,
                'remote_system_enable_capab': item.remote_system_enable_capab
            })
    return lldp_neighbors
def cli(self, commands):
    """Execute raw CLI commands and returns their output.

    :param commands: list of CLI command strings. Pipe modifiers
        (except/match/last/trim/count/find) can be emulated locally;
        the ``save`` pipe is stripped before sending to the device.
    :raises TypeError: when ``commands`` is not a list.
    :returns: dict mapping each original command to its output text.
    """
    cli_output = {}

    def _count(txt, none):  # Second arg for consistency only. noqa
        '''
        Return the exact output, as Junos displays
        e.g.:
        > show system processes extensive | match root | count
        Count: 113 lines
        '''
        count = len(txt.splitlines())
        return 'Count: {count} lines'.format(count=count)

    def _trim(txt, length):
        '''
        Trim specified number of columns from start of line.
        '''
        try:
            newlines = []
            for line in txt.splitlines():
                newlines.append(line[int(length):])
            return '\n'.join(newlines)
        except ValueError:
            # non-numeric length: return the text untouched
            return txt

    def _except(txt, pattern):
        '''
        Show only text that does not match a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        unmatched = [
            line for line in txt.splitlines()
            if not re.search(rgx, line, re.I)
        ]
        return '\n'.join(unmatched)

    def _last(txt, length):
        '''
        Display end of output only.
        '''
        try:
            return '\n'.join(
                txt.splitlines()[(-1)*int(length):]
            )
        except ValueError:
            # non-numeric length: return the text untouched
            return txt

    def _match(txt, pattern):
        '''
        Show only text that matches a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        matched = [
            line for line in txt.splitlines()
            if re.search(rgx, line, re.I)
        ]
        return '\n'.join(matched)

    def _find(txt, pattern):
        '''
        Search for first occurrence of pattern.
        '''
        rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
        match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
        if match:
            return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
        else:
            return '\nPattern not found'

    def _process_pipe(cmd, txt):
        '''
        Process CLI output from Juniper device that
        doesn't allow piping the output.
        '''
        # NOTE(review): non-None output is returned untouched, so the pipe
        # emulation below only runs when the device returned None -- confirm
        # this is the intended behaviour, as the helpers cannot process None.
        if txt is not None:
            return txt
        _OF_MAP = OrderedDict()
        _OF_MAP['except'] = _except
        _OF_MAP['match'] = _match
        _OF_MAP['last'] = _last
        _OF_MAP['trim'] = _trim
        _OF_MAP['count'] = _count
        _OF_MAP['find'] = _find
        # the operations order matter in this case!
        exploded_cmd = cmd.split('|')
        pipe_oper_args = {}
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            pipe_args = ''.join(exploded_pipe[1:2])
            # will not throw error when there's no arg
            pipe_oper_args[pipe_oper] = pipe_args
        for oper in _OF_MAP.keys():
            # to make sure the operation sequence is correct
            if oper not in pipe_oper_args.keys():
                continue
            txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
        return txt

    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    _PIPE_BLACKLIST = ['save']
    # Preprocessing to avoid forbidden commands
    for command in commands:
        exploded_cmd = command.split('|')
        command_safe_parts = []
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            if pipe_oper in _PIPE_BLACKLIST:
                continue
            pipe_args = ''.join(exploded_pipe[1:2])
            safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                              args=pipe_args)
            command_safe_parts.append(safe_pipe)
        # rebuild the command without the blacklisted pipes
        safe_command = exploded_cmd[0] if not command_safe_parts else\
            '{base} | {pipes}'.format(base=exploded_cmd[0],
                                      pipes=' | '.join(command_safe_parts))
        raw_txt = self.device.cli(safe_command, warning=False)
        cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
            _process_pipe(command, raw_txt))
    return cli_output
def get_bgp_config(self, group='', neighbor=''):
    """Return BGP configuration.

    :param group: optional BGP group name to restrict the output to.
    :param neighbor: optional neighbor IP to restrict the output to;
        only honoured when ``group`` is also given.
    :returns: dict keyed by group name, each with typed group fields,
        a ``prefix_limit`` tree and a ``neighbors`` dict.
    """
    def update_dict(d, u):  # for deep dictionary update
        for k, v in u.items():
            if isinstance(d, collections.Mapping):
                if isinstance(v, collections.Mapping):
                    r = update_dict(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            else:
                d = {k: u[k]}
        return d

    def build_prefix_limit(**args):
        """
        Transform the lements of a dictionary into nested dictionaries.

        Example:
            {
                'inet_unicast_limit': 500,
                'inet_unicast_teardown_threshold': 95,
                'inet_unicast_teardown_timeout': 5
            }
        becomes:
            {
                'inet': {
                    'unicast': {
                        'limit': 500,
                        'teardown': {
                            'threshold': 95,
                            'timeout': 5
                        }
                    }
                }
            }
        """
        prefix_limit = {}
        for key, value in args.items():
            # split 'inet_unicast_limit' into nesting levels, deepest last
            key_levels = key.split('_')
            length = len(key_levels)-1
            temp_dict = {
                key_levels[length]: value
            }
            for index in reversed(range(length)):
                level = key_levels[index]
                temp_dict = {level: temp_dict}
            update_dict(prefix_limit, temp_dict)
        return prefix_limit

    # expected datatype per field; used both for conversion and defaults
    _COMMON_FIELDS_DATATYPE_ = {
        'description': py23_compat.text_type,
        'local_address': py23_compat.text_type,
        'local_as': int,
        'remote_as': int,
        'import_policy': py23_compat.text_type,
        'export_policy': py23_compat.text_type,
        'inet_unicast_limit_prefix_limit': int,
        'inet_unicast_teardown_threshold_prefix_limit': int,
        'inet_unicast_teardown_timeout_prefix_limit': int,
        'inet_unicast_novalidate_prefix_limit': int,
        'inet_flow_limit_prefix_limit': int,
        'inet_flow_teardown_threshold_prefix_limit': int,
        'inet_flow_teardown_timeout_prefix_limit': int,
        'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
        'inet6_unicast_limit_prefix_limit': int,
        'inet6_unicast_teardown_threshold_prefix_limit': int,
        'inet6_unicast_teardown_timeout_prefix_limit': int,
        'inet6_unicast_novalidate_prefix_limit': int,
        'inet6_flow_limit_prefix_limit': int,
        'inet6_flow_teardown_threshold_prefix_limit': int,
        'inet6_flow_teardown_timeout_prefix_limit': int,
        'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
    }
    _PEER_FIELDS_DATATYPE_MAP_ = {
        'authentication_key': py23_compat.text_type,
        'route_reflector_client': bool,
        'nhs': bool
    }
    _PEER_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )
    _GROUP_FIELDS_DATATYPE_MAP_ = {
        'type': py23_compat.text_type,
        'apply_groups': list,
        'remove_private_as': bool,
        'multipath': bool,
        'multihop_ttl': int
    }
    _GROUP_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )
    _DATATYPE_DEFAULT_ = {
        py23_compat.text_type: '',
        int: 0,
        bool: False,
        list: []
    }
    bgp_config = {}
    if group:
        bgp = junos_views.junos_bgp_config_group_table(self.device)
        bgp.get(group=group)
    else:
        bgp = junos_views.junos_bgp_config_table(self.device)
        bgp.get()
        neighbor = ''  # if no group is set, no neighbor should be set either
    bgp_items = bgp.items()
    if neighbor:
        neighbor_ip = napalm_base.helpers.ip(neighbor)
    for bgp_group in bgp_items:
        bgp_group_name = bgp_group[0]
        bgp_group_details = bgp_group[1]
        # seed the group with typed defaults for all non-prefix-limit fields
        bgp_config[bgp_group_name] = {
            field: _DATATYPE_DEFAULT_.get(datatype)
            for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
            if '_prefix_limit' not in field
        }
        for elem in bgp_group_details:
            if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                continue
            datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
            default = _DATATYPE_DEFAULT_.get(datatype)
            key = elem[0]
            value = elem[1]
            if key in ['export_policy', 'import_policy']:
                # policies may come back as a list; flatten to one string
                if isinstance(value, list):
                    value = ' '.join(value)
            if key == 'local_address':
                value = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, value, value)
            if key == 'neighbors':
                # neighbors are processed separately below
                bgp_group_peers = value
                continue
            bgp_config[bgp_group_name].update({
                key: napalm_base.helpers.convert(datatype, value, default)
            })
        prefix_limit_fields = {}
        for elem in bgp_group_details:
            if '_prefix_limit' in elem[0] and elem[1] is not None:
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                prefix_limit_fields.update({
                    elem[0].replace('_prefix_limit', ''):
                        napalm_base.helpers.convert(datatype, elem[1], default)
                })
        bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
        if 'multihop' in bgp_config[bgp_group_name].keys():
            # Delete 'multihop' key from the output
            del bgp_config[bgp_group_name]['multihop']
            if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                # Set ttl to default value 64
                bgp_config[bgp_group_name]['multihop_ttl'] = 64
        bgp_config[bgp_group_name]['neighbors'] = {}
        for bgp_group_neighbor in bgp_group_peers.items():
            bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
            if neighbor and bgp_peer_address != neighbor:
                continue  # if filters applied, jump over all other neighbors
            bgp_group_details = bgp_group_neighbor[1]
            # seed the peer with typed defaults for all non-prefix-limit fields
            bgp_peer_details = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    # policies may come back as a list; flatten to one string
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                bgp_peer_details.update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                if key == 'cluster':
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # cluster on the group implies RR client for its peers
                bgp_peer_details['route_reflector_client'] = True
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
            if neighbor and bgp_peer_address == neighbor_ip:
                break  # found the desired neighbor
        if 'cluster' in bgp_config[bgp_group_name].keys():
            # we do not want cluster in the output
            del bgp_config[bgp_group_name]['cluster']
    return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    :param neighbor_address: optional neighbor IP to filter on.
    :returns: dict keyed by routing instance, then remote AS, mapping to
        a list of neighbor-detail dicts.
    """
    bgp_neighbors = {}
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }
    # maps Junos "options" flag tokens to boolean output fields
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }

    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors.
        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.
        '''
        for bgp_neighbor in neighbor_data:
            # table entries are keyed by the remote AS number
            remote_as = int(bgp_neighbor[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            # translate the space-separated options string into booleans
            options = neighbor_details.pop('options', '')
            if isinstance(options, str):
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            # addresses come back as 'ip+port'; split and default port to 179
            local_address = neighbor_details.pop('local_address', '')
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            # total queued messages across all queues
            messages_queued_out = 0
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            # aggregate the prefix counters over all of the neighbor's RIBs
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)

    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dicts."""
    # could use ArpTable
    # from jnpr.junos.op.phyport import ArpTable
    # and simply use it
    # but
    # we need:
    # - filters
    # - group by VLAN ID
    # - hostname & TTE fields as well
    arp_table = []
    arp_table_raw = junos_views.junos_arp_table(self.device)
    arp_table_raw.get()
    arp_table_items = arp_table_raw.items()
    for arp_table_entry in arp_table_items:
        arp_entry = {
            elem[0]: elem[1] for elem in arp_table_entry[1]
        }
        # normalise MAC and IP string representations
        arp_entry['mac'] = napalm_base.helpers.mac(arp_entry.get('mac'))
        arp_entry['ip'] = napalm_base.helpers.ip(arp_entry.get('ip'))
        arp_table.append(arp_entry)
    return arp_table
def get_ntp_peers(self):
    """Return the NTP peers configured on the device, keyed by IP."""
    ntp_config = junos_views.junos_ntp_peers_config_table(self.device)
    ntp_config.get()
    peers = ntp_config.items()
    if not peers:
        return {}
    return {napalm_base.helpers.ip(entry[0]): {} for entry in peers}
def get_ntp_servers(self):
    """Return the NTP servers configured on the device, keyed by IP."""
    ntp_config = junos_views.junos_ntp_servers_config_table(self.device)
    ntp_config.get()
    servers = ntp_config.items()
    if not servers:
        return {}
    return {napalm_base.helpers.ip(entry[0]): {} for entry in servers}
def get_ntp_stats(self):
    """Return NTP stats (associations).

    There is no XML RPC for ``show ntp associations``, so the raw CLI
    output is parsed line by line.  Lines that do not match the expected
    layout, or whose fields fail numeric conversion, are skipped.
    """
    ntp_stats = []
    # Raw string literal: the pattern is full of backslash classes; the
    # original non-raw literal only worked because Python preserves
    # unrecognised escapes (a DeprecationWarning since 3.6).  Compiled
    # once and hoisted out of the per-line loop.
    regex = re.compile(
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$',
        re.I
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    # The first three lines are the command echo and column headers.
    for line in ntp_assoc_output.splitlines()[3:]:
        match = regex.search(line)
        if not match:
            continue  # pattern not found on this line
        groups = match.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(groups[1]),
                'synchronized': (groups[0] == '*'),
                'referenceid': py23_compat.text_type(groups[2]),
                'stratum': int(groups[3]),
                'type': py23_compat.text_type(groups[4]),
                'when': py23_compat.text_type(groups[5]),
                'hostpoll': int(groups[6]),
                'reachability': int(groups[7]),
                'delay': float(groups[8]),
                'offset': float(groups[9]),
                'jitter': float(groups[10])
            })
        except Exception:
            continue  # best-effort: skip lines whose fields do not convert
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses.

    Shape: {interface: {'ipv4'|'ipv6': {address: {'prefix_length': int}}}}
    """
    interfaces_ip = {}
    interface_table = junos_views.junos_ip_interfaces_table(self.device)
    interface_table.get()
    interface_table_items = interface_table.items()
    # Map JunOS address-family names to the NAPALM vocabulary.
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    # Fallback prefix length when the table entry does not carry one.
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }
    for interface_details in interface_table_items:
        # The table key is "address/prefix"; split into its two parts.
        ip_network = interface_details[0]
        ip_address = ip_network.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, ip_address, ip_address)
        try:
            interface_details_dict = dict(interface_details[1])
            family_raw = interface_details_dict.get('family')
            interface = py23_compat.text_type(interface_details_dict.get('interface'))
        except ValueError:
            continue  # skip malformed table rows
        prefix = napalm_base.helpers.convert(int,
                                             ip_network.split('/')[-1],
                                             _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        if not family or not interface:
            continue  # unsupported family or unnamed interface
        if interface not in interfaces_ip.keys():
            interfaces_ip[interface] = {}
        if family not in interfaces_ip[interface].keys():
            interfaces_ip[interface][family] = {}
        if address not in interfaces_ip[interface][family].keys():
            interfaces_ip[interface][family][address] = {}
        interfaces_ip[interface][family][address][u'prefix_length'] = prefix
    return interfaces_ip
def get_mac_address_table(self):
    """Return the MAC address table."""
    facts = self.device.facts
    # Pick the table view that matches the platform's ethernet-switching
    # flavour (EX/QFX switches, L2NG switches, or routers).
    if facts.get('personality', '') in ['SWITCH']:  # for EX & QFX devices
        if facts.get('switch_style', '') in ['VLAN_L2NG']:  # for L2NG devices
            mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
        else:
            mac_table = junos_views.junos_mac_address_table_switch(self.device)
    else:
        mac_table = junos_views.junos_mac_address_table(self.device)
    mac_table.get()
    defaults = {
        'mac': u'',
        'interface': u'',
        'vlan': 0,
        'static': False,
        'active': True,
        'moves': 0,
        'last_move': 0.0
    }
    table = []
    for _, fields in mac_table.items():
        entry = dict(defaults, **dict(fields))
        mac = entry.get('mac')
        # JUNOS returns '*' for Type = Flood
        if mac == '*':
            continue
        entry['mac'] = napalm_base.helpers.mac(mac)
        table.append(entry)
    return table
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol."""
    routes = {}
    if not isinstance(destination, py23_compat.string_types):
        raise TypeError('Please specify a valid destination!')
    if protocol and isinstance(destination, py23_compat.string_types):
        protocol = protocol.lower()
    if protocol == 'connected':
        protocol = 'direct'  # this is how is called on JunOS
    _COMMON_PROTOCOL_FIELDS_ = [
        'destination',
        'prefix_length',
        'protocol',
        'current_active',
        'last_active',
        'age',
        'next_hop',
        'outgoing_interface',
        'selected_next_hop',
        'preference',
        'inactive_reason',
        'routing_table'
    ]  # identifies the list of fields common for all protocols
    _BOOLEAN_FIELDS_ = [
        'current_active',
        'selected_next_hop',
        'last_active'
    ]  # fields expected to have boolean values
    # Extra per-protocol fields returned under 'protocol_attributes'.
    _PROTOCOL_SPECIFIC_FIELDS_ = {
        'bgp': [
            'local_as',
            'remote_as',
            'as_path',
            'communities',
            'local_preference',
            'preference2',
            'remote_address',
            'metric',
            'metric2'
        ],
        'isis': [
            'level',
            'metric',
            'local_as'
        ]
    }
    routes_table = junos_views.junos_protocol_route_table(self.device)
    rt_kargs = {
        'destination': destination
    }
    if protocol and isinstance(destination, py23_compat.string_types):
        rt_kargs['protocol'] = protocol
    try:
        routes_table.get(**rt_kargs)
    except RpcTimeoutError:
        # on devices with milions of routes
        # in case the destination is too generic (e.g.: 10/8)
        # will take very very long to determine all routes and
        # moreover will return a huge list
        raise CommandTimeoutException(
            'Too many routes returned! Please try with a longer prefix or a specific protocol!'
        )
    except RpcError as rpce:
        # 'bad_element' indicates the device rejected the protocol name.
        if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
            raise CommandErrorException(
                'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
        raise CommandErrorException(rpce)
    except Exception as err:
        raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
    routes_items = routes_table.items()
    for route in routes_items:
        d = {}
        # next_hop = route[0]
        d = {elem[0]: elem[1] for elem in route[1]}
        # Rebuild the "address/prefix" key from its two components.
        destination = napalm_base.helpers.ip(d.pop('destination', ''))
        prefix_length = d.pop('prefix_length', 32)
        destination = '{d}/{p}'.format(
            d=destination,
            p=prefix_length
        )
        # Missing boolean fields default to False.
        d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
        as_path = d.get('as_path')
        if as_path is not None:
            d['as_path'] = as_path.split(' I ')[0]\
                .replace('AS path:', '')\
                .replace('I', '')\
                .strip()
            # to be sure that contains only AS Numbers
        if d.get('inactive_reason') is None:
            d['inactive_reason'] = u''
        route_protocol = d.get('protocol').lower()
        if protocol and protocol != route_protocol:
            continue
        communities = d.get('communities')
        if communities is not None and type(communities) is not list:
            d['communities'] = [communities]
        d_keys = list(d.keys())
        # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
        all_protocol_attributes = {
            key: d.pop(key)
            for key in d_keys
            if key not in _COMMON_PROTOCOL_FIELDS_
        }
        protocol_attributes = {
            key: value for key, value in all_protocol_attributes.items()
            if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
        }
        d['protocol_attributes'] = protocol_attributes
        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)
    return routes
def get_snmp_information(self):
    """Return the SNMP configuration (contact, location, communities...)."""
    snmp_information = {}
    snmp_config = junos_views.junos_snmp_config_table(self.device)
    snmp_config.get()
    snmp_items = snmp_config.items()
    if not snmp_items:
        return snmp_information
    # The view yields a single entry; its fields become the top-level keys.
    snmp_information = {
        py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
        for ele in snmp_items[0][1]
    }
    snmp_information['community'] = {}
    # 'communities_table' is a nested table view; expand it per community.
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information
    for community in communities_table.items():
        community_name = py23_compat.text_type(community[0])
        community_details = {
            'acl': ''
        }
        community_details.update({
            # Translate the JunOS authorization mode to the NAPALM value.
            py23_compat.text_type(ele[0]): py23_compat.text_type(
                ele[1] if ele[0] != 'mode'
                else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
            for ele in community[1]
        })
        snmp_information['community'][community_name] = community_details
    return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes, keyed by probe then test."""
    probes = {}
    config_table = junos_views.junos_rpm_probes_config_table(self.device)
    config_table.get()
    for test, fields in config_table.items():
        test_name = py23_compat.text_type(test)
        details = dict(fields)
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('probe_name'))
        probes.setdefault(probe_name, {})[test_name] = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, details.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, details.pop('test_interval', '0'))
        }
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes, keyed by probe then test."""
    results = {}
    results_table = junos_views.junos_rpm_probes_results_table(self.device)
    results_table.get()
    for probe, fields in results_table.items():
        probe_name = py23_compat.text_type(probe)
        probe_results = dict(fields)
        probe_results['last_test_loss'] = napalm_base.helpers.convert(
            int, probe_results.pop('last_test_loss'), 0)
        for field_name in probe_results:
            value = probe_results[field_name]
            if isinstance(value, float):
                # The device reports times in microseconds; expose ms.
                probe_results[field_name] = value * 1e-3
        test_name = probe_results.pop('test_name', '')
        if probe_results.get('source', u'') is None:
            probe_results['source'] = u''
        results.setdefault(probe_name, {})[test_name] = probe_results
    return results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results.

    Returns either {'error': ...} on failure or
    {'success': {ttl: {'probes': {index: {...}}}}} on success.
    """
    traceroute_result = {}
    # calling form RPC does not work properly :(
    # but defined junos_route_instance_table just in case
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )
    traceroute_rpc = E('command', traceroute_command)
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    traceroute_results = rpc_reply.find('.//traceroute-results')
    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')
    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}
    traceroute_result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            # rtt is reported in microseconds; convert to milliseconds.
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }
    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute ping towards *destination* and return statistics.

    Returns {'success': {...}} with per-probe results and rtt stats in
    milliseconds, or {'error': ...} when the RPC fails or all probes are
    lost.
    """
    ping_dict = {}
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    # make direct RPC call via NETCONF
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we get an ICMP reply.
    # BUG FIX: the original used `is not 100`, an identity check against
    # an int literal that only works by CPython's small-int caching.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # All rtt values are reported in microseconds; convert to ms.
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the configuration of the users (level, password, ssh keys)."""
    users = {}
    # Translate JunOS login classes to Cisco-style privilege levels,
    # which is what the NAPALM base API expects.
    _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _DEFAULT_USER_DETAILS = {
        'level': 0,
        'password': '',
        'sshkeys': []
    }
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    users_items = users_table.items()
    for user_entry in users_items:
        username = user_entry[0]
        user_details = _DEFAULT_USER_DETAILS.copy()
        # Only keep fields with truthy values from the table row.
        user_details.update({
            d[0]: d[1] for d in user_entry[1] if d[1]
        })
        user_class = user_details.pop('class', '')
        user_details = {
            key: py23_compat.text_type(user_details[key])
            for key in user_details.keys()
        }
        level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
        user_details.update({
            'level': level
        })
        # Collapse the per-algorithm key fields into a single list.
        user_details['sshkeys'] = [
            user_details.pop(key)
            for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if user_details.get(key, '')
        ]
        users[username] = user_details
    return users
def get_optics(self):
    """Return optics information (per-lane rx/tx power and bias current)."""
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])
    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])
    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the candidate and/or running configuration as text.

    JunOS has no separate startup configuration, so 'startup' is always
    an empty string.
    """
    def _text_config(database):
        # Fetch one configuration database rendered as curly-brace text.
        opts = {'format': 'text', 'database': database}
        result = self.device.rpc.get_config(filter_xml=None, options=opts)
        return py23_compat.text_type(result.text)

    rv = {'startup': '', 'running': '', 'candidate': ''}
    if retrieve in ('candidate', 'all'):
        rv['candidate'] = _text_config('candidate')
    if retrieve in ('running', 'all'):
        rv['running'] = _text_config('committed')
    return rv
def get_network_instances(self, name=''):
    """Return routing instances; with *name*, only that instance (or {})."""
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        # A single-interface instance yields a scalar; normalise to list.
        if not isinstance(ri_interfaces, list):
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # Interfaces not bound to any instance belong to the default instance.
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.commit_config | python | def commit_config(self):
self.device.cu.commit(ignore_warning=self.ignore_warning)
if not self.config_lock:
self._unlock() | Commit configuration. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L238-L242 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    self.config_replace = False  # toggled by load_replace/merge_candidate
    self.locked = False  # whether we currently hold the config DB lock
    # Get optional arguments
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    # Key-based authentication uses a different Device constructor path.
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def open(self):
    """Open the NETCONF connection with the device."""
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        raise ConnectionException(cte.message)
    self.device.timeout = self.timeout
    # Enable keepalives on the underlying SSH transport.
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Close the connection, dropping the config lock if we hold it."""
    if self.config_lock:
        self._unlock()
    self.device.close()
def _lock(self):
    """Acquire the candidate configuration DB lock (idempotent)."""
    if self.locked:
        return
    self.device.cu.lock()
    self.locked = True
def _unlock(self):
    """Release the candidate configuration DB lock (idempotent)."""
    if not self.locked:
        return
    self.device.cu.unlock()
    self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """
    Construct and run an arbitrary RPC for retrieving common data.

    Examples:
        Configuration:          get: "<get-configuration/>"
        Interface information:  get: "<get-interface-information/>"
        A particular interface:
            get:   "<get-interface-information/>"
            child: "<interface-name>ge-0/0/0</interface-name>"
    """
    request = etree.fromstring(get)
    if child:
        request.append(etree.fromstring(child))
    reply = self.device.execute(request)
    return etree.tostring(reply)
def is_alive(self):
    """Report liveness of both the SSH transport and the NETCONF session."""
    transport_up = self.device._conn._session.transport.is_active()
    return {'is_alive': transport_up and self.device.connected}
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
    """Guess whether *config* is 'xml', 'set', 'json' or 'text' format."""
    # First word of a set-style config is one of these actions.
    set_actions = (
        'set',
        'activate',
        'deactivate',
        'annotate',
        'copy',
        'delete',
        'insert',
        'protect',
        'rename',
        'unprotect',
    )
    stripped = config.strip()
    if stripped.startswith('<'):
        return 'xml'
    if stripped.split(' ')[0] in set_actions:
        return 'set'
    if self._is_json_format(config):
        return 'json'
    return 'text'
def _load_candidate(self, filename, config, overwrite):
    """Load *config* (or the contents of *filename*) into the candidate DB.

    ``overwrite`` selects replace (True) vs merge (False) semantics.
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already aquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        # Re-raise under the exception type matching the requested operation.
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load a candidate configuration that fully replaces the running one."""
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load a candidate configuration to be merged into the running one."""
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the diff between candidate and running configs ('' if none)."""
    diff = self.device.cu.diff()
    return '' if diff is None else diff.strip()
def discard_config(self):
    """Discard the candidate changes (rollback 0), releasing our lock."""
    self.device.cu.rollback(rb_id=0)
    if not self.config_lock:
        self._unlock()
def rollback(self):
    """Restore the previous committed configuration and commit it."""
    self.device.cu.rollback(rb_id=1)
    self.commit_config()
def get_facts(self):
    """Return general device facts (vendor, model, serial, uptime, ...)."""
    facts = self.device.facts
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(facts['model']),
        'serial_number': py23_compat.text_type(facts['serialnumber']),
        'os_version': py23_compat.text_type(facts['version']),
        'hostname': py23_compat.text_type(facts['hostname']),
        'fqdn': py23_compat.text_type(facts['fqdn']),
        # PyEZ may report uptime as None; -1 means "unknown".
        'uptime': self.device.uptime or -1,
        'interface_list': interfaces.keys()
    }
def get_interfaces(self):
    """Return interfaces details (state, description, MAC, speed in Mbit/s)."""
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
        # Speed is reported as e.g. "1000mbps" / "10Gbps": value + unit.
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            speed_value *= 1000  # normalise to Mbit/s
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return per-interface counters; missing values become -1."""
    counter_table = junos_views.junos_iface_counter_table(self.device)
    counter_table.get()
    return {
        iface: {name: (-1 if value is None else value)
                for name, value in counters}
        for iface, counters in counter_table.items()
    }
def get_environment(self):
    """Return environment details (power, fans, temperature, CPU, memory)."""
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # Placeholder values; corrected from the PEM table below when
            # the platform supports it.
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        # NOTE(review): this inner loop keys the result on the OUTER
        # loop's sensor_object/structured_object_data rather than on
        # temperature_object — looks suspicious but is preserved as-is;
        # confirm against device output before changing.
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # Some platforms report e.g. "2048 MB" — keep only the digits.
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, bacause of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
def _parse_route_stats(self, neighbor):
data = {
'ipv4': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
},
'ipv6': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
}
}
if not neighbor['is_up']:
return data
elif isinstance(neighbor['tables'], list):
if isinstance(neighbor['sent_prefixes'], int):
# We expect sent_prefixes to be a list, but sometimes it
# is of type int. Therefore convert attribute to list
neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
for idx, table in enumerate(neighbor['tables']):
family = self._get_address_family(table)
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
if 'in sync' in neighbor['send-state'][idx]:
data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
else:
data[family]['sent_prefixes'] = 0
else:
family = self._get_address_family(neighbor['tables'])
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes']
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
data[family]['sent_prefixes'] = neighbor['sent_prefixes']
return data
@staticmethod
def _parse_value(value):
if isinstance(value, py23_compat.string_types):
return py23_compat.text_type(value)
elif value is None:
return u''
else:
return value
    def get_bgp_neighbors(self):
        """
        Return BGP neighbors details, grouped by routing instance.

        :returns: Dict keyed by routing instance name ('global' for the
            master instance); each value holds the instance ``router_id``
            and a ``peers`` dict keyed by neighbor IP. Instances with no
            peers are filtered out of the result.
        """
        bgp_neighbor_data = {}
        # Template of the per-peer fields NAPALM expects; actual values are
        # merged on top of these defaults for every neighbor.
        default_neighbor_details = {
            'local_as': 0,
            'remote_as': 0,
            'remote_id': '',
            'is_up': False,
            'is_enabled': False,
            'description': '',
            'uptime': 0,
            'address_family': {}
        }
        keys = default_neighbor_details.keys()
        uptime_table = junos_views.junos_bgp_uptime_table(self.device)
        bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
        uptime_table_lookup = {}
        def _get_uptime_table(instance):
            # Cache the per-instance uptime RPC so it runs at most once
            # for each routing instance.
            if instance not in uptime_table_lookup:
                uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
            return uptime_table_lookup[instance]
        def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
            '''
            Make sure to execute a simple request whenever using
            junos > 13. This is a helper used to avoid code redundancy
            and reuse the function also when iterating through the list
            BGP neighbors under a specific routing instance,
            also when the device is capable to return the routing
            instance name at the BGP neighbor level.
            '''
            for bgp_neighbor in neighbor_data:
                # The peer key may carry the TCP port after '+',
                # e.g. '1.2.3.4+179' -- strip it before validating the IP.
                peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # not instance, means newer Junos version,
                    # as we request everything in a single request
                    # NOTE(review): once set from the first neighbor,
                    # `instance` persists for later loop iterations --
                    # confirm intended when called without an explicit
                    # instance argument.
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    # instance is explicitly requested,
                    # thus it's an old Junos, so we retrieve the BGP neighbors
                    # under a certain routing instance
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                if instance_name not in bgp_neighbor_data:
                    bgp_neighbor_data[instance_name] = {}
                if 'router_id' not in bgp_neighbor_data[instance_name]:
                    # we only need to set this once
                    bgp_neighbor_data[instance_name]['router_id'] = \
                        py23_compat.text_type(neighbor_details.get('local_id', ''))
                # Keep only the fields NAPALM defines, normalising values.
                peer = {
                    key: self._parse_value(value)
                    for key, value in neighbor_details.items()
                    if key in keys
                }
                peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
                peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
                peer['address_family'] = self._parse_route_stats(neighbor_details)
                if 'peers' not in bgp_neighbor_data[instance_name]:
                    bgp_neighbor_data[instance_name]['peers'] = {}
                bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
                if not uptime_table_items:
                    uptime_table_items = _get_uptime_table(instance)
                # Merge session uptime into each peer entry.
                for neighbor, uptime in uptime_table_items:
                    if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                        bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                    bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]
        # Commenting out the following sections, till Junos
        # will provide a way to identify the routing instance name
        # from the details of the BGP neighbor
        # currently, there are Junos 15 version having a field called `peer_fwd_rti`
        # but unfortunately, this is not consistent.
        # Junos 17 might have this fixed, but this needs to be revisited later.
        # In the definition below, `old_junos` means a version that does not provide
        # the forwarding RTI information.
        #
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        # if old_junos:
        instances = junos_views.junos_route_instance_table(self.device).get()
        for instance, instance_data in instances.items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            bgp_neighbor_data[instance] = {'peers': {}}
            instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
            uptime_table_items = uptime_table.get(instance=instance).items()
            _get_bgp_neighbors_core(instance_neighbors,
                                    instance=instance,
                                    uptime_table_items=uptime_table_items)
        # If the OS provides the `peer_fwd_rti` or any way to identify the
        # routing instance name (see above), the performances of this getter
        # can be significantly improved, as we won't execute one request
        # for each and every RT.
        # However, this improvement would only be beneficial for multi-VRF envs.
        #
        # else:
        #     instance_neighbors = bgp_neighbors_table.get().items()
        #     _get_bgp_neighbors_core(instance_neighbors)
        # Drop instances that ended up with no peers at all.
        bgp_tmp_dict = {}
        for k, v in bgp_neighbor_data.items():
            if bgp_neighbor_data[k]['peers']:
                bgp_tmp_dict[k] = v
        return bgp_tmp_dict
def get_lldp_neighbors(self):
"""Return LLDP neighbors details."""
lldp = junos_views.junos_lldp_table(self.device)
try:
lldp.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
result = lldp.items()
neighbors = {}
for neigh in result:
if neigh[0] not in neighbors.keys():
neighbors[neigh[0]] = []
neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
return neighbors
def get_lldp_neighbors_detail(self, interface=''):
"""Detailed view of the LLDP neighbors."""
lldp_neighbors = {}
lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
try:
lldp_table.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
interfaces = lldp_table.get().keys()
# get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
# and SRX Series is get-lldp-interface-neighbors-information,
# and rpc for M, MX, and T Series is get-lldp-interface-neighbors
# ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
# ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
for interface in interfaces:
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.get(interface_name=interface)
else:
lldp_table.get(interface_device=interface)
for item in lldp_table:
if interface not in lldp_neighbors.keys():
lldp_neighbors[interface] = []
lldp_neighbors[interface].append({
'parent_interface': item.parent_interface,
'remote_port': item.remote_port,
'remote_chassis_id': napalm_base.helpers.convert(
napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
'remote_port_description': napalm_base.helpers.convert(
py23_compat.text_type, item.remote_port_description),
'remote_system_name': item.remote_system_name,
'remote_system_description': item.remote_system_description,
'remote_system_capab': item.remote_system_capab,
'remote_system_enable_capab': item.remote_system_enable_capab
})
return lldp_neighbors
    def cli(self, commands):
        """
        Execute raw CLI commands and return their output.

        Junos cannot pipe CLI output server-side over NETCONF on every
        release, so the display pipes (``match``, ``except``, ``count``,
        ``trim``, ``last``, ``find``) are emulated locally when the device
        returns no output for the piped form.

        :param commands: List of CLI command strings; each may include
            display pipes (e.g. ``show version | match Junos``).
        :returns: Dict mapping each original command string to its output.
        :raises TypeError: When ``commands`` is not a list.
        """
        cli_output = {}
        def _count(txt, none):  # Second arg for consistency only. noqa
            '''
            Return the exact output, as Junos displays
            e.g.:
            > show system processes extensive | match root | count
            Count: 113 lines
            '''
            count = len(txt.splitlines())
            return 'Count: {count} lines'.format(count=count)
        def _trim(txt, length):
            '''
            Trim specified number of columns from start of line.
            '''
            try:
                newlines = []
                for line in txt.splitlines():
                    newlines.append(line[int(length):])
                return '\n'.join(newlines)
            except ValueError:
                return txt
        def _except(txt, pattern):
            '''
            Show only text that does not match a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            unmatched = [
                line for line in txt.splitlines()
                if not re.search(rgx, line, re.I)
            ]
            return '\n'.join(unmatched)
        def _last(txt, length):
            '''
            Display end of output only.
            '''
            try:
                return '\n'.join(
                    txt.splitlines()[(-1)*int(length):]
                )
            except ValueError:
                return txt
        def _match(txt, pattern):
            '''
            Show only text that matches a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            matched = [
                line for line in txt.splitlines()
                if re.search(rgx, line, re.I)
            ]
            return '\n'.join(matched)
        def _find(txt, pattern):
            '''
            Search for first occurrence of pattern.
            '''
            rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
            match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
            if match:
                return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
            else:
                return '\nPattern not found'
        def _process_pipe(cmd, txt):
            '''
            Process CLI output from Juniper device that
            doesn't allow piping the output.
            '''
            # If the platform honoured the pipes itself, txt is already
            # the final output -- return it untouched.
            if txt is not None:
                return txt
            _OF_MAP = OrderedDict()
            _OF_MAP['except'] = _except
            _OF_MAP['match'] = _match
            _OF_MAP['last'] = _last
            _OF_MAP['trim'] = _trim
            _OF_MAP['count'] = _count
            _OF_MAP['find'] = _find
            # the operations order matter in this case!
            # NOTE(review): pipes are applied in _OF_MAP insertion order,
            # not in the order they appear in the command -- confirm that
            # is the intended precedence.
            exploded_cmd = cmd.split('|')
            pipe_oper_args = {}
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                pipe_args = ''.join(exploded_pipe[1:2])
                # will not throw error when there's no arg
                pipe_oper_args[pipe_oper] = pipe_args
            for oper in _OF_MAP.keys():
                # to make sure the operation sequence is correct
                if oper not in pipe_oper_args.keys():
                    continue
                txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
            return txt
        if not isinstance(commands, list):
            raise TypeError('Please enter a valid list of commands!')
        _PIPE_BLACKLIST = ['save']
        # Preprocessing to avoid forbidden commands
        for command in commands:
            exploded_cmd = command.split('|')
            command_safe_parts = []
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                if pipe_oper in _PIPE_BLACKLIST:
                    continue
                pipe_args = ''.join(exploded_pipe[1:2])
                safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                                  args=pipe_args)
                command_safe_parts.append(safe_pipe)
            safe_command = exploded_cmd[0] if not command_safe_parts else\
                '{base} | {pipes}'.format(base=exploded_cmd[0],
                                          pipes=' | '.join(command_safe_parts))
            raw_txt = self.device.cli(safe_command, warning=False)
            # The output is keyed by the ORIGINAL command (pipes included);
            # local pipe emulation kicks in when the device returned None.
            cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
                _process_pipe(command, raw_txt))
        return cli_output
    def get_bgp_config(self, group='', neighbor=''):
        """
        Return the BGP configuration, keyed by group name.

        :param group: Only return the configuration of this BGP group.
        :param neighbor: Only return the configuration of this neighbor
            (honoured only when ``group`` is also set).
        :returns: Dict of group name -> group configuration; each group
            carries a ``neighbors`` dict keyed by peer IP.
        """
        def update_dict(d, u):  # for deep dictionary update
            # Recursively merge mapping `u` into mapping `d`.
            for k, v in u.items():
                if isinstance(d, collections.Mapping):
                    if isinstance(v, collections.Mapping):
                        r = update_dict(d.get(k, {}), v)
                        d[k] = r
                    else:
                        d[k] = u[k]
                else:
                    d = {k: u[k]}
            return d
        def build_prefix_limit(**args):
            """
            Transform the elements of a dictionary into nested dictionaries.

            Example:
                {
                    'inet_unicast_limit': 500,
                    'inet_unicast_teardown_threshold': 95,
                    'inet_unicast_teardown_timeout': 5
                }
            becomes:
                {
                    'inet': {
                        'unicast': {
                            'limit': 500,
                            'teardown': {
                                'threshold': 95,
                                'timeout': 5
                            }
                        }
                    }
                }
            """
            prefix_limit = {}
            for key, value in args.items():
                # Split 'inet_unicast_limit' into nesting levels and fold
                # them inside-out into a nested dict.
                key_levels = key.split('_')
                length = len(key_levels)-1
                temp_dict = {
                    key_levels[length]: value
                }
                for index in reversed(range(length)):
                    level = key_levels[index]
                    temp_dict = {level: temp_dict}
                update_dict(prefix_limit, temp_dict)
            return prefix_limit
        # Field -> expected Python type, shared by groups and peers.
        _COMMON_FIELDS_DATATYPE_ = {
            'description': py23_compat.text_type,
            'local_address': py23_compat.text_type,
            'local_as': int,
            'remote_as': int,
            'import_policy': py23_compat.text_type,
            'export_policy': py23_compat.text_type,
            'inet_unicast_limit_prefix_limit': int,
            'inet_unicast_teardown_threshold_prefix_limit': int,
            'inet_unicast_teardown_timeout_prefix_limit': int,
            'inet_unicast_novalidate_prefix_limit': int,
            'inet_flow_limit_prefix_limit': int,
            'inet_flow_teardown_threshold_prefix_limit': int,
            'inet_flow_teardown_timeout_prefix_limit': int,
            'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
            'inet6_unicast_limit_prefix_limit': int,
            'inet6_unicast_teardown_threshold_prefix_limit': int,
            'inet6_unicast_teardown_timeout_prefix_limit': int,
            'inet6_unicast_novalidate_prefix_limit': int,
            'inet6_flow_limit_prefix_limit': int,
            'inet6_flow_teardown_threshold_prefix_limit': int,
            'inet6_flow_teardown_timeout_prefix_limit': int,
            'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
        }
        _PEER_FIELDS_DATATYPE_MAP_ = {
            'authentication_key': py23_compat.text_type,
            'route_reflector_client': bool,
            'nhs': bool
        }
        _PEER_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )
        _GROUP_FIELDS_DATATYPE_MAP_ = {
            'type': py23_compat.text_type,
            'apply_groups': list,
            'remove_private_as': bool,
            'multipath': bool,
            'multihop_ttl': int
        }
        _GROUP_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )
        # Default value emitted for each datatype when Junos has no value.
        _DATATYPE_DEFAULT_ = {
            py23_compat.text_type: '',
            int: 0,
            bool: False,
            list: []
        }
        bgp_config = {}
        if group:
            bgp = junos_views.junos_bgp_config_group_table(self.device)
            bgp.get(group=group)
        else:
            bgp = junos_views.junos_bgp_config_table(self.device)
            bgp.get()
            neighbor = ''  # if no group is set, no neighbor should be set either
        bgp_items = bgp.items()
        if neighbor:
            neighbor_ip = napalm_base.helpers.ip(neighbor)
        for bgp_group in bgp_items:
            bgp_group_name = bgp_group[0]
            bgp_group_details = bgp_group[1]
            # Seed the group with defaults for every non-prefix-limit field.
            bgp_config[bgp_group_name] = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                # First pass: only non-prefix-limit fields with real values.
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    # Junos may return a list of policies; flatten to one string.
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                if key == 'neighbors':
                    # Stash the neighbors sub-table for the per-peer loop below.
                    bgp_group_peers = value
                    continue
                bgp_config[bgp_group_name].update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                # Second pass: collect the prefix-limit fields and nest them.
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            if 'multihop' in bgp_config[bgp_group_name].keys():
                # Delete 'multihop' key from the output
                del bgp_config[bgp_group_name]['multihop']
                if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                    # Set ttl to default value 64
                    bgp_config[bgp_group_name]['multihop_ttl'] = 64
            # NOTE(review): `bgp_group_peers` is only bound when the group
            # carried a 'neighbors' field -- confirm every group returned by
            # the view always has one.
            bgp_config[bgp_group_name]['neighbors'] = {}
            for bgp_group_neighbor in bgp_group_peers.items():
                bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
                if neighbor and bgp_peer_address != neighbor:
                    continue  # if filters applied, jump over all other neighbors
                bgp_group_details = bgp_group_neighbor[1]
                # Seed the peer with defaults for every non-prefix-limit field.
                bgp_peer_details = {
                    field: _DATATYPE_DEFAULT_.get(datatype)
                    for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                    if '_prefix_limit' not in field
                }
                for elem in bgp_group_details:
                    if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                        continue
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    key = elem[0]
                    value = elem[1]
                    if key in ['export_policy', 'import_policy']:
                        if isinstance(value, list):
                            value = ' '.join(value)
                    if key == 'local_address':
                        value = napalm_base.helpers.convert(
                            napalm_base.helpers.ip, value, value)
                    bgp_peer_details.update({
                        key: napalm_base.helpers.convert(datatype, value, default)
                    })
                    # NOTE(review): the AS normalisation below re-runs on
                    # every field iteration -- harmless but redundant.
                    bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['local_as'])
                    bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['remote_as'])
                    if key == 'cluster':
                        bgp_peer_details['route_reflector_client'] = True
                        # we do not want cluster in the output
                        del bgp_peer_details['cluster']
                if 'cluster' in bgp_config[bgp_group_name].keys():
                    # Cluster configured at group level also makes the peer
                    # a route-reflector client.
                    bgp_peer_details['route_reflector_client'] = True
                prefix_limit_fields = {}
                for elem in bgp_group_details:
                    if '_prefix_limit' in elem[0] and elem[1] is not None:
                        datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                        default = _DATATYPE_DEFAULT_.get(datatype)
                        prefix_limit_fields.update({
                            elem[0].replace('_prefix_limit', ''):
                                napalm_base.helpers.convert(datatype, elem[1], default)
                        })
                bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
                bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
                if neighbor and bgp_peer_address == neighbor_ip:
                    break  # found the desired neighbor
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # we do not want cluster in the output
                del bgp_config[bgp_group_name]['cluster']
        return bgp_config
    def get_bgp_neighbors_detail(self, neighbor_address=''):
        """
        Detailed view of the BGP neighbors operational data.

        :param neighbor_address: Optionally restrict the output to this
            peer address.
        :returns: Dict keyed by routing instance ('global' for the master
            instance), then by remote AS number; each value is a list of
            neighbor detail dicts.
        """
        bgp_neighbors = {}
        # Full set of fields NAPALM expects per neighbor; Junos values are
        # merged on top of these defaults.
        default_neighbor_details = {
            'up': False,
            'local_as': 0,
            'remote_as': 0,
            'router_id': u'',
            'local_address': u'',
            'routing_table': u'',
            'local_address_configured': False,
            'local_port': 0,
            'remote_address': u'',
            'remote_port': 0,
            'multihop': False,
            'multipath': False,
            'remove_private_as': False,
            'import_policy': u'',
            'export_policy': u'',
            'input_messages': -1,
            'output_messages': -1,
            'input_updates': -1,
            'output_updates': -1,
            'messages_queued_out': -1,
            'connection_state': u'',
            'previous_connection_state': u'',
            'last_event': u'',
            'suppress_4byte_as': False,
            'local_as_prepend': False,
            'holdtime': 0,
            'configured_holdtime': 0,
            'keepalive': 0,
            'configured_keepalive': 0,
            'active_prefix_count': -1,
            'received_prefix_count': -1,
            'accepted_prefix_count': -1,
            'suppressed_prefix_count': -1,
            'advertised_prefix_count': -1,
            'flap_count': 0
        }
        # Maps the space-separated tokens of the Junos 'options' field onto
        # NAPALM boolean flags.
        OPTION_KEY_MAP = {
            'RemovePrivateAS': 'remove_private_as',
            'Multipath': 'multipath',
            'Multihop': 'multihop',
            'AddressFamily': 'local_address_configured'
            # 'AuthKey' : 'authentication_key_set'
            # but other vendors do not specify if auth key is set
            # other options:
            # Preference, HoldTime, Ttl, LogUpDown, Refresh
        }
        def _bgp_iter_core(neighbor_data, instance=None):
            '''
            Iterate over a list of neighbors.
            For older junos, the routing instance is not specified inside the
            BGP neighbors XML, therefore we need to use a super sub-optimal structure
            as in get_bgp_neighbors: iterate through the list of network instances
            then execute one request for each and every routing instance.
            For newer junos, this is not necessary as the routing instance is available
            and we can get everything solve in a single request.
            '''
            for bgp_neighbor in neighbor_data:
                remote_as = int(bgp_neighbor[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # Newer Junos exposes the forwarding RTI per neighbor.
                    # NOTE(review): once set here, `instance` persists for
                    # later iterations of this loop -- confirm intended.
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                # Translate the Junos 'options' tokens into boolean flags.
                options = neighbor_details.pop('options', '')
                if isinstance(options, str):
                    options_list = options.split()
                    for option in options_list:
                        key = OPTION_KEY_MAP.get(option)
                        if key is not None:
                            neighbor_details[key] = True
                four_byte_as = neighbor_details.pop('4byte_as', 0)
                # Addresses arrive as 'ip+port'; split into the two fields,
                # defaulting to the well-known BGP port 179.
                local_address = neighbor_details.pop('local_address', '')
                local_details = local_address.split('+')
                neighbor_details['local_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, local_details[0], local_details[0])
                if len(local_details) == 2:
                    neighbor_details['local_port'] = int(local_details[1])
                else:
                    neighbor_details['local_port'] = 179
                neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
                peer_address = neighbor_details.pop('peer_address', '')
                remote_details = peer_address.split('+')
                neighbor_details['remote_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, remote_details[0], remote_details[0])
                if len(remote_details) == 2:
                    neighbor_details['remote_port'] = int(remote_details[1])
                else:
                    neighbor_details['remote_port'] = 179
                neighbor_details['routing_table'] = instance_name
                neighbor_details['local_as'] = napalm_base.helpers.as_number(
                    neighbor_details['local_as'])
                neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                    neighbor_details['remote_as'])
                neighbors_rib = neighbor_details.pop('rib')
                neighbors_queue = neighbor_details.pop('queue')
                # Sum the queued-out counters across all queues.
                messages_queued_out = 0
                for queue_entry in neighbors_queue.items():
                    messages_queued_out += queue_entry[1][0][1]
                neighbor_details['messages_queued_out'] = messages_queued_out
                if instance_name not in bgp_neighbors.keys():
                    bgp_neighbors[instance_name] = {}
                if remote_as not in bgp_neighbors[instance_name].keys():
                    bgp_neighbors[instance_name][remote_as] = []
                neighbor_rib_stats = neighbors_rib.items()
                if not neighbor_rib_stats:
                    bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                    continue  # no RIBs available, pass default details
                # Aggregate the prefix counters across all RIBs of the peer.
                neighbor_rib_details = {
                    'active_prefix_count': 0,
                    'received_prefix_count': 0,
                    'accepted_prefix_count': 0,
                    'suppressed_prefix_count': 0,
                    'advertised_prefix_count': 0
                }
                for rib_entry in neighbor_rib_stats:
                    for elem in rib_entry[1]:
                        if elem[1] is None:
                            neighbor_rib_details[elem[0]] += 0
                        else:
                            neighbor_rib_details[elem[0]] += elem[1]
                neighbor_details.update(neighbor_rib_details)
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        # if old_junos:
        # One RPC per routing instance, skipping Junos-internal ones.
        instances = junos_views.junos_route_instance_table(self.device)
        for instance, instance_data in instances.get().items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                    neighbor_address=str(neighbor_address)).items()
            _bgp_iter_core(neighbor_data, instance=instance)
        # else:
        #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
        #     _bgp_iter_core(neighbor_data)
        return bgp_neighbors
def get_arp_table(self):
"""Return the ARP table."""
# could use ArpTable
# from jnpr.junos.op.phyport import ArpTable
# and simply use it
# but
# we need:
# - filters
# - group by VLAN ID
# - hostname & TTE fields as well
arp_table = []
arp_table_raw = junos_views.junos_arp_table(self.device)
arp_table_raw.get()
arp_table_items = arp_table_raw.items()
for arp_table_entry in arp_table_items:
arp_entry = {
elem[0]: elem[1] for elem in arp_table_entry[1]
}
arp_entry['mac'] = napalm_base.helpers.mac(arp_entry.get('mac'))
arp_entry['ip'] = napalm_base.helpers.ip(arp_entry.get('ip'))
arp_table.append(arp_entry)
return arp_table
def get_ntp_peers(self):
"""Return the NTP peers configured on the device."""
ntp_table = junos_views.junos_ntp_peers_config_table(self.device)
ntp_table.get()
ntp_peers = ntp_table.items()
if not ntp_peers:
return {}
return {napalm_base.helpers.ip(peer[0]): {} for peer in ntp_peers}
def get_ntp_servers(self):
"""Return the NTP servers configured on the device."""
ntp_table = junos_views.junos_ntp_servers_config_table(self.device)
ntp_table.get()
ntp_servers = ntp_table.items()
if not ntp_servers:
return {}
return {napalm_base.helpers.ip(server[0]): {} for server in ntp_servers}
def get_ntp_stats(self):
"""Return NTP stats (associations)."""
# NTP Peers does not have XML RPC defined
# thus we need to retrieve raw text and parse...
# :(
ntp_stats = []
REGEX = (
'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
'\s+([0-9\.]+)\s?$'
)
ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
ntp_assoc_output_lines = ntp_assoc_output.splitlines()
for ntp_assoc_output_line in ntp_assoc_output_lines[3:]: # except last line
line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
if not line_search:
continue # pattern not found
line_groups = line_search.groups()
try:
ntp_stats.append({
'remote': napalm_base.helpers.ip(line_groups[1]),
'synchronized': (line_groups[0] == '*'),
'referenceid': py23_compat.text_type(line_groups[2]),
'stratum': int(line_groups[3]),
'type': py23_compat.text_type(line_groups[4]),
'when': py23_compat.text_type(line_groups[5]),
'hostpoll': int(line_groups[6]),
'reachability': int(line_groups[7]),
'delay': float(line_groups[8]),
'offset': float(line_groups[9]),
'jitter': float(line_groups[10])
})
except Exception:
continue # jump to next line
return ntp_stats
def get_interfaces_ip(self):
"""Return the configured IP addresses."""
interfaces_ip = {}
interface_table = junos_views.junos_ip_interfaces_table(self.device)
interface_table.get()
interface_table_items = interface_table.items()
_FAMILY_VMAP_ = {
'inet': u'ipv4',
'inet6': u'ipv6'
# can add more mappings
}
_FAMILY_MAX_PREFIXLEN = {
'inet': 32,
'inet6': 128
}
for interface_details in interface_table_items:
ip_network = interface_details[0]
ip_address = ip_network.split('/')[0]
address = napalm_base.helpers.convert(
napalm_base.helpers.ip, ip_address, ip_address)
try:
interface_details_dict = dict(interface_details[1])
family_raw = interface_details_dict.get('family')
interface = py23_compat.text_type(interface_details_dict.get('interface'))
except ValueError:
continue
prefix = napalm_base.helpers.convert(int,
ip_network.split('/')[-1],
_FAMILY_MAX_PREFIXLEN.get(family_raw))
family = _FAMILY_VMAP_.get(family_raw)
if not family or not interface:
continue
if interface not in interfaces_ip.keys():
interfaces_ip[interface] = {}
if family not in interfaces_ip[interface].keys():
interfaces_ip[interface][family] = {}
if address not in interfaces_ip[interface][family].keys():
interfaces_ip[interface][family][address] = {}
interfaces_ip[interface][family][address][u'prefix_length'] = prefix
return interfaces_ip
def get_mac_address_table(self):
"""Return the MAC address table."""
mac_address_table = []
if self.device.facts.get('personality', '') in ['SWITCH']: # for EX & QFX devices
if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']: # for L2NG devices
mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
else:
mac_table = junos_views.junos_mac_address_table_switch(self.device)
else:
mac_table = junos_views.junos_mac_address_table(self.device)
mac_table.get()
mac_table_items = mac_table.items()
default_values = {
'mac': u'',
'interface': u'',
'vlan': 0,
'static': False,
'active': True,
'moves': 0,
'last_move': 0.0
}
for mac_table_entry in mac_table_items:
mac_entry = default_values.copy()
mac_entry.update(
{elem[0]: elem[1] for elem in mac_table_entry[1]}
)
mac = mac_entry.get('mac')
# JUNOS returns '*' for Type = Flood
if mac == '*':
continue
mac_entry['mac'] = napalm_base.helpers.mac(mac)
mac_address_table.append(mac_entry)
return mac_address_table
    def get_route_to(self, destination='', protocol=''):
        """
        Return route details to a specific destination, learned from a
        certain protocol.

        :param destination: Destination prefix, e.g. ``1.2.3.4/24``.
        :param protocol: Optionally filter by routing protocol;
            'connected' is translated to the Junos name 'direct'.
        :returns: Dict keyed by '<destination>/<prefix_length>'; each
            value is a list of route dicts.
        :raises TypeError: When ``destination`` is not a string.
        :raises CommandTimeoutException: When the query matches too many
            routes and the RPC times out.
        :raises CommandErrorException: On an unknown protocol or any other
            RPC failure.
        """
        routes = {}
        if not isinstance(destination, py23_compat.string_types):
            raise TypeError('Please specify a valid destination!')
        if protocol and isinstance(destination, py23_compat.string_types):
            protocol = protocol.lower()
        if protocol == 'connected':
            protocol = 'direct'  # this is how is called on JunOS
        _COMMON_PROTOCOL_FIELDS_ = [
            'destination',
            'prefix_length',
            'protocol',
            'current_active',
            'last_active',
            'age',
            'next_hop',
            'outgoing_interface',
            'selected_next_hop',
            'preference',
            'inactive_reason',
            'routing_table'
        ]  # identifies the list of fields common for all protocols
        _BOOLEAN_FIELDS_ = [
            'current_active',
            'selected_next_hop',
            'last_active'
        ]  # fields expected to have boolean values
        # Fields only meaningful for a given protocol; everything else that
        # is not a common field gets dropped from the output.
        _PROTOCOL_SPECIFIC_FIELDS_ = {
            'bgp': [
                'local_as',
                'remote_as',
                'as_path',
                'communities',
                'local_preference',
                'preference2',
                'remote_address',
                'metric',
                'metric2'
            ],
            'isis': [
                'level',
                'metric',
                'local_as'
            ]
        }
        routes_table = junos_views.junos_protocol_route_table(self.device)
        rt_kargs = {
            'destination': destination
        }
        if protocol and isinstance(destination, py23_compat.string_types):
            rt_kargs['protocol'] = protocol
        try:
            routes_table.get(**rt_kargs)
        except RpcTimeoutError:
            # on devices with milions of routes
            # in case the destination is too generic (e.g.: 10/8)
            # will take very very long to determine all routes and
            # moreover will return a huge list
            raise CommandTimeoutException(
                'Too many routes returned! Please try with a longer prefix or a specific protocol!'
            )
        except RpcError as rpce:
            if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
                raise CommandErrorException(
                    'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
            raise CommandErrorException(rpce)
        except Exception as err:
            raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
        routes_items = routes_table.items()
        for route in routes_items:
            # NOTE(review): the empty-dict initialisation below is
            # immediately re-bound by the comprehension -- redundant.
            d = {}
            # next_hop = route[0]
            d = {elem[0]: elem[1] for elem in route[1]}
            destination = napalm_base.helpers.ip(d.pop('destination', ''))
            prefix_length = d.pop('prefix_length', 32)
            destination = '{d}/{p}'.format(
                d=destination,
                p=prefix_length
            )
            # Missing booleans default to False rather than None.
            d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
            as_path = d.get('as_path')
            if as_path is not None:
                # Strip the 'AS path:' prefix and origin markers so only
                # the AS numbers remain.
                d['as_path'] = as_path.split(' I ')[0]\
                    .replace('AS path:', '')\
                    .replace('I', '')\
                    .strip()
                # to be sure that contains only AS Numbers
            if d.get('inactive_reason') is None:
                d['inactive_reason'] = u''
            route_protocol = d.get('protocol').lower()
            if protocol and protocol != route_protocol:
                continue
            communities = d.get('communities')
            if communities is not None and type(communities) is not list:
                # A single community arrives as a scalar; normalise to list.
                d['communities'] = [communities]
            d_keys = list(d.keys())
            # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
            all_protocol_attributes = {
                key: d.pop(key)
                for key in d_keys
                if key not in _COMMON_PROTOCOL_FIELDS_
            }
            protocol_attributes = {
                key: value for key, value in all_protocol_attributes.items()
                if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
            }
            d['protocol_attributes'] = protocol_attributes
            if destination not in routes.keys():
                routes[destination] = []
            routes[destination].append(d)
        return routes
def get_snmp_information(self):
    """Return the SNMP configuration of the device."""
    result = {}
    config_table = junos_views.junos_snmp_config_table(self.device)
    config_table.get()
    entries = config_table.items()
    if not entries:
        return result
    # The first (and only) table entry holds the global SNMP settings.
    for field, value in entries[0][1]:
        result[py23_compat.text_type(field)] = value if value else ''
    result['community'] = {}
    communities = result.pop('communities_table')
    if not communities:
        return result
    for community_name, community_fields in communities.items():
        details = {'acl': ''}
        for field, value in community_fields:
            if field == 'mode':
                # Translate the Junos authorization mode to the common model.
                value = C.SNMP_AUTHORIZATION_MODE_MAP.get(value)
            details[py23_compat.text_type(field)] = py23_compat.text_type(value)
        result['community'][py23_compat.text_type(community_name)] = details
    return result
def get_probes_config(self):
    """Return the configuration of the RPM probes."""
    probes = {}
    config_table = junos_views.junos_rpm_probes_config_table(self.device)
    config_table.get()
    for test_id, test_fields in config_table.items():
        test_name = py23_compat.text_type(test_id)
        details = dict(test_fields)
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('probe_name'))
        # Assemble the test entry, coercing every field to its expected type.
        test_entry = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, details.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, details.pop('test_interval', '0')),
        }
        probes.setdefault(probe_name, {})[test_name] = test_entry
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes."""
    results = {}
    results_table = junos_views.junos_rpm_probes_results_table(self.device)
    results_table.get()
    for probe_id, probe_fields in results_table.items():
        probe_name = py23_compat.text_type(probe_id)
        probe_data = dict(probe_fields)
        probe_data['last_test_loss'] = napalm_base.helpers.convert(
            int, probe_data.pop('last_test_loss'), 0)
        # Timing values are reported in microseconds: scale to milliseconds.
        for field, value in list(probe_data.items()):
            if isinstance(value, float):
                probe_data[field] = value * 1e-3
        test_name = probe_data.pop('test_name', '')
        if probe_data.get('source', u'') is None:
            probe_data['source'] = u''
        results.setdefault(probe_name, {})[test_name] = probe_data
    return results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute a traceroute towards ``destination`` and return the results."""
    result = {}
    # Calling the traceroute RPC directly does not work properly, so the
    # CLI command is sent as a raw <command> element over NETCONF.
    command = 'traceroute {destination}'.format(destination=destination)
    if source:
        command += ' source {source}'.format(source=source)
    if ttl:
        command += ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        command += ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        command += ' routing-instance {vrf}'.format(vrf=vrf)
    rpc_reply = self.device._conn.rpc(E('command', command))._NCElement__doc
    traceroute_results = rpc_reply.find('.//traceroute-results')
    failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')
    if failure and error_message:
        return {'error': '{}: {}'.format(failure, error_message)}
    result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        hop_ttl = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if hop_ttl not in result['success']:
            result['success'][hop_ttl] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            result['success'][hop_ttl]['probes'][probe_index] = {
                'ip_address': napalm_base.helpers.convert(
                    napalm_base.helpers.ip,
                    napalm_base.helpers.find_txt(probe, 'ip-address'), '*'),
                'host_name': py23_compat.text_type(
                    napalm_base.helpers.find_txt(probe, 'host-name', '*')),
                # RTT is reported in microseconds; convert to milliseconds.
                'rtt': napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3
            }
    return result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute a ping towards ``destination`` and return the results.

    Returns a dictionary with a ``success`` key (probe statistics plus
    per-probe RTT values, converted from microseconds to milliseconds)
    or an ``error`` key when the RPC fails or all probes were lost.
    """
    ping_dict = {}
    # Build the optional CLI arguments only for the parameters provided.
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    # Make a direct RPC call via NETCONF: the CLI command is wrapped in a
    # raw <command> element.
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we get an ICMP reply.
    # Fixed: use `!=` instead of `is not` -- identity comparison against an
    # int literal relies on CPython's small-integer caching and emits a
    # SyntaxWarning on recent Python versions.
    if packet_loss != 100:
        ping_dict['success'] = {}
        # Use the safe converter so a missing <probes-sent> element yields 0
        # instead of raising TypeError from int(None).
        ping_dict['success']['probes_sent'] = napalm_base.helpers.convert(
            int, probe_summary.findtext("probes-sent"), 0)
        ping_dict['success']['packet_loss'] = packet_loss
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the configuration of the users."""
    users = {}
    # Junos login classes translated to Cisco-style privilege levels.
    _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _DEFAULT_USER_DETAILS = {
        'level': 0,
        'password': '',
        'sshkeys': []
    }
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    for username, user_fields in users_table.items():
        details = _DEFAULT_USER_DETAILS.copy()
        # Overlay only the fields that carry a truthy value.
        for field, value in user_fields:
            if value:
                details[field] = value
        user_class = details.pop('class', '')
        details = {
            field: py23_compat.text_type(value)
            for field, value in details.items()
        }
        details['level'] = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
        # Collect whichever SSH key types are present into a single list.
        details['sshkeys'] = [
            details.pop(key_type)
            for key_type in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if details.get(key_type, '')
        ]
        users[username] = details
    return users
def get_optics(self):
    """Return optics diagnostics (per-lane Rx/Tx power and bias current)."""
    # Phase 1: per-interface optics with no lane information. This table
    # lists all optics (10G/40G/100G), but the values it reports for
    # 40G/100G interfaces are not correct; lane 0 is inserted so every
    # entry has the shape ('xe-0/0/0', u'0', [optical_values]).
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    single_lane_items = []
    for entry in optics_table.items():
        single_lane_items.append((entry[0], u"0", entry[1]))
    # Phase 2: dedicated 40G/100G table, one entry per lane:
    #   ('et-0/0/49', u'0', [...]), ('et-0/0/49', u'1', [...]), ...
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    merged_items = []
    for entry in optics_table40G.items():
        lane = entry[0]
        iface = entry[1].pop(0)
        merged_items.append((iface[1], py23_compat.text_type(lane), entry[1]))
    # Drop the (incorrect) lane-0 entries for interfaces already covered by
    # the 40G/100G table, then merge the remaining 10G entries in.
    multi_lane_ifaces = [item[0] for item in merged_items]
    for item in single_lane_items:
        if item[0] not in multi_lane_ifaces:
            merged_items.append(item)
    # Phase 3: build the OpenConfig-style output structure. The device does
    # not report avg/min/max values, so those default to 0.0.
    optics_detail = {}
    for iface, lane, values in merged_items:
        interface_name = py23_compat.text_type(iface)
        optics = dict(values)
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {
                'physical_channels': {
                    'channel': []
                }
            }

        def _instant(field):
            # Treat a missing or "null level" reading as 0.0.
            raw = optics[field]
            return float(raw) if raw not in [None, C.OPTICS_NULL_LEVEL] else 0.0

        channel = {
            'index': int(lane),
            'state': {
                field: {
                    'instant': _instant(field),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
                for field in ('input_power', 'output_power', 'laser_bias_current')
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(channel)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration.

    ``retrieve`` selects which databases to fetch: 'candidate',
    'running', or 'all'. Junos has no separate startup configuration,
    so 'startup' is always empty.
    """
    config = {
        'startup': '',
        'running': '',
        'candidate': ''
    }
    options = {'format': 'text', 'database': 'candidate'}
    if retrieve in ('candidate', 'all'):
        raw = self.device.rpc.get_config(filter_xml=None, options=options)
        config['candidate'] = py23_compat.text_type(raw.text)
    if retrieve in ('running', 'all'):
        options['database'] = 'committed'
        raw = self.device.rpc.get_config(filter_xml=None, options=options)
        config['running'] = py23_compat.text_type(raw.text)
    return config
def get_network_instances(self, name=''):
    """Return the routing instances (VRFs) configured on the device.

    When ``name`` is given, only that instance is returned (empty dict
    if it does not exist); otherwise all instances are returned,
    including a synthetic 'default' instance holding every interface
    not bound to any other instance.
    """
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    vrf_interfaces = []
    for instance_id, instance_fields in ri_table.items():
        ri_name = py23_compat.text_type(instance_id)
        details = dict(instance_fields)
        instance_type = details['instance_type']
        if instance_type is None:
            instance_type = 'default'
        route_distinguisher = details['route_distinguisher']
        interfaces = details['interfaces']
        if not isinstance(interfaces, list):
            interfaces = [interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            # Map to the OpenConfig type; fall back to the raw Junos type.
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(instance_type, instance_type),
            'state': {
                'route_distinguisher': route_distinguisher if route_distinguisher else ''
            },
            'interfaces': {
                'interface': {
                    ifname: {} for ifname in interfaces if ifname
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # Interfaces not bound to any instance belong to the default one.
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(ifname): {}
                    for ifname in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.