repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
xflr6/concepts
concepts/__init__.py
load_csv
def load_csv(filename, dialect='excel', encoding='utf-8'): """Load and return formal context from CSV file. Args: filename: Path to the CSV file to load the context from. dialect: Syntax variant of the CSV file (``'excel'``, ``'excel-tab'``). encoding (str): Encoding of the file (``'utf-8'``, ``'latin1'``, ``'ascii'``, ...). Example: >>> load_csv('examples/vowels.csv') # doctest: +ELLIPSIS <Context object mapping 12 objects to 8 properties [a717eee4] at 0x...> """ return Context.fromfile(filename, 'csv', encoding, dialect=dialect)
python
def load_csv(filename, dialect='excel', encoding='utf-8'): """Load and return formal context from CSV file. Args: filename: Path to the CSV file to load the context from. dialect: Syntax variant of the CSV file (``'excel'``, ``'excel-tab'``). encoding (str): Encoding of the file (``'utf-8'``, ``'latin1'``, ``'ascii'``, ...). Example: >>> load_csv('examples/vowels.csv') # doctest: +ELLIPSIS <Context object mapping 12 objects to 8 properties [a717eee4] at 0x...> """ return Context.fromfile(filename, 'csv', encoding, dialect=dialect)
[ "def", "load_csv", "(", "filename", ",", "dialect", "=", "'excel'", ",", "encoding", "=", "'utf-8'", ")", ":", "return", "Context", ".", "fromfile", "(", "filename", ",", "'csv'", ",", "encoding", ",", "dialect", "=", "dialect", ")" ]
Load and return formal context from CSV file. Args: filename: Path to the CSV file to load the context from. dialect: Syntax variant of the CSV file (``'excel'``, ``'excel-tab'``). encoding (str): Encoding of the file (``'utf-8'``, ``'latin1'``, ``'ascii'``, ...). Example: >>> load_csv('examples/vowels.csv') # doctest: +ELLIPSIS <Context object mapping 12 objects to 8 properties [a717eee4] at 0x...>
[ "Load", "and", "return", "formal", "context", "from", "CSV", "file", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/__init__.py#L61-L73
train
24,000
xflr6/concepts
concepts/definitions.py
ensure_compatible
def ensure_compatible(left, right): """Raise an informative ``ValueError`` if the two definitions disagree.""" conflicts = list(conflicting_pairs(left, right)) if conflicts: raise ValueError('conflicting values for object/property pairs: %r' % conflicts)
python
def ensure_compatible(left, right): """Raise an informative ``ValueError`` if the two definitions disagree.""" conflicts = list(conflicting_pairs(left, right)) if conflicts: raise ValueError('conflicting values for object/property pairs: %r' % conflicts)
[ "def", "ensure_compatible", "(", "left", ",", "right", ")", ":", "conflicts", "=", "list", "(", "conflicting_pairs", "(", "left", ",", "right", ")", ")", "if", "conflicts", ":", "raise", "ValueError", "(", "'conflicting values for object/property pairs: %r'", "%",...
Raise an informative ``ValueError`` if the two definitions disagree.
[ "Raise", "an", "informative", "ValueError", "if", "the", "two", "definitions", "disagree", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L211-L215
train
24,001
xflr6/concepts
concepts/definitions.py
Definition.rename_object
def rename_object(self, old, new): """Replace the name of an object by a new one.""" self._objects.replace(old, new) pairs = self._pairs pairs |= {(new, p) for p in self._properties if (old, p) in pairs and not pairs.remove((old, p))}
python
def rename_object(self, old, new): """Replace the name of an object by a new one.""" self._objects.replace(old, new) pairs = self._pairs pairs |= {(new, p) for p in self._properties if (old, p) in pairs and not pairs.remove((old, p))}
[ "def", "rename_object", "(", "self", ",", "old", ",", "new", ")", ":", "self", ".", "_objects", ".", "replace", "(", "old", ",", "new", ")", "pairs", "=", "self", ".", "_pairs", "pairs", "|=", "{", "(", "new", ",", "p", ")", "for", "p", "in", "...
Replace the name of an object by a new one.
[ "Replace", "the", "name", "of", "an", "object", "by", "a", "new", "one", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L317-L322
train
24,002
xflr6/concepts
concepts/definitions.py
Definition.rename_property
def rename_property(self, old, new): """Replace the name of a property by a new one.""" self._properties.replace(old, new) pairs = self._pairs pairs |= {(o, new) for o in self._objects if (o, old) in pairs and not pairs.remove((o, old))}
python
def rename_property(self, old, new): """Replace the name of a property by a new one.""" self._properties.replace(old, new) pairs = self._pairs pairs |= {(o, new) for o in self._objects if (o, old) in pairs and not pairs.remove((o, old))}
[ "def", "rename_property", "(", "self", ",", "old", ",", "new", ")", ":", "self", ".", "_properties", ".", "replace", "(", "old", ",", "new", ")", "pairs", "=", "self", ".", "_pairs", "pairs", "|=", "{", "(", "o", ",", "new", ")", "for", "o", "in"...
Replace the name of a property by a new one.
[ "Replace", "the", "name", "of", "a", "property", "by", "a", "new", "one", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L324-L329
train
24,003
xflr6/concepts
concepts/definitions.py
Definition.add_object
def add_object(self, obj, properties=()): """Add an object to the definition and add ``properties`` as related.""" self._objects.add(obj) self._properties |= properties self._pairs.update((obj, p) for p in properties)
python
def add_object(self, obj, properties=()): """Add an object to the definition and add ``properties`` as related.""" self._objects.add(obj) self._properties |= properties self._pairs.update((obj, p) for p in properties)
[ "def", "add_object", "(", "self", ",", "obj", ",", "properties", "=", "(", ")", ")", ":", "self", ".", "_objects", ".", "add", "(", "obj", ")", "self", ".", "_properties", "|=", "properties", "self", ".", "_pairs", ".", "update", "(", "(", "obj", "...
Add an object to the definition and add ``properties`` as related.
[ "Add", "an", "object", "to", "the", "definition", "and", "add", "properties", "as", "related", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L350-L354
train
24,004
xflr6/concepts
concepts/definitions.py
Definition.add_property
def add_property(self, prop, objects=()): """Add a property to the definition and add ``objects`` as related.""" self._properties.add(prop) self._objects |= objects self._pairs.update((o, prop) for o in objects)
python
def add_property(self, prop, objects=()): """Add a property to the definition and add ``objects`` as related.""" self._properties.add(prop) self._objects |= objects self._pairs.update((o, prop) for o in objects)
[ "def", "add_property", "(", "self", ",", "prop", ",", "objects", "=", "(", ")", ")", ":", "self", ".", "_properties", ".", "add", "(", "prop", ")", "self", ".", "_objects", "|=", "objects", "self", ".", "_pairs", ".", "update", "(", "(", "o", ",", ...
Add a property to the definition and add ``objects`` as related.
[ "Add", "a", "property", "to", "the", "definition", "and", "add", "objects", "as", "related", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L356-L360
train
24,005
xflr6/concepts
concepts/definitions.py
Definition.remove_object
def remove_object(self, obj): """Remove an object from the definition.""" self._objects.remove(obj) self._pairs.difference_update((obj, p) for p in self._properties)
python
def remove_object(self, obj): """Remove an object from the definition.""" self._objects.remove(obj) self._pairs.difference_update((obj, p) for p in self._properties)
[ "def", "remove_object", "(", "self", ",", "obj", ")", ":", "self", ".", "_objects", ".", "remove", "(", "obj", ")", "self", ".", "_pairs", ".", "difference_update", "(", "(", "obj", ",", "p", ")", "for", "p", "in", "self", ".", "_properties", ")" ]
Remove an object from the definition.
[ "Remove", "an", "object", "from", "the", "definition", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L362-L365
train
24,006
xflr6/concepts
concepts/definitions.py
Definition.remove_property
def remove_property(self, prop): """Remove a property from the definition.""" self._properties.remove(prop) self._pairs.difference_update((o, prop) for o in self._objects)
python
def remove_property(self, prop): """Remove a property from the definition.""" self._properties.remove(prop) self._pairs.difference_update((o, prop) for o in self._objects)
[ "def", "remove_property", "(", "self", ",", "prop", ")", ":", "self", ".", "_properties", ".", "remove", "(", "prop", ")", "self", ".", "_pairs", ".", "difference_update", "(", "(", "o", ",", "prop", ")", "for", "o", "in", "self", ".", "_objects", ")...
Remove a property from the definition.
[ "Remove", "a", "property", "from", "the", "definition", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L367-L370
train
24,007
xflr6/concepts
concepts/definitions.py
Definition.set_object
def set_object(self, obj, properties): """Add an object to the definition and set its ``properties``.""" self._objects.add(obj) properties = set(properties) self._properties |= properties pairs = self._pairs for p in self._properties: if p in properties: pairs.add((obj, p)) else: pairs.discard((obj, p))
python
def set_object(self, obj, properties): """Add an object to the definition and set its ``properties``.""" self._objects.add(obj) properties = set(properties) self._properties |= properties pairs = self._pairs for p in self._properties: if p in properties: pairs.add((obj, p)) else: pairs.discard((obj, p))
[ "def", "set_object", "(", "self", ",", "obj", ",", "properties", ")", ":", "self", ".", "_objects", ".", "add", "(", "obj", ")", "properties", "=", "set", "(", "properties", ")", "self", ".", "_properties", "|=", "properties", "pairs", "=", "self", "."...
Add an object to the definition and set its ``properties``.
[ "Add", "an", "object", "to", "the", "definition", "and", "set", "its", "properties", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L372-L382
train
24,008
xflr6/concepts
concepts/definitions.py
Definition.set_property
def set_property(self, prop, objects): """Add a property to the definition and set its ``objects``.""" self._properties.add(prop) objects = set(objects) self._objects |= objects pairs = self._pairs for o in self._objects: if o in objects: pairs.add((o, prop)) else: pairs.discard((o, prop))
python
def set_property(self, prop, objects): """Add a property to the definition and set its ``objects``.""" self._properties.add(prop) objects = set(objects) self._objects |= objects pairs = self._pairs for o in self._objects: if o in objects: pairs.add((o, prop)) else: pairs.discard((o, prop))
[ "def", "set_property", "(", "self", ",", "prop", ",", "objects", ")", ":", "self", ".", "_properties", ".", "add", "(", "prop", ")", "objects", "=", "set", "(", "objects", ")", "self", ".", "_objects", "|=", "objects", "pairs", "=", "self", ".", "_pa...
Add a property to the definition and set its ``objects``.
[ "Add", "a", "property", "to", "the", "definition", "and", "set", "its", "objects", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L384-L394
train
24,009
xflr6/concepts
concepts/definitions.py
Definition.union_update
def union_update(self, other, ignore_conflicts=False): """Update the definition with the union of the ``other``.""" if not ignore_conflicts: ensure_compatible(self, other) self._objects |= other._objects self._properties |= other._properties self._pairs |= other._pairs
python
def union_update(self, other, ignore_conflicts=False): """Update the definition with the union of the ``other``.""" if not ignore_conflicts: ensure_compatible(self, other) self._objects |= other._objects self._properties |= other._properties self._pairs |= other._pairs
[ "def", "union_update", "(", "self", ",", "other", ",", "ignore_conflicts", "=", "False", ")", ":", "if", "not", "ignore_conflicts", ":", "ensure_compatible", "(", "self", ",", "other", ")", "self", ".", "_objects", "|=", "other", ".", "_objects", "self", "...
Update the definition with the union of the ``other``.
[ "Update", "the", "definition", "with", "the", "union", "of", "the", "other", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L396-L402
train
24,010
xflr6/concepts
concepts/definitions.py
Definition.union
def union(self, other, ignore_conflicts=False): """Return a new definition from the union of the definitions.""" result = self.copy() result.union_update(other, ignore_conflicts) return result
python
def union(self, other, ignore_conflicts=False): """Return a new definition from the union of the definitions.""" result = self.copy() result.union_update(other, ignore_conflicts) return result
[ "def", "union", "(", "self", ",", "other", ",", "ignore_conflicts", "=", "False", ")", ":", "result", "=", "self", ".", "copy", "(", ")", "result", ".", "union_update", "(", "other", ",", "ignore_conflicts", ")", "return", "result" ]
Return a new definition from the union of the definitions.
[ "Return", "a", "new", "definition", "from", "the", "union", "of", "the", "definitions", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L420-L424
train
24,011
xflr6/concepts
concepts/definitions.py
Definition.intersection
def intersection(self, other, ignore_conflicts=False): """Return a new definition from the intersection of the definitions.""" result = self.copy() result.intersection_update(other, ignore_conflicts) return result
python
def intersection(self, other, ignore_conflicts=False): """Return a new definition from the intersection of the definitions.""" result = self.copy() result.intersection_update(other, ignore_conflicts) return result
[ "def", "intersection", "(", "self", ",", "other", ",", "ignore_conflicts", "=", "False", ")", ":", "result", "=", "self", ".", "copy", "(", ")", "result", ".", "intersection_update", "(", "other", ",", "ignore_conflicts", ")", "return", "result" ]
Return a new definition from the intersection of the definitions.
[ "Return", "a", "new", "definition", "from", "the", "intersection", "of", "the", "definitions", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L426-L430
train
24,012
xflr6/concepts
concepts/tools.py
maximal
def maximal(iterable, comparison=operator.lt, _groupkey=operator.itemgetter(0)): """Yield the unique maximal elements from ``iterable`` using ``comparison``. >>> list(maximal([1, 2, 3, 3])) [3] >>> list(maximal([1])) [1] """ iterable = set(iterable) if len(iterable) < 2: return iterable return (item for item, pairs in groupby(permutations(iterable, 2), key=_groupkey) if not any(starmap(comparison, pairs)))
python
def maximal(iterable, comparison=operator.lt, _groupkey=operator.itemgetter(0)): """Yield the unique maximal elements from ``iterable`` using ``comparison``. >>> list(maximal([1, 2, 3, 3])) [3] >>> list(maximal([1])) [1] """ iterable = set(iterable) if len(iterable) < 2: return iterable return (item for item, pairs in groupby(permutations(iterable, 2), key=_groupkey) if not any(starmap(comparison, pairs)))
[ "def", "maximal", "(", "iterable", ",", "comparison", "=", "operator", ".", "lt", ",", "_groupkey", "=", "operator", ".", "itemgetter", "(", "0", ")", ")", ":", "iterable", "=", "set", "(", "iterable", ")", "if", "len", "(", "iterable", ")", "<", "2"...
Yield the unique maximal elements from ``iterable`` using ``comparison``. >>> list(maximal([1, 2, 3, 3])) [3] >>> list(maximal([1])) [1]
[ "Yield", "the", "unique", "maximal", "elements", "from", "iterable", "using", "comparison", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/tools.py#L142-L156
train
24,013
xflr6/concepts
concepts/tools.py
Unique.replace
def replace(self, item, new_item): """Replace an item preserving order. >>> u = Unique([0, 1, 2]) >>> u.replace(1, 'spam') >>> u Unique([0, 'spam', 2]) >>> u.replace('eggs', 1) Traceback (most recent call last): ... ValueError: 'eggs' is not in list >>> u.replace('spam', 0) Traceback (most recent call last): ... ValueError: 0 already in list """ if new_item in self._seen: raise ValueError('%r already in list' % new_item) idx = self._items.index(item) self._seen.remove(item) self._seen.add(new_item) self._items[idx] = new_item
python
def replace(self, item, new_item): """Replace an item preserving order. >>> u = Unique([0, 1, 2]) >>> u.replace(1, 'spam') >>> u Unique([0, 'spam', 2]) >>> u.replace('eggs', 1) Traceback (most recent call last): ... ValueError: 'eggs' is not in list >>> u.replace('spam', 0) Traceback (most recent call last): ... ValueError: 0 already in list """ if new_item in self._seen: raise ValueError('%r already in list' % new_item) idx = self._items.index(item) self._seen.remove(item) self._seen.add(new_item) self._items[idx] = new_item
[ "def", "replace", "(", "self", ",", "item", ",", "new_item", ")", ":", "if", "new_item", "in", "self", ".", "_seen", ":", "raise", "ValueError", "(", "'%r already in list'", "%", "new_item", ")", "idx", "=", "self", ".", "_items", ".", "index", "(", "i...
Replace an item preserving order. >>> u = Unique([0, 1, 2]) >>> u.replace(1, 'spam') >>> u Unique([0, 'spam', 2]) >>> u.replace('eggs', 1) Traceback (most recent call last): ... ValueError: 'eggs' is not in list >>> u.replace('spam', 0) Traceback (most recent call last): ... ValueError: 0 already in list
[ "Replace", "an", "item", "preserving", "order", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/tools.py#L58-L81
train
24,014
xflr6/concepts
concepts/tools.py
Unique.move
def move(self, item, new_index): """Move an item to the given position. >>> u = Unique(['spam', 'eggs']) >>> u.move('spam', 1) >>> u Unique(['eggs', 'spam']) >>> u.move('ham', 0) Traceback (most recent call last): ... ValueError: 'ham' is not in list """ idx = self._items.index(item) if idx != new_index: item = self._items.pop(idx) self._items.insert(new_index, item)
python
def move(self, item, new_index): """Move an item to the given position. >>> u = Unique(['spam', 'eggs']) >>> u.move('spam', 1) >>> u Unique(['eggs', 'spam']) >>> u.move('ham', 0) Traceback (most recent call last): ... ValueError: 'ham' is not in list """ idx = self._items.index(item) if idx != new_index: item = self._items.pop(idx) self._items.insert(new_index, item)
[ "def", "move", "(", "self", ",", "item", ",", "new_index", ")", ":", "idx", "=", "self", ".", "_items", ".", "index", "(", "item", ")", "if", "idx", "!=", "new_index", ":", "item", "=", "self", ".", "_items", ".", "pop", "(", "idx", ")", "self", ...
Move an item to the given position. >>> u = Unique(['spam', 'eggs']) >>> u.move('spam', 1) >>> u Unique(['eggs', 'spam']) >>> u.move('ham', 0) Traceback (most recent call last): ... ValueError: 'ham' is not in list
[ "Move", "an", "item", "to", "the", "given", "position", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/tools.py#L83-L99
train
24,015
xflr6/concepts
concepts/tools.py
Unique.issuperset
def issuperset(self, items): """Return whether this collection contains all items. >>> Unique(['spam', 'eggs']).issuperset(['spam', 'spam', 'spam']) True """ return all(_compat.map(self._seen.__contains__, items))
python
def issuperset(self, items): """Return whether this collection contains all items. >>> Unique(['spam', 'eggs']).issuperset(['spam', 'spam', 'spam']) True """ return all(_compat.map(self._seen.__contains__, items))
[ "def", "issuperset", "(", "self", ",", "items", ")", ":", "return", "all", "(", "_compat", ".", "map", "(", "self", ".", "_seen", ".", "__contains__", ",", "items", ")", ")" ]
Return whether this collection contains all items. >>> Unique(['spam', 'eggs']).issuperset(['spam', 'spam', 'spam']) True
[ "Return", "whether", "this", "collection", "contains", "all", "items", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/tools.py#L101-L107
train
24,016
xflr6/concepts
concepts/tools.py
Unique.rsub
def rsub(self, items): """Return order preserving unique items not in this collection. >>> Unique(['spam']).rsub(['ham', 'spam', 'eggs']) Unique(['ham', 'eggs']) """ ignore = self._seen seen = set() add = seen.add items = [i for i in items if i not in ignore and i not in seen and not add(i)] return self._fromargs(seen, items)
python
def rsub(self, items): """Return order preserving unique items not in this collection. >>> Unique(['spam']).rsub(['ham', 'spam', 'eggs']) Unique(['ham', 'eggs']) """ ignore = self._seen seen = set() add = seen.add items = [i for i in items if i not in ignore and i not in seen and not add(i)] return self._fromargs(seen, items)
[ "def", "rsub", "(", "self", ",", "items", ")", ":", "ignore", "=", "self", ".", "_seen", "seen", "=", "set", "(", ")", "add", "=", "seen", ".", "add", "items", "=", "[", "i", "for", "i", "in", "items", "if", "i", "not", "in", "ignore", "and", ...
Return order preserving unique items not in this collection. >>> Unique(['spam']).rsub(['ham', 'spam', 'eggs']) Unique(['ham', 'eggs'])
[ "Return", "order", "preserving", "unique", "items", "not", "in", "this", "collection", "." ]
2801b27b05fa02cccee7d549451810ffcbf5c942
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/tools.py#L109-L120
train
24,017
scrapinghub/skinfer
skinfer/json_schema_merger.py
merge_schema
def merge_schema(first, second): """Returns the result of merging the two given schemas. """ if not (type(first) == type(second) == dict): raise ValueError("Argument is not a schema") if not (first.get('type') == second.get('type') == 'object'): raise NotImplementedError("Unsupported root type") return merge_objects(first, second)
python
def merge_schema(first, second): """Returns the result of merging the two given schemas. """ if not (type(first) == type(second) == dict): raise ValueError("Argument is not a schema") if not (first.get('type') == second.get('type') == 'object'): raise NotImplementedError("Unsupported root type") return merge_objects(first, second)
[ "def", "merge_schema", "(", "first", ",", "second", ")", ":", "if", "not", "(", "type", "(", "first", ")", "==", "type", "(", "second", ")", "==", "dict", ")", ":", "raise", "ValueError", "(", "\"Argument is not a schema\"", ")", "if", "not", "(", "fir...
Returns the result of merging the two given schemas.
[ "Returns", "the", "result", "of", "merging", "the", "two", "given", "schemas", "." ]
7db5bc8b27229f20b718a8f5a1d219b1b0396316
https://github.com/scrapinghub/skinfer/blob/7db5bc8b27229f20b718a8f5a1d219b1b0396316/skinfer/json_schema_merger.py#L176-L185
train
24,018
scrapinghub/skinfer
skinfer/schema_inferer.py
generate_and_merge_schemas
def generate_and_merge_schemas(samples): """Iterates through the given samples, generating schemas and merging them, returning the resulting merged schema. """ merged = generate_schema_for_sample(next(iter(samples))) for sample in samples: merged = merge_schema(merged, generate_schema_for_sample(sample)) return merged
python
def generate_and_merge_schemas(samples): """Iterates through the given samples, generating schemas and merging them, returning the resulting merged schema. """ merged = generate_schema_for_sample(next(iter(samples))) for sample in samples: merged = merge_schema(merged, generate_schema_for_sample(sample)) return merged
[ "def", "generate_and_merge_schemas", "(", "samples", ")", ":", "merged", "=", "generate_schema_for_sample", "(", "next", "(", "iter", "(", "samples", ")", ")", ")", "for", "sample", "in", "samples", ":", "merged", "=", "merge_schema", "(", "merged", ",", "ge...
Iterates through the given samples, generating schemas and merging them, returning the resulting merged schema.
[ "Iterates", "through", "the", "given", "samples", "generating", "schemas", "and", "merging", "them", "returning", "the", "resulting", "merged", "schema", "." ]
7db5bc8b27229f20b718a8f5a1d219b1b0396316
https://github.com/scrapinghub/skinfer/blob/7db5bc8b27229f20b718a8f5a1d219b1b0396316/skinfer/schema_inferer.py#L42-L52
train
24,019
krischer/mtspec
mtspec/multitaper.py
sine_psd
def sine_psd(data, delta, number_of_tapers=None, number_of_iterations=2, degree_of_smoothing=1.0, statistics=False, verbose=False): """ Wrapper method for the sine_psd subroutine in the library by German A. Prieto. The subroutine is in charge of estimating the adaptive sine multitaper as in Riedel and Sidorenko (1995). It outputs the power spectral density (PSD). This is done by performing a MSE adaptive estimation. First a pilot spectral estimate is used, and S" is estimated, in order to get te number of tapers to use, using (13) of Riedel and Sidorenko for a min square error spectrum. Unlike the prolate spheroidal multitapers, the sine multitaper adaptive process introduces a variable resolution and error in the frequency domain. Complete error information is contained in the output variables as the corridor of 1-standard-deviation errors, and in the number of tapers used at each frequency. The errors are estimated in the simplest way, from the number of degrees of freedom (two per taper), not by jack-knifing. The frequency resolution is found from K*fN/Nf where fN is the Nyquist frequency and Nf is the number of frequencies estimated. The adaptive process used is as follows. A quadratic fit to the log PSD within an adaptively determined frequency band is used to find an estimate of the local second derivative of the spectrum. This is used in an equation like R & S (13) for the MSE taper number, with the difference that a parabolic weighting is applied with increasing taper order. Because the FFTs of the tapered series can be found by resampling the FFT of the original time series (doubled in length and padded with zeros) only one FFT is required per series, no matter how many tapers are used. This makes the program fast. Compared with the Thomson multitaper programs, this code is not only fast but simple and short. The spectra associated with the sine tapers are weighted before averaging with a parabolically varying weight. 
The expression for the optimal number of tapers given by R & S must be modified since it gives an unbounded result near points where S" vanishes, which happens at many points in most spectra. This program restricts the rate of growth of the number of tapers so that a neighboring covering interval estimate is never completely contained in the next such interval. This method SHOULD not be used for sharp cutoffs or deep valleys, or small sample sizes. Instead use Thomson multitaper in mtspec in this same library. :param data: :class:`numpy.ndarray` Array with the data. :param delta: float Sample spacing of the data. :param number_of_tapers: integer/None, optional Number of tapers to use. If none is given, the library will perform an adaptive taper estimation with a varying number of tapers for each frequency. Defaults to None. :param number_of_iterations: integer, optional Number of iterations to perform. Values less than 2 will be set to 2. Defaults to 2. :param degree_of_smoothing: float, optional Degree of smoothing. Defaults to 1.0. :param statistics: bool, optional Calculates and returns statistics. See the notes in the docstring for further details. :param verbose: bool, optional Passed to the fortran library. Defaults to False. :return: Returns a list with :class:`numpy.ndarray`. See the note below for details. .. note:: This method will at return at least two arrays: The calculated spectrum and the corresponding frequencies. If statistics is True is will also return (in the given order) (multidimensional) arrays containing the 1-std errors (a simple dof estimate) and the number of tapers used for each frequency point. """ # Verbose mode on or off. if verbose is True: verbose = C.byref(C.c_char('y')) else: verbose = None # Set the number of tapers so it can be read by the library. if number_of_tapers is None: number_of_tapers = 0 # initialize _MtspecType to save some space mt = _MtspecType("float32") # Transform the data to work with the library. 
data = np.require(data, dtype=mt.float, requirements=[mt.order]) # Some variables necessary to call the library. npts = len(data) number_of_frequency_bins = int(npts / 2) + 1 # Create output arrays. frequency_bins = mt.empty(number_of_frequency_bins) spectrum = mt.empty(number_of_frequency_bins) # Create optional arrays or set to None. if statistics is True: # here an exception, mt sets the type float32, here we need int32 # that is do all the type and POINTER definition once by hand tapers_per_freq_point = np.empty(number_of_frequency_bins, dtype='int32', order=mt.order) tapers_per_freq_point_p = \ tapers_per_freq_point.ctypes.data_as(C.POINTER(C.c_int)) errors = mt.empty((number_of_frequency_bins, 2)) else: tapers_per_freq_point_p = errors = None # Call the library. Fortran passes pointers! mtspeclib.sine_psd_( C.byref(C.c_int(npts)), C.byref(C.c_float(delta)), mt.p(data), C.byref(C.c_int(number_of_tapers)), C.byref(C.c_int(number_of_iterations)), C.byref(C.c_float(degree_of_smoothing)), C.byref(C.c_int(number_of_frequency_bins)), mt.p(frequency_bins), mt.p(spectrum), tapers_per_freq_point_p, mt.p(errors), verbose) # Calculate return values. return_values = [spectrum, frequency_bins] if statistics is True: return_values.extend([errors, tapers_per_freq_point]) return return_values
python
def sine_psd(data, delta, number_of_tapers=None, number_of_iterations=2,
             degree_of_smoothing=1.0, statistics=False, verbose=False):
    """
    Wrapper for the sine_psd subroutine in the Fortran library by
    German A. Prieto.

    Estimates the adaptive sine multitaper power spectral density (PSD)
    as in Riedel and Sidorenko (1995) via MSE adaptive estimation: a
    pilot spectral estimate is used and S'' is estimated in order to get
    the number of tapers to use, using eq. (13) of Riedel and Sidorenko
    for a minimum square error spectrum.

    Unlike the prolate spheroidal multitapers, the sine multitaper
    adaptive process introduces a variable resolution and error in the
    frequency domain. Errors are estimated in the simplest way, from the
    number of degrees of freedom (two per taper), not by jack-knifing.
    The frequency resolution is K*fN/Nf where fN is the Nyquist
    frequency and Nf is the number of frequencies estimated. Because the
    FFTs of the tapered series can be found by resampling the FFT of the
    original time series (doubled in length and padded with zeros), only
    one FFT is required per series no matter how many tapers are used.

    This method SHOULD NOT be used for sharp cutoffs, deep valleys, or
    small sample sizes. Use the Thomson multitaper (mtspec) in this same
    library instead.

    :param data: :class:`numpy.ndarray`
        Array with the data.
    :param delta: float
        Sample spacing of the data.
    :param number_of_tapers: integer/None, optional
        Number of tapers to use. If None is given, the library will
        perform an adaptive taper estimation with a varying number of
        tapers for each frequency. Defaults to None.
    :param number_of_iterations: integer, optional
        Number of iterations to perform. Values less than 2 will be set
        to 2. Defaults to 2.
    :param degree_of_smoothing: float, optional
        Degree of smoothing. Defaults to 1.0.
    :param statistics: bool, optional
        Calculates and returns statistics. See the note below.
    :param verbose: bool, optional
        Passed to the fortran library. Defaults to False.
    :return: Returns a list with :class:`numpy.ndarray`. See the note
        below for details.

    .. note::

        This method will return at least two arrays: the calculated
        spectrum and the corresponding frequencies. If ``statistics`` is
        True it will also return (in the given order) (multidimensional)
        arrays containing the 1-std errors (a simple dof estimate) and
        the number of tapers used for each frequency point.
    """
    # The verbose flag is a Fortran character argument; ctypes' c_char
    # requires a bytes object under Python 3 ('y' would raise TypeError).
    if verbose is True:
        verbose = C.byref(C.c_char(b'y'))
    else:
        verbose = None

    # 0 tells the library to adaptively choose the number of tapers.
    if number_of_tapers is None:
        number_of_tapers = 0

    # initialize _MtspecType to save some space
    mt = _MtspecType("float32")

    # Transform the data to work with the library (dtype + memory order).
    data = np.require(data, dtype=mt.float, requirements=[mt.order])

    # Some variables necessary to call the library.
    npts = len(data)
    number_of_frequency_bins = int(npts / 2) + 1

    # Create output arrays.
    frequency_bins = mt.empty(number_of_frequency_bins)
    spectrum = mt.empty(number_of_frequency_bins)

    # Create optional arrays or set to None.
    if statistics is True:
        # Exception to mt's float32 handling: taper counts are int32,
        # so do the type and POINTER definition by hand here.
        tapers_per_freq_point = np.empty(number_of_frequency_bins,
                                         dtype='int32', order=mt.order)
        tapers_per_freq_point_p = \
            tapers_per_freq_point.ctypes.data_as(C.POINTER(C.c_int))
        errors = mt.empty((number_of_frequency_bins, 2))
    else:
        tapers_per_freq_point_p = errors = None

    # Call the library. Fortran passes pointers!
    mtspeclib.sine_psd_(
        C.byref(C.c_int(npts)),
        C.byref(C.c_float(delta)),
        mt.p(data),
        C.byref(C.c_int(number_of_tapers)),
        C.byref(C.c_int(number_of_iterations)),
        C.byref(C.c_float(degree_of_smoothing)),
        C.byref(C.c_int(number_of_frequency_bins)),
        mt.p(frequency_bins),
        mt.p(spectrum),
        tapers_per_freq_point_p,
        mt.p(errors),
        verbose)

    # Calculate return values.
    return_values = [spectrum, frequency_bins]
    if statistics is True:
        return_values.extend([errors, tapers_per_freq_point])
    return return_values
[ "def", "sine_psd", "(", "data", ",", "delta", ",", "number_of_tapers", "=", "None", ",", "number_of_iterations", "=", "2", ",", "degree_of_smoothing", "=", "1.0", ",", "statistics", "=", "False", ",", "verbose", "=", "False", ")", ":", "# Verbose mode on or of...
Wrapper method for the sine_psd subroutine in the library by German A. Prieto. The subroutine is in charge of estimating the adaptive sine multitaper as in Riedel and Sidorenko (1995). It outputs the power spectral density (PSD). This is done by performing a MSE adaptive estimation. First a pilot spectral estimate is used, and S" is estimated, in order to get te number of tapers to use, using (13) of Riedel and Sidorenko for a min square error spectrum. Unlike the prolate spheroidal multitapers, the sine multitaper adaptive process introduces a variable resolution and error in the frequency domain. Complete error information is contained in the output variables as the corridor of 1-standard-deviation errors, and in the number of tapers used at each frequency. The errors are estimated in the simplest way, from the number of degrees of freedom (two per taper), not by jack-knifing. The frequency resolution is found from K*fN/Nf where fN is the Nyquist frequency and Nf is the number of frequencies estimated. The adaptive process used is as follows. A quadratic fit to the log PSD within an adaptively determined frequency band is used to find an estimate of the local second derivative of the spectrum. This is used in an equation like R & S (13) for the MSE taper number, with the difference that a parabolic weighting is applied with increasing taper order. Because the FFTs of the tapered series can be found by resampling the FFT of the original time series (doubled in length and padded with zeros) only one FFT is required per series, no matter how many tapers are used. This makes the program fast. Compared with the Thomson multitaper programs, this code is not only fast but simple and short. The spectra associated with the sine tapers are weighted before averaging with a parabolically varying weight. 
The expression for the optimal number of tapers given by R & S must be modified since it gives an unbounded result near points where S" vanishes, which happens at many points in most spectra. This program restricts the rate of growth of the number of tapers so that a neighboring covering interval estimate is never completely contained in the next such interval. This method SHOULD not be used for sharp cutoffs or deep valleys, or small sample sizes. Instead use Thomson multitaper in mtspec in this same library. :param data: :class:`numpy.ndarray` Array with the data. :param delta: float Sample spacing of the data. :param number_of_tapers: integer/None, optional Number of tapers to use. If none is given, the library will perform an adaptive taper estimation with a varying number of tapers for each frequency. Defaults to None. :param number_of_iterations: integer, optional Number of iterations to perform. Values less than 2 will be set to 2. Defaults to 2. :param degree_of_smoothing: float, optional Degree of smoothing. Defaults to 1.0. :param statistics: bool, optional Calculates and returns statistics. See the notes in the docstring for further details. :param verbose: bool, optional Passed to the fortran library. Defaults to False. :return: Returns a list with :class:`numpy.ndarray`. See the note below for details. .. note:: This method will at return at least two arrays: The calculated spectrum and the corresponding frequencies. If statistics is True is will also return (in the given order) (multidimensional) arrays containing the 1-std errors (a simple dof estimate) and the number of tapers used for each frequency point.
[ "Wrapper", "method", "for", "the", "sine_psd", "subroutine", "in", "the", "library", "by", "German", "A", ".", "Prieto", "." ]
06561b6370f13fcb2e731470ba0f7314f4b2362d
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/mtspec/multitaper.py#L183-L298
train
24,020
krischer/mtspec
mtspec/multitaper.py
dpss
def dpss(npts, fw, number_of_tapers, auto_spline=True, npts_max=None):
    """
    Calculate DPSS, also known as Slepian sequences or Slepian tapers.

    Computes the Discrete Prolate Spheroidal Sequences together with
    their eigenvalues and the corresponding (1 - eigenvalue) terms.
    Wraps the ``dpss()`` subroutine from the Fortran library. By default
    spline interpolation is used for sequences with more than 200,000
    samples.

    .. note::

        The tapers are the eigenvectors of the tridiagonal matrix
        sigma(i, j) [see Slepian (1978) eq. 14 and 25]. They are also
        the eigenvectors of the Toeplitz matrix, eq. 18.

    :param npts: The number of points in the series.
    :type npts: int
    :param fw: The time-bandwidth product (number of Rayleigh bins).
    :type fw: float
    :param number_of_tapers: The desired number of tapers.
    :type number_of_tapers: int
    :param auto_spline: Whether or not to automatically use spline
        interpolation for ``npts`` > 200000.
    :type auto_spline: bool
    :param npts_max: The number of actual points used to calculate the
        DPSS. If this number is smaller than ``npts``, spline
        interpolation is performed regardless of ``auto_spline``.
    :type npts_max: None or int
    :returns: ``(v, lambda, theta)`` with ``v(npts, number_of_tapers)``
        the eigenvectors (tapers), ``lambda`` the eigenvalues of the
        ``v``'s and ``theta`` the 1 - ``lambda`` (energy outside the
        bandwidth) values.
    """
    # The DPSS routine works in double precision.
    mt = _MtspecType("float64")

    # Output arrays filled in-place by the Fortran code.
    v = mt.empty((npts, number_of_tapers))
    lamb = mt.empty(number_of_tapers)
    theta = mt.empty(number_of_tapers)

    # An explicit npts_max below npts forces the spline path; otherwise
    # fall back to the 200,000-sample default threshold.
    if npts_max and npts_max < npts:
        auto_spline = True
    else:
        npts_max = 200000

    # Dispatch to the spline-interpolated or the plain routine.
    use_spline = auto_spline is True and npts > npts_max
    if use_spline:
        mtspeclib.dpss_spline_(
            C.byref(C.c_int(npts_max)), C.byref(C.c_int(npts)),
            C.byref(C.c_double(fw)), C.byref(C.c_int(number_of_tapers)),
            mt.p(v), mt.p(lamb), mt.p(theta))
    else:
        mtspeclib.dpss_(
            C.byref(C.c_int(npts)), C.byref(C.c_double(fw)),
            C.byref(C.c_int(number_of_tapers)),
            mt.p(v), mt.p(lamb), mt.p(theta))

    return (v, lamb, theta)
python
def dpss(npts, fw, number_of_tapers, auto_spline=True, npts_max=None):
    """
    Calculate DPSS (Slepian sequences / Slepian tapers).

    Computes the Discrete Prolate Spheroidal Sequences, the
    correspondent eigenvalues and the (1 - eigenvalue) terms by wrapping
    the ``dpss()`` subroutine from the Fortran library. Sequences longer
    than 200,000 samples are, by default, produced via spline
    interpolation of a shorter computation.

    .. note::

        The tapers are the eigenvectors of the tridiagonal matrix
        sigma(i, j) [see Slepian (1978) eq. 14 and 25] and also of the
        Toeplitz matrix, eq. 18.

    :param npts: The number of points in the series.
    :type npts: int
    :param fw: The time-bandwidth product (number of Rayleigh bins).
    :type fw: float
    :param number_of_tapers: The desired number of tapers.
    :type number_of_tapers: int
    :param auto_spline: Whether or not to automatically use spline
        interpolation for ``npts`` > 200000.
    :type auto_spline: bool
    :param npts_max: The number of actual points to calculate the DPSS.
        If smaller than ``npts``, spline interpolation is performed,
        regardless of the value of ``auto_spline``.
    :type npts_max: None or int
    :returns: Tuple ``(v, lambda, theta)``: the eigenvectors (tapers)
        ``v(npts, number_of_tapers)``, their eigenvalues ``lambda`` and
        the 1 - ``lambda`` (energy outside the bandwidth) values
        ``theta``.
    """
    # All DPSS computations happen in double precision.
    mt = _MtspecType("float64")

    # Pre-allocate the arrays the Fortran routine writes into.
    tapers = mt.empty((npts, number_of_tapers))
    eigenvalues = mt.empty(number_of_tapers)
    one_minus_eigenvalues = mt.empty(number_of_tapers)

    # A user-supplied npts_max smaller than npts always triggers the
    # spline path; without one, apply the default 200,000 threshold.
    if npts_max and npts_max < npts:
        auto_spline = True
    else:
        npts_max = 200000

    if auto_spline is True and npts > npts_max:
        mtspeclib.dpss_spline_(
            C.byref(C.c_int(npts_max)),
            C.byref(C.c_int(npts)),
            C.byref(C.c_double(fw)),
            C.byref(C.c_int(number_of_tapers)),
            mt.p(tapers),
            mt.p(eigenvalues),
            mt.p(one_minus_eigenvalues))
    else:
        mtspeclib.dpss_(
            C.byref(C.c_int(npts)),
            C.byref(C.c_double(fw)),
            C.byref(C.c_int(number_of_tapers)),
            mt.p(tapers),
            mt.p(eigenvalues),
            mt.p(one_minus_eigenvalues))

    return (tapers, eigenvalues, one_minus_eigenvalues)
[ "def", "dpss", "(", "npts", ",", "fw", ",", "number_of_tapers", ",", "auto_spline", "=", "True", ",", "npts_max", "=", "None", ")", ":", "mt", "=", "_MtspecType", "(", "\"float64\"", ")", "v", "=", "mt", ".", "empty", "(", "(", "npts", ",", "number_o...
Calculates DPSS also known as Slepian sequences or Slepian tapers. Calculation of the DPSS (Discrete Prolate Spheroidal Sequences) and the correspondent eigenvalues. The (1 - eigenvalue) terms are also calculated. Wraps the ``dpss()`` subroutine from the Fortran library. By default this routine will use spline interpolation if sequences with more than 200.000 samples are requested. .. note:: The tapers are the eigenvectors of the tridiagonal matrix sigma(i, j) [see Slepian(1978) eq 14 and 25]. They are also the eigenvectors of the Toeplitz matrix, eq. 18. :param npts: The number of points in the series. :type npts: int :param fw: The time-bandwidth product (number of Rayleigh bins). :type fw: float :param number_of_tapers: The desired number of tapers. :type number_of_tapers: int :param auto_spline: Whether or not to automatically use spline interpolation for ``npts`` > 200000. :type auto_spline: bool :param npts_max: The number of actual points to calculate the DPSS. If this number is smaller than ``npts``, spline interpolation will be performed, regardless of the value of ``auto_spline``. :type npts_max: None or int :returns: ``(v, lambda, theta)`` with ``v(npts, number_of_tapers)`` the eigenvectors (tapers), ``lambda`` the eigenvalues of the ``v``'s and ``theta`` the 1 - ``lambda`` (energy outside the bandwidth) values. .. rubric:: Example This example demonstrates how to calculate and plot the first five DPSS'. >>> import matplotlib.pyplot as plt >>> from mtspec import dpss >>> tapers, lamb, theta = dpss(512, 2.5, 5) >>> for i in range(5): ... plt.plot(tapers[:, i]) .. plot :: # Same as the code snippet in the docstring, just a bit prettier. import matplotlib.pyplot as plt plt.style.use("ggplot") from mtspec import dpss tapers, lamb, theta = dpss(512, 2.5, 5) for i in range(5): plt.plot(tapers[:, i]) plt.xlim(0, 512) plt.ylim(-0.09, 0.09) plt.tight_layout()
[ "Calculates", "DPSS", "also", "known", "as", "Slepian", "sequences", "or", "Slepian", "tapers", "." ]
06561b6370f13fcb2e731470ba0f7314f4b2362d
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/mtspec/multitaper.py#L301-L384
train
24,021
krischer/mtspec
mtspec/multitaper.py
wigner_ville_spectrum
def wigner_ville_spectrum(data, delta, time_bandwidth=3.5,
                          number_of_tapers=None, smoothing_filter=None,
                          filter_width=100, frequency_divider=1,
                          verbose=False):
    """
    Calculate the Wigner-Ville distribution or Wigner-Ville spectrum of
    a signal using multitaper spectral estimates.

    In general it gives better temporal and frequency resolution than a
    spectrogram but introduces many artifacts and possibly negative
    values which are not physical. This can be alleviated a bit by
    applying a smoothing kernel, which is also known as a reduced
    interference distribution (RID).

    Wraps the ``wv_spec()`` and ``wv_spec_to_array()`` subroutines of
    the Fortran library. It is very slow for large arrays so try with a
    small one (< 5000 samples) first.

    :param data: The input signal.
    :type data: numpy.ndarray
    :param delta: The sampling interval of the data.
    :type delta: float
    :param time_bandwidth: Time bandwidth product for the tapers.
    :type time_bandwidth: float
    :param number_of_tapers: Number of tapers to use. If ``None``, the
        number will be automatically determined from the time bandwidth
        product, which is usually the optimal choice.
    :type number_of_tapers: int
    :param smoothing_filter: One of ``"boxcar"``, ``"gauss"`` or just
        ``None``.
    :type smoothing_filter: str
    :param filter_width: Filter width in samples.
    :type filter_width: int
    :param frequency_divider: This method will always calculate all
        frequencies from 0 ... Nyquist frequency. This parameter allows
        the adjustment of the maximum frequency, so that the frequencies
        range from 0 .. Nyquist frequency / int(frequency_divider).
    :type frequency_divider: int
    :param verbose: Verbose output on/off.
    :type verbose: bool
    :returns: 2D :class:`numpy.ndarray` with the time-frequency
        distribution (one row per returned frequency bin).
    """
    data = np.require(data, 'float32')
    mt = _MtspecType("float32")
    npts = len(data)

    # Use the optimal number of tapers in case no number is specified.
    if number_of_tapers is None:
        number_of_tapers = int(2 * time_bandwidth) - 1

    # Map the filter name onto the integer code the Fortran code uses.
    if not smoothing_filter:
        smoothing_filter = 0
    elif smoothing_filter == 'boxcar':
        smoothing_filter = 1
    elif smoothing_filter == 'gauss':
        smoothing_filter = 2
    else:
        msg = 'Invalid value for smoothing filter.'
        raise Exception(msg)

    # The verbose flag is a Fortran character argument; ctypes' c_char
    # requires a bytes object under Python 3 ('y' would raise TypeError).
    if verbose:
        verbose = C.byref(C.c_char(b'y'))
    else:
        verbose = None

    # Allocate the output array.
    # The f90 code internally pads zeros to 2 * npts, i.e. only every
    # second frequency point is returned, thus the decreased array size.
    output = mt.empty((npts // 2 // int(frequency_divider) + 1, npts))

    mtspeclib.wv_spec_to_array_(C.byref(C.c_int(npts)),
                                C.byref(C.c_float(delta)),
                                mt.p(data), mt.p(output),
                                C.byref(C.c_float(time_bandwidth)),
                                C.byref(C.c_int(number_of_tapers)),
                                C.byref(C.c_int(smoothing_filter)),
                                C.byref(C.c_float(filter_width)),
                                C.byref(C.c_int(frequency_divider)),
                                verbose)
    return output
python
def wigner_ville_spectrum(data, delta, time_bandwidth=3.5,
                          number_of_tapers=None, smoothing_filter=None,
                          filter_width=100, frequency_divider=1,
                          verbose=False):
    """
    Calculate the Wigner-Ville distribution or Wigner-Ville spectrum of
    a signal using multitaper spectral estimates.

    In general it gives better temporal and frequency resolution than a
    spectrogram but introduces many artifacts and possibly negative
    values which are not physical. This can be alleviated a bit by
    applying a smoothing kernel, also known as a reduced interference
    distribution (RID).

    Wraps the ``wv_spec()`` and ``wv_spec_to_array()`` subroutines of
    the Fortran library. It is very slow for large arrays so try with a
    small one (< 5000 samples) first.

    :param data: The input signal.
    :type data: numpy.ndarray
    :param delta: The sampling interval of the data.
    :type delta: float
    :param time_bandwidth: Time bandwidth product for the tapers.
    :type time_bandwidth: float
    :param number_of_tapers: Number of tapers to use. If ``None``, the
        number will be automatically determined from the time bandwidth
        product, which is usually the optimal choice.
    :type number_of_tapers: int
    :param smoothing_filter: One of ``"boxcar"``, ``"gauss"`` or just
        ``None``.
    :type smoothing_filter: str
    :param filter_width: Filter width in samples.
    :type filter_width: int
    :param frequency_divider: This method will always calculate all
        frequencies from 0 ... Nyquist frequency. This parameter allows
        the adjustment of the maximum frequency, so that the frequencies
        range from 0 .. Nyquist frequency / int(frequency_divider).
    :type frequency_divider: int
    :param verbose: Verbose output on/off.
    :type verbose: bool
    :returns: 2D :class:`numpy.ndarray` with the time-frequency
        distribution (one row per returned frequency bin).
    """
    data = np.require(data, 'float32')
    mt = _MtspecType("float32")
    npts = len(data)

    # Use the optimal number of tapers in case no number is specified.
    if number_of_tapers is None:
        number_of_tapers = int(2 * time_bandwidth) - 1

    # Determine the integer filter code expected by the Fortran routine.
    if not smoothing_filter:
        smoothing_filter = 0
    elif smoothing_filter == 'boxcar':
        smoothing_filter = 1
    elif smoothing_filter == 'gauss':
        smoothing_filter = 2
    else:
        msg = 'Invalid value for smoothing filter.'
        raise Exception(msg)

    # Verbose mode on or off. The flag is a Fortran character; ctypes'
    # c_char needs bytes on Python 3 (a str 'y' raises TypeError).
    if verbose:
        verbose = C.byref(C.c_char(b'y'))
    else:
        verbose = None

    # Allocate the output array.
    # The f90 code internally pads zeros to 2 * npts; only every second
    # frequency point is returned, hence the smaller array.
    output = mt.empty((npts // 2 // int(frequency_divider) + 1, npts))

    mtspeclib.wv_spec_to_array_(C.byref(C.c_int(npts)),
                                C.byref(C.c_float(delta)),
                                mt.p(data), mt.p(output),
                                C.byref(C.c_float(time_bandwidth)),
                                C.byref(C.c_int(number_of_tapers)),
                                C.byref(C.c_int(smoothing_filter)),
                                C.byref(C.c_float(filter_width)),
                                C.byref(C.c_int(frequency_divider)),
                                verbose)
    return output
[ "def", "wigner_ville_spectrum", "(", "data", ",", "delta", ",", "time_bandwidth", "=", "3.5", ",", "number_of_tapers", "=", "None", ",", "smoothing_filter", "=", "None", ",", "filter_width", "=", "100", ",", "frequency_divider", "=", "1", ",", "verbose", "=", ...
Function to calculate the Wigner-Ville Distribution or Wigner-Ville Spectrum of a signal using multitaper spectral estimates. In general it gives better temporal and frequency resolution than a spectrogram but introduces many artifacts and possibly negative values which are not physical. This can be alleviated a bit by applying a smoothing kernel which is also known as a reduced interference distribution (RID). Wraps the ``wv_spec()`` and ``wv_spec_to_array()`` subroutines of the Fortran library. It is very slow for large arrays so try with a small one (< 5000 samples) first. :param data: The input signal. :type data: numpy.ndarray :param delta: The sampling interval of the data. :type delta: float :param time_bandwidth: Time bandwidth product for the tapers. :type time_bandwidth: float :param number_of_tapers: Number of tapers to use. If ``None``, the number will be automatically determined from the time bandwidth product which is usually the optimal choice. :type number_of_tapers: int :param smoothing_filter: One of ``"boxcar"``, ``"gauss"`` or just ``None`` :type smoothing_filter: str :param filter_width: Filter width in samples. :type filter_width: int :param frequency_divider: This method will always calculate all frequencies from 0 ... Nyquist frequency. This parameter allows the adjustment of the maximum frequency, so that the frequencies range from 0 .. Nyquist frequency / int(frequency_divider). :type frequency_divider: int :param verbose: Verbose output on/off. :type verbose: bool .. rubric:: Example This example demonstrates how to plot a signal, its multitaper spectral estimate, and its Wigner-Ville time-frequency distribution. The signal is sinusoidal overlaid with two simple linear chirps. >>> import matplotlib.pyplot as plt >>> import numpy as np >>> from mtspec import mtspec, wigner_ville_spectrum >>> from mtspec.util import signal_bursts >>> fig = plt.figure() Get the example signal. >>> data = signal_bursts() Plot the data on the top axes. 
>>> ax1 = fig.add_axes([0.2,0.75, 0.79, 0.23]) >>> ax1.plot(data, color="0.1") >>> ax1.set_xlim(0, len(data)) Plot its spectral estimate on the side. >>> ax2 = fig.add_axes([0.06,0.02,0.13,0.69]) >>> spec, freq = mtspec(data, 10, 3.5) >>> ax2.plot(spec, freq, color="0.1") >>> ax2.set_xlim(0, spec.max()) >>> ax2.set_ylim(freq[0], freq[-1]) >>> ax2.set_xticks([]) Create and plot the Wigner-Ville distribution. >>> wv = wigner_ville_spectrum(data, 10, 3.5, ... smoothing_filter='gauss') >>> ax3 = fig.add_axes([0.2, 0.02, 0.79, 0.69]) >>> ax3.set_yticks([]) >>> ax3.set_xticks([]) >>> # The square root only serves plotting purposes. >>> ax3.imshow(np.sqrt(abs(wv)), interpolation='lanczos', ... aspect='auto', cmap="magma") .. plot:: # Same as the above code snippet just a bit prettier. import matplotlib.pyplot as plt plt.style.use("ggplot") from mtspec import mtspec, wigner_ville_spectrum from mtspec.util import signal_bursts import numpy as np fig = plt.figure() data = signal_bursts() # Plot the data ax1 = fig.add_axes([0.2,0.75, 0.79, 0.23]) ax1.plot(data, color="0.1") ax1.set_xlim(0, len(data)) # Plot multitaper spectrum ax2 = fig.add_axes([0.06,0.02,0.13,0.69]) spec, freq = mtspec(data, 10, 3.5) ax2.plot(spec, freq, color="0.1") ax2.set_xlim(0, spec.max()) ax2.set_ylim(freq[0], freq[-1]) ax2.set_xticks([]) # Create the wigner ville spectrum wv = wigner_ville_spectrum(data, 10, 3.5, smoothing_filter='gauss') # Plot the WV ax3 = fig.add_axes([0.2, 0.02, 0.79, 0.69]) ax3.set_yticks([]) ax3.set_xticks([]) ax3.imshow(np.sqrt(abs(wv)), interpolation='lanczos', aspect='auto', cmap="magma")
[ "Function", "to", "calculate", "the", "Wigner", "-", "Ville", "Distribution", "or", "Wigner", "-", "Ville", "Spectrum", "of", "a", "signal", "using", "multitaper", "spectral", "estimates", "." ]
06561b6370f13fcb2e731470ba0f7314f4b2362d
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/mtspec/multitaper.py#L387-L546
train
24,022
krischer/mtspec
mtspec/multitaper.py
mt_deconvolve
def mt_deconvolve(data_a, data_b, delta, nfft=None, time_bandwidth=None, number_of_tapers=None, weights="adaptive", demean=True, fmax=0.0): """ Deconvolve two time series using multitapers. This uses the eigencoefficients and the weights from the multitaper spectral estimations and more or less follows this paper: .. |br| raw:: html <br /> **Receiver Functions from Multiple-Taper Spectral Correlation Estimates** *Jeffrey Park, Vadim Levin* |br| Bulletin of the Seismological Society of America Dec 2000, 90 (6) 1507-1520 http://dx.doi.org/10.1785/0119990122 :type data_a: :class:`numpy.ndarray` :param data_a: Data for first time series. :type data_b: :class:`numpy.ndarray` :param data_b: Data for second time series. :type delta: float :param delta: Sample spacing of the data. :type nfft: int :param nfft: Number of points for the FFT. If ``nfft == None``, no zero padding will be applied before the FFT. :type time_bandwidth: float :param time_bandwidth: Time-bandwidth product. Common values are 2, 3, 4, and numbers in between. :type number_of_tapers: int :param number_of_tapers: Number of tapers to use. Defaults to ``int(2*time_bandwidth) - 1``. This is maximum senseful amount. More tapers will have no great influence on the final spectrum but increase the calculation time. Use fewer tapers for a faster calculation. :type weights: str :param weights: ``"adaptive"`` or ``"constant"`` weights. :type deman: bool :param demean: Force the complex TF to be demeaned. :type fmax: float :param fmax: Maximum frequency for lowpass cosine filter. Set this to zero to not have a filter. :return: Returns a dictionary with 5 :class:`numpy.ndarray`'s. See the note below. .. note:: Returns a dictionary with five arrays: * ``"deconvolved"``: Deconvolved time series. * ``"spectrum_a"``: Spectrum of the first time series. * ``"spectrum_b"``: Spectrum of the second time series. * ``"spectral_ratio"``: The ratio of both spectra. * ``"frequencies"``: The used frequency bins for the spectra. 
""" npts = len(data_a) if len(data_b) != npts: raise ValueError("Input arrays must have the same length!") if nfft is None: nfft = npts elif nfft < npts: raise ValueError("nfft must be larger then the number of samples in " "the array.") # Deconvolution utilizes the 32bit version. mt = _MtspecType("float32") # Use the optimal number of tapers in case no number is specified. if number_of_tapers is None: number_of_tapers = int(2 * time_bandwidth) - 1 # Transform the data to work with the library. data_a = np.require(data_a, mt.float, requirements=[mt.order]) data_b = np.require(data_b, mt.float, requirements=[mt.order]) nf = nfft // 2 + 1 # Internally uses integers if demean: demean = 1 else: demean = 0 # iad = 0 are adaptive, iad = 1 are constant weight - this is # counter intuitive. if weights == "constant": adaptive = 1 elif weights == "adaptive": adaptive = 0 else: raise ValueError('Weights must be either "adaptive" or "constant".') tfun = mt.empty(nfft) freq = mt.empty(nf) spec_ratio = mt.empty(nf) speci = mt.empty(nf) specj = mt.empty(nf) mtspeclib.mt_deconv_( C.byref(C.c_int(int(npts))), C.byref(C.c_int(int(nfft))), C.byref(C.c_float(float(delta))), mt.p(data_a), mt.p(data_b), C.byref(C.c_float(float(time_bandwidth))), C.byref(C.c_int(int(number_of_tapers))), C.byref(C.c_int(int(nf))), C.byref(C.c_int(adaptive)), mt.p(freq), mt.p(tfun), mt.p(spec_ratio), mt.p(speci), mt.p(specj), C.byref(C.c_int(demean)), C.byref(C.c_float(fmax))) return { "frequencies": freq, "deconvolved": tfun, "spectral_ratio": spec_ratio, "spectrum_a": speci, "spectrum_b": specj }
python
def mt_deconvolve(data_a, data_b, delta, nfft=None, time_bandwidth=None, number_of_tapers=None, weights="adaptive", demean=True, fmax=0.0): """ Deconvolve two time series using multitapers. This uses the eigencoefficients and the weights from the multitaper spectral estimations and more or less follows this paper: .. |br| raw:: html <br /> **Receiver Functions from Multiple-Taper Spectral Correlation Estimates** *Jeffrey Park, Vadim Levin* |br| Bulletin of the Seismological Society of America Dec 2000, 90 (6) 1507-1520 http://dx.doi.org/10.1785/0119990122 :type data_a: :class:`numpy.ndarray` :param data_a: Data for first time series. :type data_b: :class:`numpy.ndarray` :param data_b: Data for second time series. :type delta: float :param delta: Sample spacing of the data. :type nfft: int :param nfft: Number of points for the FFT. If ``nfft == None``, no zero padding will be applied before the FFT. :type time_bandwidth: float :param time_bandwidth: Time-bandwidth product. Common values are 2, 3, 4, and numbers in between. :type number_of_tapers: int :param number_of_tapers: Number of tapers to use. Defaults to ``int(2*time_bandwidth) - 1``. This is maximum senseful amount. More tapers will have no great influence on the final spectrum but increase the calculation time. Use fewer tapers for a faster calculation. :type weights: str :param weights: ``"adaptive"`` or ``"constant"`` weights. :type deman: bool :param demean: Force the complex TF to be demeaned. :type fmax: float :param fmax: Maximum frequency for lowpass cosine filter. Set this to zero to not have a filter. :return: Returns a dictionary with 5 :class:`numpy.ndarray`'s. See the note below. .. note:: Returns a dictionary with five arrays: * ``"deconvolved"``: Deconvolved time series. * ``"spectrum_a"``: Spectrum of the first time series. * ``"spectrum_b"``: Spectrum of the second time series. * ``"spectral_ratio"``: The ratio of both spectra. * ``"frequencies"``: The used frequency bins for the spectra. 
""" npts = len(data_a) if len(data_b) != npts: raise ValueError("Input arrays must have the same length!") if nfft is None: nfft = npts elif nfft < npts: raise ValueError("nfft must be larger then the number of samples in " "the array.") # Deconvolution utilizes the 32bit version. mt = _MtspecType("float32") # Use the optimal number of tapers in case no number is specified. if number_of_tapers is None: number_of_tapers = int(2 * time_bandwidth) - 1 # Transform the data to work with the library. data_a = np.require(data_a, mt.float, requirements=[mt.order]) data_b = np.require(data_b, mt.float, requirements=[mt.order]) nf = nfft // 2 + 1 # Internally uses integers if demean: demean = 1 else: demean = 0 # iad = 0 are adaptive, iad = 1 are constant weight - this is # counter intuitive. if weights == "constant": adaptive = 1 elif weights == "adaptive": adaptive = 0 else: raise ValueError('Weights must be either "adaptive" or "constant".') tfun = mt.empty(nfft) freq = mt.empty(nf) spec_ratio = mt.empty(nf) speci = mt.empty(nf) specj = mt.empty(nf) mtspeclib.mt_deconv_( C.byref(C.c_int(int(npts))), C.byref(C.c_int(int(nfft))), C.byref(C.c_float(float(delta))), mt.p(data_a), mt.p(data_b), C.byref(C.c_float(float(time_bandwidth))), C.byref(C.c_int(int(number_of_tapers))), C.byref(C.c_int(int(nf))), C.byref(C.c_int(adaptive)), mt.p(freq), mt.p(tfun), mt.p(spec_ratio), mt.p(speci), mt.p(specj), C.byref(C.c_int(demean)), C.byref(C.c_float(fmax))) return { "frequencies": freq, "deconvolved": tfun, "spectral_ratio": spec_ratio, "spectrum_a": speci, "spectrum_b": specj }
[ "def", "mt_deconvolve", "(", "data_a", ",", "data_b", ",", "delta", ",", "nfft", "=", "None", ",", "time_bandwidth", "=", "None", ",", "number_of_tapers", "=", "None", ",", "weights", "=", "\"adaptive\"", ",", "demean", "=", "True", ",", "fmax", "=", "0....
Deconvolve two time series using multitapers. This uses the eigencoefficients and the weights from the multitaper spectral estimations and more or less follows this paper: .. |br| raw:: html <br /> **Receiver Functions from Multiple-Taper Spectral Correlation Estimates** *Jeffrey Park, Vadim Levin* |br| Bulletin of the Seismological Society of America Dec 2000, 90 (6) 1507-1520 http://dx.doi.org/10.1785/0119990122 :type data_a: :class:`numpy.ndarray` :param data_a: Data for first time series. :type data_b: :class:`numpy.ndarray` :param data_b: Data for second time series. :type delta: float :param delta: Sample spacing of the data. :type nfft: int :param nfft: Number of points for the FFT. If ``nfft == None``, no zero padding will be applied before the FFT. :type time_bandwidth: float :param time_bandwidth: Time-bandwidth product. Common values are 2, 3, 4, and numbers in between. :type number_of_tapers: int :param number_of_tapers: Number of tapers to use. Defaults to ``int(2*time_bandwidth) - 1``. This is maximum senseful amount. More tapers will have no great influence on the final spectrum but increase the calculation time. Use fewer tapers for a faster calculation. :type weights: str :param weights: ``"adaptive"`` or ``"constant"`` weights. :type deman: bool :param demean: Force the complex TF to be demeaned. :type fmax: float :param fmax: Maximum frequency for lowpass cosine filter. Set this to zero to not have a filter. :return: Returns a dictionary with 5 :class:`numpy.ndarray`'s. See the note below. .. note:: Returns a dictionary with five arrays: * ``"deconvolved"``: Deconvolved time series. * ``"spectrum_a"``: Spectrum of the first time series. * ``"spectrum_b"``: Spectrum of the second time series. * ``"spectral_ratio"``: The ratio of both spectra. * ``"frequencies"``: The used frequency bins for the spectra.
[ "Deconvolve", "two", "time", "series", "using", "multitapers", "." ]
06561b6370f13fcb2e731470ba0f7314f4b2362d
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/mtspec/multitaper.py#L623-L749
train
24,023
krischer/mtspec
mtspec/multitaper.py
_MtspecType.empty
def empty(self, shape, complex=False): """ A wrapper around np.empty which automatically sets the correct type and returns an empty array. :param shape: The shape of the array in np.empty format """ if complex: return np.empty(shape, dtype=self.complex, order=self.order) return np.empty(shape, dtype=self.float, order=self.order)
python
def empty(self, shape, complex=False): """ A wrapper around np.empty which automatically sets the correct type and returns an empty array. :param shape: The shape of the array in np.empty format """ if complex: return np.empty(shape, dtype=self.complex, order=self.order) return np.empty(shape, dtype=self.float, order=self.order)
[ "def", "empty", "(", "self", ",", "shape", ",", "complex", "=", "False", ")", ":", "if", "complex", ":", "return", "np", ".", "empty", "(", "shape", ",", "dtype", "=", "self", ".", "complex", ",", "order", "=", "self", ".", "order", ")", "return", ...
A wrapper around np.empty which automatically sets the correct type and returns an empty array. :param shape: The shape of the array in np.empty format
[ "A", "wrapper", "around", "np", ".", "empty", "which", "automatically", "sets", "the", "correct", "type", "and", "returns", "an", "empty", "array", "." ]
06561b6370f13fcb2e731470ba0f7314f4b2362d
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/mtspec/multitaper.py#L775-L784
train
24,024
krischer/mtspec
mtspec/util.py
signal_bursts
def signal_bursts(): """ Generates a signal with two bursts inside. Useful for testing time frequency distributions. :returns: Generated signal :rtype: numpy.ndarray """ np.random.seed(815) length = 5 * 512 # Baseline low frequency plus noise. data = np.sin(np.linspace(0, 80 * np.pi, length)) noise = np.random.ranf(length) noise /= noise.max() noise /= 15 data += noise # Double last two fifths of the signal. data[-2 * 512:] *= 2.0 chirp1 = 2.5 * np.sin(np.linspace(0, 400 * np.pi, 512)) chirp1 *= np.linspace(1, 0, 512) data[512:2 * 512] += chirp1 # Add second transient signal. chirp2 = 5.0 * np.sin(np.linspace(0, 200 * np.pi, 512)) chirp2 *= np.linspace(1, 0, 512) data[3 * 512:4 * 512] += chirp2 return data
python
def signal_bursts(): """ Generates a signal with two bursts inside. Useful for testing time frequency distributions. :returns: Generated signal :rtype: numpy.ndarray """ np.random.seed(815) length = 5 * 512 # Baseline low frequency plus noise. data = np.sin(np.linspace(0, 80 * np.pi, length)) noise = np.random.ranf(length) noise /= noise.max() noise /= 15 data += noise # Double last two fifths of the signal. data[-2 * 512:] *= 2.0 chirp1 = 2.5 * np.sin(np.linspace(0, 400 * np.pi, 512)) chirp1 *= np.linspace(1, 0, 512) data[512:2 * 512] += chirp1 # Add second transient signal. chirp2 = 5.0 * np.sin(np.linspace(0, 200 * np.pi, 512)) chirp2 *= np.linspace(1, 0, 512) data[3 * 512:4 * 512] += chirp2 return data
[ "def", "signal_bursts", "(", ")", ":", "np", ".", "random", ".", "seed", "(", "815", ")", "length", "=", "5", "*", "512", "# Baseline low frequency plus noise.", "data", "=", "np", ".", "sin", "(", "np", ".", "linspace", "(", "0", ",", "80", "*", "np...
Generates a signal with two bursts inside. Useful for testing time frequency distributions. :returns: Generated signal :rtype: numpy.ndarray
[ "Generates", "a", "signal", "with", "two", "bursts", "inside", ".", "Useful", "for", "testing", "time", "frequency", "distributions", "." ]
06561b6370f13fcb2e731470ba0f7314f4b2362d
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/mtspec/util.py#L57-L86
train
24,025
krischer/mtspec
mtspec/util.py
linear_chirp
def linear_chirp(npts=2000): """ Generates a simple linear chirp. :param npts: Number of samples. :type npts: int :returns: Generated signal :rtype: numpy.ndarray """ time = np.linspace(0, 20, npts) chirp = np.sin(0.2 * np.pi * (0.1 + 24.0 / 2.0 * time) * time) return chirp
python
def linear_chirp(npts=2000): """ Generates a simple linear chirp. :param npts: Number of samples. :type npts: int :returns: Generated signal :rtype: numpy.ndarray """ time = np.linspace(0, 20, npts) chirp = np.sin(0.2 * np.pi * (0.1 + 24.0 / 2.0 * time) * time) return chirp
[ "def", "linear_chirp", "(", "npts", "=", "2000", ")", ":", "time", "=", "np", ".", "linspace", "(", "0", ",", "20", ",", "npts", ")", "chirp", "=", "np", ".", "sin", "(", "0.2", "*", "np", ".", "pi", "*", "(", "0.1", "+", "24.0", "/", "2.0", ...
Generates a simple linear chirp. :param npts: Number of samples. :type npts: int :returns: Generated signal :rtype: numpy.ndarray
[ "Generates", "a", "simple", "linear", "chirp", "." ]
06561b6370f13fcb2e731470ba0f7314f4b2362d
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/mtspec/util.py#L89-L100
train
24,026
krischer/mtspec
mtspec/util.py
exponential_chirp
def exponential_chirp(npts=2000): """ Generates an exponential chirp. :param npts: Number of samples. :type npts: int :returns: Generated signal :rtype: numpy.ndarray """ time = np.linspace(0, 20, npts) chirp = np.sin(2 * np.pi * 0.2 * (1.3 ** time - 1) / np.log(1.3)) return chirp
python
def exponential_chirp(npts=2000): """ Generates an exponential chirp. :param npts: Number of samples. :type npts: int :returns: Generated signal :rtype: numpy.ndarray """ time = np.linspace(0, 20, npts) chirp = np.sin(2 * np.pi * 0.2 * (1.3 ** time - 1) / np.log(1.3)) return chirp
[ "def", "exponential_chirp", "(", "npts", "=", "2000", ")", ":", "time", "=", "np", ".", "linspace", "(", "0", ",", "20", ",", "npts", ")", "chirp", "=", "np", ".", "sin", "(", "2", "*", "np", ".", "pi", "*", "0.2", "*", "(", "1.3", "**", "tim...
Generates an exponential chirp. :param npts: Number of samples. :type npts: int :returns: Generated signal :rtype: numpy.ndarray
[ "Generates", "an", "exponential", "chirp", "." ]
06561b6370f13fcb2e731470ba0f7314f4b2362d
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/mtspec/util.py#L103-L114
train
24,027
krischer/mtspec
setup.py
get_libgfortran_dir
def get_libgfortran_dir(): """ Helper function returning the library directory of libgfortran. Useful on OSX where the C compiler oftentimes has no knowledge of the library directories of the Fortran compiler. I don't think it can do any harm on Linux. """ for ending in [".3.dylib", ".dylib", ".3.so", ".so"]: try: p = Popen(['gfortran', "-print-file-name=libgfortran" + ending], stdout=PIPE, stderr=PIPE) p.stderr.close() line = p.stdout.readline().decode().strip() p.stdout.close() if os.path.exists(line): return [os.path.dirname(line)] except: continue return []
python
def get_libgfortran_dir(): """ Helper function returning the library directory of libgfortran. Useful on OSX where the C compiler oftentimes has no knowledge of the library directories of the Fortran compiler. I don't think it can do any harm on Linux. """ for ending in [".3.dylib", ".dylib", ".3.so", ".so"]: try: p = Popen(['gfortran', "-print-file-name=libgfortran" + ending], stdout=PIPE, stderr=PIPE) p.stderr.close() line = p.stdout.readline().decode().strip() p.stdout.close() if os.path.exists(line): return [os.path.dirname(line)] except: continue return []
[ "def", "get_libgfortran_dir", "(", ")", ":", "for", "ending", "in", "[", "\".3.dylib\"", ",", "\".dylib\"", ",", "\".3.so\"", ",", "\".so\"", "]", ":", "try", ":", "p", "=", "Popen", "(", "[", "'gfortran'", ",", "\"-print-file-name=libgfortran\"", "+", "endi...
Helper function returning the library directory of libgfortran. Useful on OSX where the C compiler oftentimes has no knowledge of the library directories of the Fortran compiler. I don't think it can do any harm on Linux.
[ "Helper", "function", "returning", "the", "library", "directory", "of", "libgfortran", ".", "Useful", "on", "OSX", "where", "the", "C", "compiler", "oftentimes", "has", "no", "knowledge", "of", "the", "library", "directories", "of", "the", "Fortran", "compiler",...
06561b6370f13fcb2e731470ba0f7314f4b2362d
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/setup.py#L116-L134
train
24,028
pyGrowler/Growler
growler/utils/proto.py
PrototypeObject.create
def create(cls, obj): """ Create a new prototype object with the argument as the source prototype. .. Note: This does not `initialize` the newly created object any more than setting its prototype. Calling the __init__ method is usually unnecessary as all initialization data should be in the original prototype object already. If required, call __init__ explicitly: >>> proto_obj = MyProtoObj(1, 2, 3) >>> obj = MyProtoObj.create(proto_obj) >>> obj.__init__(1, 2, 3) """ self = cls.__new__(cls) self.__proto__ = obj return self
python
def create(cls, obj): """ Create a new prototype object with the argument as the source prototype. .. Note: This does not `initialize` the newly created object any more than setting its prototype. Calling the __init__ method is usually unnecessary as all initialization data should be in the original prototype object already. If required, call __init__ explicitly: >>> proto_obj = MyProtoObj(1, 2, 3) >>> obj = MyProtoObj.create(proto_obj) >>> obj.__init__(1, 2, 3) """ self = cls.__new__(cls) self.__proto__ = obj return self
[ "def", "create", "(", "cls", ",", "obj", ")", ":", "self", "=", "cls", ".", "__new__", "(", "cls", ")", "self", ".", "__proto__", "=", "obj", "return", "self" ]
Create a new prototype object with the argument as the source prototype. .. Note: This does not `initialize` the newly created object any more than setting its prototype. Calling the __init__ method is usually unnecessary as all initialization data should be in the original prototype object already. If required, call __init__ explicitly: >>> proto_obj = MyProtoObj(1, 2, 3) >>> obj = MyProtoObj.create(proto_obj) >>> obj.__init__(1, 2, 3)
[ "Create", "a", "new", "prototype", "object", "with", "the", "argument", "as", "the", "source", "prototype", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/utils/proto.py#L41-L63
train
24,029
pyGrowler/Growler
growler/utils/proto.py
PrototypeObject.bind
def bind(self, func): """ Take a function and create a bound method """ if self.__methods__ is None: self.__methods__ = {} self.__methods__[func.__name__] = BoundFunction(func)
python
def bind(self, func): """ Take a function and create a bound method """ if self.__methods__ is None: self.__methods__ = {} self.__methods__[func.__name__] = BoundFunction(func)
[ "def", "bind", "(", "self", ",", "func", ")", ":", "if", "self", ".", "__methods__", "is", "None", ":", "self", ".", "__methods__", "=", "{", "}", "self", ".", "__methods__", "[", "func", ".", "__name__", "]", "=", "BoundFunction", "(", "func", ")" ]
Take a function and create a bound method
[ "Take", "a", "function", "and", "create", "a", "bound", "method" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/utils/proto.py#L65-L71
train
24,030
pyGrowler/Growler
growler/utils/proto.py
PrototypeObject.has_own_property
def has_own_property(self, attr): """ Returns if the property """ try: object.__getattribute__(self, attr) except AttributeError: return False else: return True
python
def has_own_property(self, attr): """ Returns if the property """ try: object.__getattribute__(self, attr) except AttributeError: return False else: return True
[ "def", "has_own_property", "(", "self", ",", "attr", ")", ":", "try", ":", "object", ".", "__getattribute__", "(", "self", ",", "attr", ")", "except", "AttributeError", ":", "return", "False", "else", ":", "return", "True" ]
Returns if the property
[ "Returns", "if", "the", "property" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/utils/proto.py#L73-L82
train
24,031
pyGrowler/Growler
growler/core/application.py
Application.add_router
def add_router(self, path, router): """ Adds a router to the list of routers Args: path (str or regex): The path on which the router binds router (growler.Router): The router which will respond to requests Raises: TypeError: If `strict_router_check` attribute is True and the router is not an instance of growler.Router. """ if self.strict_router_check and not isinstance(router, Router): raise TypeError("Expected object of type Router, found %r" % type(router)) log.info("{} Adding router {} on path {}", id(self), router, path) self.middleware.add(path=path, func=router, method_mask=HTTPMethod.ALL,)
python
def add_router(self, path, router): """ Adds a router to the list of routers Args: path (str or regex): The path on which the router binds router (growler.Router): The router which will respond to requests Raises: TypeError: If `strict_router_check` attribute is True and the router is not an instance of growler.Router. """ if self.strict_router_check and not isinstance(router, Router): raise TypeError("Expected object of type Router, found %r" % type(router)) log.info("{} Adding router {} on path {}", id(self), router, path) self.middleware.add(path=path, func=router, method_mask=HTTPMethod.ALL,)
[ "def", "add_router", "(", "self", ",", "path", ",", "router", ")", ":", "if", "self", ".", "strict_router_check", "and", "not", "isinstance", "(", "router", ",", "Router", ")", ":", "raise", "TypeError", "(", "\"Expected object of type Router, found %r\"", "%", ...
Adds a router to the list of routers Args: path (str or regex): The path on which the router binds router (growler.Router): The router which will respond to requests Raises: TypeError: If `strict_router_check` attribute is True and the router is not an instance of growler.Router.
[ "Adds", "a", "router", "to", "the", "list", "of", "routers" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/application.py#L296-L315
train
24,032
pyGrowler/Growler
growler/core/application.py
Application.create_server
def create_server(self, loop=None, as_coroutine=False, protocol_factory=None, **server_config): """ Helper function which constructs a listening server, using the default growler.http.protocol.Protocol which responds to this app. This function exists only to remove boilerplate code for starting up a growler app when using asyncio. Args: as_coroutine (bool): If True, this function does not wait for the server to be created, and only returns the coroutine generator object returned by loop.create_server. This mode should be used when already inside an async function. The default mode is to call :method:`run_until_complete` on the loop paramter, blocking until the server is created and added to the event loop. server_config (mixed): These keyword arguments parameters are passed directly to the BaseEventLoop.create_server function. Consult their documentation for details. loop (BaseEventLoop): This is the asyncio event loop used to provide the underlying `create_server` method, and, if as_coroutine is False, will block until the server is created. protocol_factory (callable): Function returning an asyncio protocol object (or more specifically, a `growler.aio.GrowlerProtocol` object) to be called upon client connection. The default is the :class:`GrowlerHttpProtocol` factory function. **server_config (mixed): These keyword arguments parameters are passed directly to the BaseEventLoop.create_server function. Consult their documentation for details. Returns: asyncio.Server: The result of asyncio.BaseEventLoop.create_server which has been passed to the event loop and setup with the provided parameters. This is returned if gen_coroutine is False (default). asyncio.coroutine: An asyncio.coroutine which will produce the asyncio.Server from the provided configuration parameters. This is returned if gen_coroutine is True. 
""" if loop is None: import asyncio loop = asyncio.get_event_loop() if protocol_factory is None: from growler.aio import GrowlerHTTPProtocol protocol_factory = GrowlerHTTPProtocol.get_factory create_server = loop.create_server( protocol_factory(self, loop=loop), **server_config ) if as_coroutine: return create_server else: return loop.run_until_complete(create_server)
python
def create_server(self, loop=None, as_coroutine=False, protocol_factory=None, **server_config): """ Helper function which constructs a listening server, using the default growler.http.protocol.Protocol which responds to this app. This function exists only to remove boilerplate code for starting up a growler app when using asyncio. Args: as_coroutine (bool): If True, this function does not wait for the server to be created, and only returns the coroutine generator object returned by loop.create_server. This mode should be used when already inside an async function. The default mode is to call :method:`run_until_complete` on the loop paramter, blocking until the server is created and added to the event loop. server_config (mixed): These keyword arguments parameters are passed directly to the BaseEventLoop.create_server function. Consult their documentation for details. loop (BaseEventLoop): This is the asyncio event loop used to provide the underlying `create_server` method, and, if as_coroutine is False, will block until the server is created. protocol_factory (callable): Function returning an asyncio protocol object (or more specifically, a `growler.aio.GrowlerProtocol` object) to be called upon client connection. The default is the :class:`GrowlerHttpProtocol` factory function. **server_config (mixed): These keyword arguments parameters are passed directly to the BaseEventLoop.create_server function. Consult their documentation for details. Returns: asyncio.Server: The result of asyncio.BaseEventLoop.create_server which has been passed to the event loop and setup with the provided parameters. This is returned if gen_coroutine is False (default). asyncio.coroutine: An asyncio.coroutine which will produce the asyncio.Server from the provided configuration parameters. This is returned if gen_coroutine is True. 
""" if loop is None: import asyncio loop = asyncio.get_event_loop() if protocol_factory is None: from growler.aio import GrowlerHTTPProtocol protocol_factory = GrowlerHTTPProtocol.get_factory create_server = loop.create_server( protocol_factory(self, loop=loop), **server_config ) if as_coroutine: return create_server else: return loop.run_until_complete(create_server)
[ "def", "create_server", "(", "self", ",", "loop", "=", "None", ",", "as_coroutine", "=", "False", ",", "protocol_factory", "=", "None", ",", "*", "*", "server_config", ")", ":", "if", "loop", "is", "None", ":", "import", "asyncio", "loop", "=", "asyncio"...
Helper function which constructs a listening server, using the default growler.http.protocol.Protocol which responds to this app. This function exists only to remove boilerplate code for starting up a growler app when using asyncio. Args: as_coroutine (bool): If True, this function does not wait for the server to be created, and only returns the coroutine generator object returned by loop.create_server. This mode should be used when already inside an async function. The default mode is to call :method:`run_until_complete` on the loop paramter, blocking until the server is created and added to the event loop. server_config (mixed): These keyword arguments parameters are passed directly to the BaseEventLoop.create_server function. Consult their documentation for details. loop (BaseEventLoop): This is the asyncio event loop used to provide the underlying `create_server` method, and, if as_coroutine is False, will block until the server is created. protocol_factory (callable): Function returning an asyncio protocol object (or more specifically, a `growler.aio.GrowlerProtocol` object) to be called upon client connection. The default is the :class:`GrowlerHttpProtocol` factory function. **server_config (mixed): These keyword arguments parameters are passed directly to the BaseEventLoop.create_server function. Consult their documentation for details. Returns: asyncio.Server: The result of asyncio.BaseEventLoop.create_server which has been passed to the event loop and setup with the provided parameters. This is returned if gen_coroutine is False (default). asyncio.coroutine: An asyncio.coroutine which will produce the asyncio.Server from the provided configuration parameters. This is returned if gen_coroutine is True.
[ "Helper", "function", "which", "constructs", "a", "listening", "server", "using", "the", "default", "growler", ".", "http", ".", "protocol", ".", "Protocol", "which", "responds", "to", "this", "app", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/application.py#L616-L683
train
24,033
pyGrowler/Growler
growler/core/application.py
Application.create_server_and_run_forever
def create_server_and_run_forever(self, loop=None, **server_config): """ Helper function which constructs an HTTP server and listens the loop forever. This function exists only to remove boilerplate code for starting up a growler app. Args: **server_config: These keyword arguments are forwarded directly to the BaseEventLoop.create_server function. Consult their documentation for details. Parameters: loop (asyncio.BaseEventLoop): Optional parameter for specifying an event loop which will handle socket setup. **server_config: These keyword arguments are forwarded directly to the create_server function. """ if loop is None: import asyncio loop = asyncio.get_event_loop() self.create_server(loop=loop, **server_config) try: loop.run_forever() except KeyboardInterrupt: pass
python
def create_server_and_run_forever(self, loop=None, **server_config): """ Helper function which constructs an HTTP server and listens the loop forever. This function exists only to remove boilerplate code for starting up a growler app. Args: **server_config: These keyword arguments are forwarded directly to the BaseEventLoop.create_server function. Consult their documentation for details. Parameters: loop (asyncio.BaseEventLoop): Optional parameter for specifying an event loop which will handle socket setup. **server_config: These keyword arguments are forwarded directly to the create_server function. """ if loop is None: import asyncio loop = asyncio.get_event_loop() self.create_server(loop=loop, **server_config) try: loop.run_forever() except KeyboardInterrupt: pass
[ "def", "create_server_and_run_forever", "(", "self", ",", "loop", "=", "None", ",", "*", "*", "server_config", ")", ":", "if", "loop", "is", "None", ":", "import", "asyncio", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "self", ".", "create_ser...
Helper function which constructs an HTTP server and listens the loop forever. This function exists only to remove boilerplate code for starting up a growler app. Args: **server_config: These keyword arguments are forwarded directly to the BaseEventLoop.create_server function. Consult their documentation for details. Parameters: loop (asyncio.BaseEventLoop): Optional parameter for specifying an event loop which will handle socket setup. **server_config: These keyword arguments are forwarded directly to the create_server function.
[ "Helper", "function", "which", "constructs", "an", "HTTP", "server", "and", "listens", "the", "loop", "forever", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/application.py#L685-L712
train
24,034
pyGrowler/Growler
growler/middleware/renderer.py
RenderEngine.find_template_filename
def find_template_filename(self, template_name): """ Searches for a file matching the given template name. If found, this method returns the pathlib.Path object of the found template file. Args: template_name (str): Name of the template, with or without a file extension. Returns: pathlib.Path: Path to the matching filename. """ def next_file(): filename = self.path / template_name yield filename try: exts = self.default_file_extensions except AttributeError: return strfilename = str(filename) for ext in exts: yield Path(strfilename + ext) for filename in next_file(): if filename.is_file(): return filename
python
def find_template_filename(self, template_name): """ Searches for a file matching the given template name. If found, this method returns the pathlib.Path object of the found template file. Args: template_name (str): Name of the template, with or without a file extension. Returns: pathlib.Path: Path to the matching filename. """ def next_file(): filename = self.path / template_name yield filename try: exts = self.default_file_extensions except AttributeError: return strfilename = str(filename) for ext in exts: yield Path(strfilename + ext) for filename in next_file(): if filename.is_file(): return filename
[ "def", "find_template_filename", "(", "self", ",", "template_name", ")", ":", "def", "next_file", "(", ")", ":", "filename", "=", "self", ".", "path", "/", "template_name", "yield", "filename", "try", ":", "exts", "=", "self", ".", "default_file_extensions", ...
Searches for a file matching the given template name. If found, this method returns the pathlib.Path object of the found template file. Args: template_name (str): Name of the template, with or without a file extension. Returns: pathlib.Path: Path to the matching filename.
[ "Searches", "for", "a", "file", "matching", "the", "given", "template", "name", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/middleware/renderer.py#L141-L170
train
24,035
pyGrowler/Growler
growler/http/responder.py
GrowlerHTTPResponder.set_request_line
def set_request_line(self, method, url, version): """ Sets the request line on the responder. """ self.parsed_request = (method, url, version) self.request = { 'method': method, 'url': url, 'version': version }
python
def set_request_line(self, method, url, version): """ Sets the request line on the responder. """ self.parsed_request = (method, url, version) self.request = { 'method': method, 'url': url, 'version': version }
[ "def", "set_request_line", "(", "self", ",", "method", ",", "url", ",", "version", ")", ":", "self", ".", "parsed_request", "=", "(", "method", ",", "url", ",", "version", ")", "self", ".", "request", "=", "{", "'method'", ":", "method", ",", "'url'", ...
Sets the request line on the responder.
[ "Sets", "the", "request", "line", "on", "the", "responder", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/responder.py#L186-L195
train
24,036
pyGrowler/Growler
growler/http/responder.py
GrowlerHTTPResponder.init_body_buffer
def init_body_buffer(self, method, headers): """ Sets up the body_buffer and content_length attributes based on method and headers. """ content_length = headers.get("CONTENT-LENGTH", None) if method in (HTTPMethod.POST, HTTPMethod.PUT): if content_length is None: raise HTTPErrorBadRequest("HTTP Method requires a CONTENT-LENGTH header") self.content_length = int(content_length) self.body_buffer = bytearray(0) elif content_length is not None: raise HTTPErrorBadRequest( "HTTP method %s may NOT have a CONTENT-LENGTH header" )
python
def init_body_buffer(self, method, headers): """ Sets up the body_buffer and content_length attributes based on method and headers. """ content_length = headers.get("CONTENT-LENGTH", None) if method in (HTTPMethod.POST, HTTPMethod.PUT): if content_length is None: raise HTTPErrorBadRequest("HTTP Method requires a CONTENT-LENGTH header") self.content_length = int(content_length) self.body_buffer = bytearray(0) elif content_length is not None: raise HTTPErrorBadRequest( "HTTP method %s may NOT have a CONTENT-LENGTH header" )
[ "def", "init_body_buffer", "(", "self", ",", "method", ",", "headers", ")", ":", "content_length", "=", "headers", ".", "get", "(", "\"CONTENT-LENGTH\"", ",", "None", ")", "if", "method", "in", "(", "HTTPMethod", ".", "POST", ",", "HTTPMethod", ".", "PUT",...
Sets up the body_buffer and content_length attributes based on method and headers.
[ "Sets", "up", "the", "body_buffer", "and", "content_length", "attributes", "based", "on", "method", "and", "headers", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/responder.py#L197-L213
train
24,037
pyGrowler/Growler
growler/http/responder.py
GrowlerHTTPResponder.build_req_and_res
def build_req_and_res(self): """ Simple method which calls the request and response factories the responder was given, and returns the pair. """ req = self.build_req(self, self.headers) res = self.build_res(self._handler) return req, res
python
def build_req_and_res(self): """ Simple method which calls the request and response factories the responder was given, and returns the pair. """ req = self.build_req(self, self.headers) res = self.build_res(self._handler) return req, res
[ "def", "build_req_and_res", "(", "self", ")", ":", "req", "=", "self", ".", "build_req", "(", "self", ",", "self", ".", "headers", ")", "res", "=", "self", ".", "build_res", "(", "self", ".", "_handler", ")", "return", "req", ",", "res" ]
Simple method which calls the request and response factories the responder was given, and returns the pair.
[ "Simple", "method", "which", "calls", "the", "request", "and", "response", "factories", "the", "responder", "was", "given", "and", "returns", "the", "pair", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/responder.py#L215-L222
train
24,038
pyGrowler/Growler
growler/http/responder.py
GrowlerHTTPResponder.validate_and_store_body_data
def validate_and_store_body_data(self, data): """ Attempts simple body data validation by comparining incoming data to the content length header. If passes store the data into self._buffer. Parameters: data (bytes): Incoming client data to be added to the body Raises: HTTPErrorBadRequest: Raised if data is sent when not expected, or if too much data is sent. """ # add data to end of buffer self.body_buffer[-1:] = data # if len(self.body_buffer) > self.content_length: problem = "Content length exceeds expected value (%d > %d)" % ( len(self.body_buffer), self.content_length ) raise HTTPErrorBadRequest(phrase=problem)
python
def validate_and_store_body_data(self, data): """ Attempts simple body data validation by comparining incoming data to the content length header. If passes store the data into self._buffer. Parameters: data (bytes): Incoming client data to be added to the body Raises: HTTPErrorBadRequest: Raised if data is sent when not expected, or if too much data is sent. """ # add data to end of buffer self.body_buffer[-1:] = data # if len(self.body_buffer) > self.content_length: problem = "Content length exceeds expected value (%d > %d)" % ( len(self.body_buffer), self.content_length ) raise HTTPErrorBadRequest(phrase=problem)
[ "def", "validate_and_store_body_data", "(", "self", ",", "data", ")", ":", "# add data to end of buffer", "self", ".", "body_buffer", "[", "-", "1", ":", "]", "=", "data", "#", "if", "len", "(", "self", ".", "body_buffer", ")", ">", "self", ".", "content_l...
Attempts simple body data validation by comparining incoming data to the content length header. If passes store the data into self._buffer. Parameters: data (bytes): Incoming client data to be added to the body Raises: HTTPErrorBadRequest: Raised if data is sent when not expected, or if too much data is sent.
[ "Attempts", "simple", "body", "data", "validation", "by", "comparining", "incoming", "data", "to", "the", "content", "length", "header", ".", "If", "passes", "store", "the", "data", "into", "self", ".", "_buffer", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/responder.py#L224-L246
train
24,039
pyGrowler/Growler
growler/aio/http_protocol.py
GrowlerHTTPProtocol.begin_application
def begin_application(self, req, res): """ Entry point for the application middleware chain for an asyncio event loop. """ # Add the middleware processing to the event loop - this *should* # change the call stack so any server errors do not link back to this # function self.loop.create_task(self.http_application.handle_client_request(req, res))
python
def begin_application(self, req, res): """ Entry point for the application middleware chain for an asyncio event loop. """ # Add the middleware processing to the event loop - this *should* # change the call stack so any server errors do not link back to this # function self.loop.create_task(self.http_application.handle_client_request(req, res))
[ "def", "begin_application", "(", "self", ",", "req", ",", "res", ")", ":", "# Add the middleware processing to the event loop - this *should*", "# change the call stack so any server errors do not link back to this", "# function", "self", ".", "loop", ".", "create_task", "(", "...
Entry point for the application middleware chain for an asyncio event loop.
[ "Entry", "point", "for", "the", "application", "middleware", "chain", "for", "an", "asyncio", "event", "loop", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/aio/http_protocol.py#L137-L145
train
24,040
pyGrowler/Growler
growler/middleware/static.py
Static.calculate_etag
def calculate_etag(file_path): """ Calculate an etag value Args: a_file (pathlib.Path): The filepath to the Returns: String of the etag value to be sent back in header """ stat = file_path.stat() etag = "%x-%x" % (stat.st_mtime_ns, stat.st_size) return etag
python
def calculate_etag(file_path): """ Calculate an etag value Args: a_file (pathlib.Path): The filepath to the Returns: String of the etag value to be sent back in header """ stat = file_path.stat() etag = "%x-%x" % (stat.st_mtime_ns, stat.st_size) return etag
[ "def", "calculate_etag", "(", "file_path", ")", ":", "stat", "=", "file_path", ".", "stat", "(", ")", "etag", "=", "\"%x-%x\"", "%", "(", "stat", ".", "st_mtime_ns", ",", "stat", ".", "st_size", ")", "return", "etag" ]
Calculate an etag value Args: a_file (pathlib.Path): The filepath to the Returns: String of the etag value to be sent back in header
[ "Calculate", "an", "etag", "value" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/middleware/static.py#L81-L93
train
24,041
pyGrowler/Growler
growler/http/response.py
HTTPResponse._set_default_headers
def _set_default_headers(self): """ Create some default headers that should be sent along with every HTTP response """ self.headers.setdefault('Date', self.get_current_time) self.headers.setdefault('Server', self.SERVER_INFO) self.headers.setdefault('Content-Length', "%d" % len(self.message)) if self.app.enabled('x-powered-by'): self.headers.setdefault('X-Powered-By', 'Growler')
python
def _set_default_headers(self): """ Create some default headers that should be sent along with every HTTP response """ self.headers.setdefault('Date', self.get_current_time) self.headers.setdefault('Server', self.SERVER_INFO) self.headers.setdefault('Content-Length', "%d" % len(self.message)) if self.app.enabled('x-powered-by'): self.headers.setdefault('X-Powered-By', 'Growler')
[ "def", "_set_default_headers", "(", "self", ")", ":", "self", ".", "headers", ".", "setdefault", "(", "'Date'", ",", "self", ".", "get_current_time", ")", "self", ".", "headers", ".", "setdefault", "(", "'Server'", ",", "self", ".", "SERVER_INFO", ")", "se...
Create some default headers that should be sent along with every HTTP response
[ "Create", "some", "default", "headers", "that", "should", "be", "sent", "along", "with", "every", "HTTP", "response" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L65-L74
train
24,042
pyGrowler/Growler
growler/http/response.py
HTTPResponse.send_headers
def send_headers(self): """ Sends the headers to the client """ self.events.sync_emit('headers') self._set_default_headers() header_str = self.status_line + self.EOL + str(self.headers) self.stream.write(header_str.encode()) self.events.sync_emit('after_headers')
python
def send_headers(self): """ Sends the headers to the client """ self.events.sync_emit('headers') self._set_default_headers() header_str = self.status_line + self.EOL + str(self.headers) self.stream.write(header_str.encode()) self.events.sync_emit('after_headers')
[ "def", "send_headers", "(", "self", ")", ":", "self", ".", "events", ".", "sync_emit", "(", "'headers'", ")", "self", ".", "_set_default_headers", "(", ")", "header_str", "=", "self", ".", "status_line", "+", "self", ".", "EOL", "+", "str", "(", "self", ...
Sends the headers to the client
[ "Sends", "the", "headers", "to", "the", "client" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L76-L84
train
24,043
pyGrowler/Growler
growler/http/response.py
HTTPResponse.end
def end(self): """ Ends the response. Useful for quickly ending connection with no data sent """ self.send_headers() self.write() self.write_eof() self.has_ended = True
python
def end(self): """ Ends the response. Useful for quickly ending connection with no data sent """ self.send_headers() self.write() self.write_eof() self.has_ended = True
[ "def", "end", "(", "self", ")", ":", "self", ".", "send_headers", "(", ")", "self", ".", "write", "(", ")", "self", ".", "write_eof", "(", ")", "self", ".", "has_ended", "=", "True" ]
Ends the response. Useful for quickly ending connection with no data sent
[ "Ends", "the", "response", ".", "Useful", "for", "quickly", "ending", "connection", "with", "no", "data", "sent" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L110-L118
train
24,044
pyGrowler/Growler
growler/http/response.py
HTTPResponse.redirect
def redirect(self, url, status=None): """ Redirect to the specified url, optional status code defaults to 302. """ self.status_code = 302 if status is None else status self.headers = Headers([('location', url)]) self.message = '' self.end()
python
def redirect(self, url, status=None): """ Redirect to the specified url, optional status code defaults to 302. """ self.status_code = 302 if status is None else status self.headers = Headers([('location', url)]) self.message = '' self.end()
[ "def", "redirect", "(", "self", ",", "url", ",", "status", "=", "None", ")", ":", "self", ".", "status_code", "=", "302", "if", "status", "is", "None", "else", "status", "self", ".", "headers", "=", "Headers", "(", "[", "(", "'location'", ",", "url",...
Redirect to the specified url, optional status code defaults to 302.
[ "Redirect", "to", "the", "specified", "url", "optional", "status", "code", "defaults", "to", "302", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L120-L127
train
24,045
pyGrowler/Growler
growler/http/response.py
HTTPResponse.set
def set(self, header, value=None): """Set header to the value""" if value is None: for k, v in header.items(): self.headers[k] = v else: self.headers[header] = value
python
def set(self, header, value=None): """Set header to the value""" if value is None: for k, v in header.items(): self.headers[k] = v else: self.headers[header] = value
[ "def", "set", "(", "self", ",", "header", ",", "value", "=", "None", ")", ":", "if", "value", "is", "None", ":", "for", "k", ",", "v", "in", "header", ".", "items", "(", ")", ":", "self", ".", "headers", "[", "k", "]", "=", "v", "else", ":", ...
Set header to the value
[ "Set", "header", "to", "the", "value" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L129-L135
train
24,046
pyGrowler/Growler
growler/http/response.py
HTTPResponse.links
def links(self, links): """Sets the Link """ s = ['<{}>; rel="{}"'.format(link, rel) for link, rel in links.items()] self.headers['Link'] = ','.join(s)
python
def links(self, links): """Sets the Link """ s = ['<{}>; rel="{}"'.format(link, rel) for link, rel in links.items()] self.headers['Link'] = ','.join(s)
[ "def", "links", "(", "self", ",", "links", ")", ":", "s", "=", "[", "'<{}>; rel=\"{}\"'", ".", "format", "(", "link", ",", "rel", ")", "for", "link", ",", "rel", "in", "links", ".", "items", "(", ")", "]", "self", ".", "headers", "[", "'Link'", "...
Sets the Link
[ "Sets", "the", "Link" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L161-L165
train
24,047
pyGrowler/Growler
growler/http/response.py
HTTPResponse.send_file
def send_file(self, filename, status=200): """ Reads in the file 'filename' and sends bytes to client Parameters ---------- filename : str Filename of the file to read status : int, optional The HTTP status code, defaults to 200 (OK) """ if isinstance(filename, Path) and sys.version_info >= (3, 5): self.message = filename.read_bytes() else: with io.FileIO(str(filename)) as f: self.message = f.read() self.status_code = status self.send_headers() self.write() self.write_eof()
python
def send_file(self, filename, status=200): """ Reads in the file 'filename' and sends bytes to client Parameters ---------- filename : str Filename of the file to read status : int, optional The HTTP status code, defaults to 200 (OK) """ if isinstance(filename, Path) and sys.version_info >= (3, 5): self.message = filename.read_bytes() else: with io.FileIO(str(filename)) as f: self.message = f.read() self.status_code = status self.send_headers() self.write() self.write_eof()
[ "def", "send_file", "(", "self", ",", "filename", ",", "status", "=", "200", ")", ":", "if", "isinstance", "(", "filename", ",", "Path", ")", "and", "sys", ".", "version_info", ">=", "(", "3", ",", "5", ")", ":", "self", ".", "message", "=", "filen...
Reads in the file 'filename' and sends bytes to client Parameters ---------- filename : str Filename of the file to read status : int, optional The HTTP status code, defaults to 200 (OK)
[ "Reads", "in", "the", "file", "filename", "and", "sends", "bytes", "to", "client" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L228-L247
train
24,048
pyGrowler/Growler
growler/http/response.py
Headers.update
def update(self, *args, **kwargs): """ Equivalent to the python dict update method. Update the dictionary with the key/value pairs from other, overwriting existing keys. Args: other (dict): The source of key value pairs to add to headers Keyword Args: All keyword arguments are stored in header directly Returns: None """ for next_dict in chain(args, (kwargs, )): for k, v in next_dict.items(): self[k] = v
python
def update(self, *args, **kwargs): """ Equivalent to the python dict update method. Update the dictionary with the key/value pairs from other, overwriting existing keys. Args: other (dict): The source of key value pairs to add to headers Keyword Args: All keyword arguments are stored in header directly Returns: None """ for next_dict in chain(args, (kwargs, )): for k, v in next_dict.items(): self[k] = v
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "next_dict", "in", "chain", "(", "args", ",", "(", "kwargs", ",", ")", ")", ":", "for", "k", ",", "v", "in", "next_dict", ".", "items", "(", ")", ":", "...
Equivalent to the python dict update method. Update the dictionary with the key/value pairs from other, overwriting existing keys. Args: other (dict): The source of key value pairs to add to headers Keyword Args: All keyword arguments are stored in header directly Returns: None
[ "Equivalent", "to", "the", "python", "dict", "update", "method", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L328-L345
train
24,049
pyGrowler/Growler
growler/http/response.py
Headers.add_header
def add_header(self, key, value, **params): """ Add a header to the collection, including potential parameters. Args: key (str): The name of the header value (str): The value to store under that key params: Option parameters to be appended to the value, automatically formatting them in a standard way """ key = self.escape(key) ci_key = key.casefold() def quoted_params(items): for p in items: param_name = self.escape(p[0]) param_val = self.de_quote(self.escape(p[1])) yield param_name, param_val sorted_items = sorted(params.items()) quoted_iter = ('%s="%s"' % p for p in quoted_params(sorted_items)) param_str = ' '.join(quoted_iter) if param_str: value = "%s; %s" % (value, param_str) self._header_data[ci_key] = (key, value)
python
def add_header(self, key, value, **params): """ Add a header to the collection, including potential parameters. Args: key (str): The name of the header value (str): The value to store under that key params: Option parameters to be appended to the value, automatically formatting them in a standard way """ key = self.escape(key) ci_key = key.casefold() def quoted_params(items): for p in items: param_name = self.escape(p[0]) param_val = self.de_quote(self.escape(p[1])) yield param_name, param_val sorted_items = sorted(params.items()) quoted_iter = ('%s="%s"' % p for p in quoted_params(sorted_items)) param_str = ' '.join(quoted_iter) if param_str: value = "%s; %s" % (value, param_str) self._header_data[ci_key] = (key, value)
[ "def", "add_header", "(", "self", ",", "key", ",", "value", ",", "*", "*", "params", ")", ":", "key", "=", "self", ".", "escape", "(", "key", ")", "ci_key", "=", "key", ".", "casefold", "(", ")", "def", "quoted_params", "(", "items", ")", ":", "f...
Add a header to the collection, including potential parameters. Args: key (str): The name of the header value (str): The value to store under that key params: Option parameters to be appended to the value, automatically formatting them in a standard way
[ "Add", "a", "header", "to", "the", "collection", "including", "potential", "parameters", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L347-L375
train
24,050
pyGrowler/Growler
examples/sessions.py
index
def index(req, res): """ Return root page of website. """ number = req.session.get('counter', -1) req.session['counter'] = int(number) + 1 print(" -- Session '{id}' returned {counter} times".format(**req.session)) msg = "Hello!! You've been here [[%s]] times" % (req.session['counter']) res.send_text(msg) req.session.save()
python
def index(req, res): """ Return root page of website. """ number = req.session.get('counter', -1) req.session['counter'] = int(number) + 1 print(" -- Session '{id}' returned {counter} times".format(**req.session)) msg = "Hello!! You've been here [[%s]] times" % (req.session['counter']) res.send_text(msg) req.session.save()
[ "def", "index", "(", "req", ",", "res", ")", ":", "number", "=", "req", ".", "session", ".", "get", "(", "'counter'", ",", "-", "1", ")", "req", ".", "session", "[", "'counter'", "]", "=", "int", "(", "number", ")", "+", "1", "print", "(", "\" ...
Return root page of website.
[ "Return", "root", "page", "of", "website", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/examples/sessions.py#L21-L30
train
24,051
pyGrowler/Growler
growler/http/request.py
HTTPRequest.body
async def body(self): """ A helper function which blocks until the body has been read completely. Returns the bytes of the body which the user should decode. If the request does not have a body part (i.e. it is a GET request) this function returns None. """ if not isinstance(self._body, bytes): self._body = await self._body return self._body
python
async def body(self): """ A helper function which blocks until the body has been read completely. Returns the bytes of the body which the user should decode. If the request does not have a body part (i.e. it is a GET request) this function returns None. """ if not isinstance(self._body, bytes): self._body = await self._body return self._body
[ "async", "def", "body", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "_body", ",", "bytes", ")", ":", "self", ".", "_body", "=", "await", "self", ".", "_body", "return", "self", ".", "_body" ]
A helper function which blocks until the body has been read completely. Returns the bytes of the body which the user should decode. If the request does not have a body part (i.e. it is a GET request) this function returns None.
[ "A", "helper", "function", "which", "blocks", "until", "the", "body", "has", "been", "read", "completely", ".", "Returns", "the", "bytes", "of", "the", "body", "which", "the", "user", "should", "decode", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/request.py#L63-L74
train
24,052
pyGrowler/Growler
growler/utils/event_manager.py
event_emitter
def event_emitter(cls_=None, *, events=('*', )): """ A class-decorator which will add the specified events and the methods 'on' and 'emit' to the class. """ # create a dictionary from items in the 'events' parameter and with empty # lists as values event_dict = dict.fromkeys(events, []) # if '*' was in the events tuple - then pop it out of the event_dict # and store the fact that we may allow any event name to be added to the # event emitter. allow_any_eventname = event_dict.pop('*', False) == [] def _event_emitter(cls): def on(self, name, callback): """ Add a callback to the event named 'name'. Returns the object for chained 'on' calls. """ if not (callable(callback) or isawaitable(callback)): raise ValueError("Callback not callable: %r" % callback) try: event_dict[name].append(callback) except KeyError: if allow_any_eventname: event_dict[name] = [callback] else: msg = "Event Emitter has no event {!r}".format(name) raise KeyError(msg) return self async def emit(self, name): """ Coroutine which executes each of the callbacks added to the event identified by 'name' """ for cb in event_dict[name]: if isawaitable(cb): await cb else: cb() cls.on = on cls.emit = emit return cls if cls_ is None: return _event_emitter else: return _event_emitter(cls_)
python
def event_emitter(cls_=None, *, events=('*', )): """ A class-decorator which will add the specified events and the methods 'on' and 'emit' to the class. """ # create a dictionary from items in the 'events' parameter and with empty # lists as values event_dict = dict.fromkeys(events, []) # if '*' was in the events tuple - then pop it out of the event_dict # and store the fact that we may allow any event name to be added to the # event emitter. allow_any_eventname = event_dict.pop('*', False) == [] def _event_emitter(cls): def on(self, name, callback): """ Add a callback to the event named 'name'. Returns the object for chained 'on' calls. """ if not (callable(callback) or isawaitable(callback)): raise ValueError("Callback not callable: %r" % callback) try: event_dict[name].append(callback) except KeyError: if allow_any_eventname: event_dict[name] = [callback] else: msg = "Event Emitter has no event {!r}".format(name) raise KeyError(msg) return self async def emit(self, name): """ Coroutine which executes each of the callbacks added to the event identified by 'name' """ for cb in event_dict[name]: if isawaitable(cb): await cb else: cb() cls.on = on cls.emit = emit return cls if cls_ is None: return _event_emitter else: return _event_emitter(cls_)
[ "def", "event_emitter", "(", "cls_", "=", "None", ",", "*", ",", "events", "=", "(", "'*'", ",", ")", ")", ":", "# create a dictionary from items in the 'events' parameter and with empty", "# lists as values", "event_dict", "=", "dict", ".", "fromkeys", "(", "events...
A class-decorator which will add the specified events and the methods 'on' and 'emit' to the class.
[ "A", "class", "-", "decorator", "which", "will", "add", "the", "specified", "events", "and", "the", "methods", "on", "and", "emit", "to", "the", "class", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/utils/event_manager.py#L9-L64
train
24,053
pyGrowler/Growler
growler/utils/event_manager.py
Events.on
def on(self, name, _callback=None): """ Add a callback to the event named 'name'. Returns callback object for decorationable calls. """ # this is being used as a decorator if _callback is None: return lambda cb: self.on(name, cb) if not (callable(_callback) or isawaitable(_callback)): msg = "Callback not callable: {0!r}".format(_callback) raise ValueError(msg) self._event_list[name].append(_callback) return _callback
python
def on(self, name, _callback=None): """ Add a callback to the event named 'name'. Returns callback object for decorationable calls. """ # this is being used as a decorator if _callback is None: return lambda cb: self.on(name, cb) if not (callable(_callback) or isawaitable(_callback)): msg = "Callback not callable: {0!r}".format(_callback) raise ValueError(msg) self._event_list[name].append(_callback) return _callback
[ "def", "on", "(", "self", ",", "name", ",", "_callback", "=", "None", ")", ":", "# this is being used as a decorator", "if", "_callback", "is", "None", ":", "return", "lambda", "cb", ":", "self", ".", "on", "(", "name", ",", "cb", ")", "if", "not", "("...
Add a callback to the event named 'name'. Returns callback object for decorationable calls.
[ "Add", "a", "callback", "to", "the", "event", "named", "name", ".", "Returns", "callback", "object", "for", "decorationable", "calls", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/utils/event_manager.py#L109-L124
train
24,054
pyGrowler/Growler
growler/utils/event_manager.py
Events.emit
async def emit(self, name): """ Add a callback to the event named 'name'. Returns this object for chained 'on' calls. """ for cb in self._event_list[name]: if isawaitable(cb): await cb else: cb()
python
async def emit(self, name): """ Add a callback to the event named 'name'. Returns this object for chained 'on' calls. """ for cb in self._event_list[name]: if isawaitable(cb): await cb else: cb()
[ "async", "def", "emit", "(", "self", ",", "name", ")", ":", "for", "cb", "in", "self", ".", "_event_list", "[", "name", "]", ":", "if", "isawaitable", "(", "cb", ")", ":", "await", "cb", "else", ":", "cb", "(", ")" ]
Add a callback to the event named 'name'. Returns this object for chained 'on' calls.
[ "Add", "a", "callback", "to", "the", "event", "named", "name", ".", "Returns", "this", "object", "for", "chained", "on", "calls", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/utils/event_manager.py#L126-L135
train
24,055
pyGrowler/Growler
growler/core/router.py
routerify
def routerify(obj): """ Scan through attributes of object parameter looking for any which match a route signature. A router will be created and added to the object with parameter. Args: obj (object): The object (with attributes) from which to setup a router Returns: Router: The router created from attributes in the object. """ router = Router() for info in get_routing_attributes(obj): router.add_route(*info) obj.__growler_router = router return router
python
def routerify(obj): """ Scan through attributes of object parameter looking for any which match a route signature. A router will be created and added to the object with parameter. Args: obj (object): The object (with attributes) from which to setup a router Returns: Router: The router created from attributes in the object. """ router = Router() for info in get_routing_attributes(obj): router.add_route(*info) obj.__growler_router = router return router
[ "def", "routerify", "(", "obj", ")", ":", "router", "=", "Router", "(", ")", "for", "info", "in", "get_routing_attributes", "(", "obj", ")", ":", "router", ".", "add_route", "(", "*", "info", ")", "obj", ".", "__growler_router", "=", "router", "return", ...
Scan through attributes of object parameter looking for any which match a route signature. A router will be created and added to the object with parameter. Args: obj (object): The object (with attributes) from which to setup a router Returns: Router: The router created from attributes in the object.
[ "Scan", "through", "attributes", "of", "object", "parameter", "looking", "for", "any", "which", "match", "a", "route", "signature", ".", "A", "router", "will", "be", "created", "and", "added", "to", "the", "object", "with", "parameter", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/router.py#L281-L298
train
24,056
pyGrowler/Growler
growler/core/router.py
Router._add_route
def _add_route(self, method, path, middleware=None): """The implementation of adding a route""" if middleware is not None: self.add(method, path, middleware) return self else: # return a lambda that will return the 'func' argument return lambda func: ( self.add(method, path, func), func )[1]
python
def _add_route(self, method, path, middleware=None): """The implementation of adding a route""" if middleware is not None: self.add(method, path, middleware) return self else: # return a lambda that will return the 'func' argument return lambda func: ( self.add(method, path, func), func )[1]
[ "def", "_add_route", "(", "self", ",", "method", ",", "path", ",", "middleware", "=", "None", ")", ":", "if", "middleware", "is", "not", "None", ":", "self", ".", "add", "(", "method", ",", "path", ",", "middleware", ")", "return", "self", "else", ":...
The implementation of adding a route
[ "The", "implementation", "of", "adding", "a", "route" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/router.py#L68-L78
train
24,057
pyGrowler/Growler
growler/core/router.py
Router.use
def use(self, middleware, path=None): """ Call the provided middleware upon requests matching the path. If path is not provided or None, all requests will match. Args: middleware (callable): Callable with the signature ``(res, req) -> None`` path (Optional[str or regex]): a specific path the request must match for the middleware to be called. Returns: This router """ self.log.info(" Using middleware {}", middleware) if path is None: path = MiddlewareChain.ROOT_PATTERN self.add(HTTPMethod.ALL, path, middleware) return self
python
def use(self, middleware, path=None): """ Call the provided middleware upon requests matching the path. If path is not provided or None, all requests will match. Args: middleware (callable): Callable with the signature ``(res, req) -> None`` path (Optional[str or regex]): a specific path the request must match for the middleware to be called. Returns: This router """ self.log.info(" Using middleware {}", middleware) if path is None: path = MiddlewareChain.ROOT_PATTERN self.add(HTTPMethod.ALL, path, middleware) return self
[ "def", "use", "(", "self", ",", "middleware", ",", "path", "=", "None", ")", ":", "self", ".", "log", ".", "info", "(", "\" Using middleware {}\"", ",", "middleware", ")", "if", "path", "is", "None", ":", "path", "=", "MiddlewareChain", ".", "ROOT_PATTER...
Call the provided middleware upon requests matching the path. If path is not provided or None, all requests will match. Args: middleware (callable): Callable with the signature ``(res, req) -> None`` path (Optional[str or regex]): a specific path the request must match for the middleware to be called. Returns: This router
[ "Call", "the", "provided", "middleware", "upon", "requests", "matching", "the", "path", ".", "If", "path", "is", "not", "provided", "or", "None", "all", "requests", "will", "match", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/router.py#L86-L103
train
24,058
pyGrowler/Growler
growler/core/router.py
Router.sinatra_path_to_regex
def sinatra_path_to_regex(cls, path): """ Converts a sinatra-style path to a regex with named parameters. """ # Return the path if already a (compiled) regex if type(path) is cls.regex_type: return path # Build a regular expression string which is split on the '/' character regex = [ "(?P<{}>\w+)".format(segment[1:]) if cls.sinatra_param_regex.match(segment) else segment for segment in path.split('/') ] return re.compile('/'.join(regex))
python
def sinatra_path_to_regex(cls, path): """ Converts a sinatra-style path to a regex with named parameters. """ # Return the path if already a (compiled) regex if type(path) is cls.regex_type: return path # Build a regular expression string which is split on the '/' character regex = [ "(?P<{}>\w+)".format(segment[1:]) if cls.sinatra_param_regex.match(segment) else segment for segment in path.split('/') ] return re.compile('/'.join(regex))
[ "def", "sinatra_path_to_regex", "(", "cls", ",", "path", ")", ":", "# Return the path if already a (compiled) regex", "if", "type", "(", "path", ")", "is", "cls", ".", "regex_type", ":", "return", "path", "# Build a regular expression string which is split on the '/' charac...
Converts a sinatra-style path to a regex with named parameters.
[ "Converts", "a", "sinatra", "-", "style", "path", "to", "a", "regex", "with", "named", "parameters", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/router.py#L137-L153
train
24,059
pyGrowler/Growler
growler/http/parser.py
Parser._parse_and_store_headers
def _parse_and_store_headers(self): """ Coroutine used retrieve header data and parse each header until the body is found. """ header_storage = self._store_header() header_storage.send(None) for header_line in self._next_header_line(): if header_line is None: self._buffer += yield continue else: header_storage.send(header_line) self.headers = header_storage.send(None)
python
def _parse_and_store_headers(self): """ Coroutine used retrieve header data and parse each header until the body is found. """ header_storage = self._store_header() header_storage.send(None) for header_line in self._next_header_line(): if header_line is None: self._buffer += yield continue else: header_storage.send(header_line) self.headers = header_storage.send(None)
[ "def", "_parse_and_store_headers", "(", "self", ")", ":", "header_storage", "=", "self", ".", "_store_header", "(", ")", "header_storage", ".", "send", "(", "None", ")", "for", "header_line", "in", "self", ".", "_next_header_line", "(", ")", ":", "if", "head...
Coroutine used retrieve header data and parse each header until the body is found.
[ "Coroutine", "used", "retrieve", "header", "data", "and", "parse", "each", "header", "until", "the", "body", "is", "found", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/parser.py#L140-L156
train
24,060
pyGrowler/Growler
growler/http/parser.py
Parser._store_header
def _store_header(self): """ Logic & state behind storing headers. This is a coroutine that should be sent header lines in the usual fashion. Sending it None will indicate there are no more lines, and the dictionary of headers will be returned. """ key, value = None, None headers = [] header_line = yield while header_line is not None: if not header_line.startswith((b' ', b'\t')): if key: headers.append((key, value)) key, value = self.split_header_key_value(header_line) key = key.upper() else: next_val = header_line.strip().decode() if isinstance(value, list): value.append(next_val) else: value = [value, next_val] header_line = yield if key is not None: headers.append((key, value)) yield dict(headers)
python
def _store_header(self): """ Logic & state behind storing headers. This is a coroutine that should be sent header lines in the usual fashion. Sending it None will indicate there are no more lines, and the dictionary of headers will be returned. """ key, value = None, None headers = [] header_line = yield while header_line is not None: if not header_line.startswith((b' ', b'\t')): if key: headers.append((key, value)) key, value = self.split_header_key_value(header_line) key = key.upper() else: next_val = header_line.strip().decode() if isinstance(value, list): value.append(next_val) else: value = [value, next_val] header_line = yield if key is not None: headers.append((key, value)) yield dict(headers)
[ "def", "_store_header", "(", "self", ")", ":", "key", ",", "value", "=", "None", ",", "None", "headers", "=", "[", "]", "header_line", "=", "yield", "while", "header_line", "is", "not", "None", ":", "if", "not", "header_line", ".", "startswith", "(", "...
Logic & state behind storing headers. This is a coroutine that should be sent header lines in the usual fashion. Sending it None will indicate there are no more lines, and the dictionary of headers will be returned.
[ "Logic", "&", "state", "behind", "storing", "headers", ".", "This", "is", "a", "coroutine", "that", "should", "be", "sent", "header", "lines", "in", "the", "usual", "fashion", ".", "Sending", "it", "None", "will", "indicate", "there", "are", "no", "more", ...
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/parser.py#L158-L185
train
24,061
pyGrowler/Growler
growler/http/parser.py
Parser._store_request_line
def _store_request_line(self, req_line): """ Splits the request line given into three components. Ensures that the version and method are valid for this server, and uses the urllib.parse function to parse the request URI. Note: This method has the additional side effect of updating all request line related attributes of the parser. Returns: tuple: Tuple containing the parsed (method, parsed_url, version) Raises: HTTPErrorBadRequest: If request line is invalid HTTPErrorNotImplemented: If HTTP method is not recognized HTTPErrorVersionNotSupported: If HTTP version is not recognized. """ if not isinstance(req_line, str): try: req_line = self.raw_request_line = req_line.decode() except UnicodeDecodeError: raise HTTPErrorBadRequest try: self.method_str, self.original_url, self.version = req_line.split() except ValueError: raise HTTPErrorBadRequest() if self.version not in ('HTTP/1.1', 'HTTP/1.0'): raise HTTPErrorVersionNotSupported(self.version) # allow lowercase methodname? # self.method_str = self.method_str.upper() # save 'method' and get the correct function to finish processing try: self.method = HTTPMethod[self.method_str] except KeyError: # Method not found err = "Unknown HTTP Method '{}'".format(self.method_str) raise HTTPErrorNotImplemented(err) self._process_headers = { HTTPMethod.GET: self.process_get_headers, HTTPMethod.POST: self.process_post_headers }.get(self.method, lambda data: True) _, num_str = self.version.split('/', 1) self.HTTP_VERSION = tuple(num_str.split('.')) self.version_number = float(num_str) self.parsed_url = urlparse(self.original_url) self.path = unquote(self.parsed_url.path) self.query = parse_qs(self.parsed_url.query) return self.method, self.parsed_url, self.version
python
def _store_request_line(self, req_line): """ Splits the request line given into three components. Ensures that the version and method are valid for this server, and uses the urllib.parse function to parse the request URI. Note: This method has the additional side effect of updating all request line related attributes of the parser. Returns: tuple: Tuple containing the parsed (method, parsed_url, version) Raises: HTTPErrorBadRequest: If request line is invalid HTTPErrorNotImplemented: If HTTP method is not recognized HTTPErrorVersionNotSupported: If HTTP version is not recognized. """ if not isinstance(req_line, str): try: req_line = self.raw_request_line = req_line.decode() except UnicodeDecodeError: raise HTTPErrorBadRequest try: self.method_str, self.original_url, self.version = req_line.split() except ValueError: raise HTTPErrorBadRequest() if self.version not in ('HTTP/1.1', 'HTTP/1.0'): raise HTTPErrorVersionNotSupported(self.version) # allow lowercase methodname? # self.method_str = self.method_str.upper() # save 'method' and get the correct function to finish processing try: self.method = HTTPMethod[self.method_str] except KeyError: # Method not found err = "Unknown HTTP Method '{}'".format(self.method_str) raise HTTPErrorNotImplemented(err) self._process_headers = { HTTPMethod.GET: self.process_get_headers, HTTPMethod.POST: self.process_post_headers }.get(self.method, lambda data: True) _, num_str = self.version.split('/', 1) self.HTTP_VERSION = tuple(num_str.split('.')) self.version_number = float(num_str) self.parsed_url = urlparse(self.original_url) self.path = unquote(self.parsed_url.path) self.query = parse_qs(self.parsed_url.query) return self.method, self.parsed_url, self.version
[ "def", "_store_request_line", "(", "self", ",", "req_line", ")", ":", "if", "not", "isinstance", "(", "req_line", ",", "str", ")", ":", "try", ":", "req_line", "=", "self", ".", "raw_request_line", "=", "req_line", ".", "decode", "(", ")", "except", "Uni...
Splits the request line given into three components. Ensures that the version and method are valid for this server, and uses the urllib.parse function to parse the request URI. Note: This method has the additional side effect of updating all request line related attributes of the parser. Returns: tuple: Tuple containing the parsed (method, parsed_url, version) Raises: HTTPErrorBadRequest: If request line is invalid HTTPErrorNotImplemented: If HTTP method is not recognized HTTPErrorVersionNotSupported: If HTTP version is not recognized.
[ "Splits", "the", "request", "line", "given", "into", "three", "components", ".", "Ensures", "that", "the", "version", "and", "method", "are", "valid", "for", "this", "server", "and", "uses", "the", "urllib", ".", "parse", "function", "to", "parse", "the", ...
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/parser.py#L219-L277
train
24,062
pyGrowler/Growler
growler/http/parser.py
Parser.determine_newline
def determine_newline(data): """ Looks for a newline character in bytestring parameter 'data'. Currently only looks for strings '\r\n', '\n'. If '\n' is found at the first position of the string, this raises an exception. Parameters: data (bytes): The data to be searched Returns: None: If no-newline is found One of '\n', '\r\n': whichever is found first """ line_end_pos = data.find(b'\n') if line_end_pos == -1: return None elif line_end_pos == 0: return b'\n' prev_char = data[line_end_pos - 1] return b'\r\n' if (prev_char is b'\r'[0]) else b'\n'
python
def determine_newline(data): """ Looks for a newline character in bytestring parameter 'data'. Currently only looks for strings '\r\n', '\n'. If '\n' is found at the first position of the string, this raises an exception. Parameters: data (bytes): The data to be searched Returns: None: If no-newline is found One of '\n', '\r\n': whichever is found first """ line_end_pos = data.find(b'\n') if line_end_pos == -1: return None elif line_end_pos == 0: return b'\n' prev_char = data[line_end_pos - 1] return b'\r\n' if (prev_char is b'\r'[0]) else b'\n'
[ "def", "determine_newline", "(", "data", ")", ":", "line_end_pos", "=", "data", ".", "find", "(", "b'\\n'", ")", "if", "line_end_pos", "==", "-", "1", ":", "return", "None", "elif", "line_end_pos", "==", "0", ":", "return", "b'\\n'", "prev_char", "=", "d...
Looks for a newline character in bytestring parameter 'data'. Currently only looks for strings '\r\n', '\n'. If '\n' is found at the first position of the string, this raises an exception. Parameters: data (bytes): The data to be searched Returns: None: If no-newline is found One of '\n', '\r\n': whichever is found first
[ "Looks", "for", "a", "newline", "character", "in", "bytestring", "parameter", "data", ".", "Currently", "only", "looks", "for", "strings", "\\", "r", "\\", "n", "\\", "n", ".", "If", "\\", "n", "is", "found", "at", "the", "first", "position", "of", "th...
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/parser.py#L280-L303
train
24,063
pyGrowler/Growler
growler/core/middleware_chain.py
MiddlewareNode.path_split
def path_split(self, path): """ Splits a path into the part matching this middleware and the part remaining. If path does not exist, it returns a pair of None values. If the regex matches the entire pair, the second item in returned tuple is None. Args: path (str): The url to split Returns: Tuple matching_path (str or None): The beginning of the path which matches this middleware or None if it does not match remaining_path (str or None): The 'rest' of the path, following the matching part """ match = self.path.match(path) if match is None: return None, None # split string at position the_rest = path[match.end():] # ensure we split at a '/' character if the_rest: if match.group().endswith('/'): pass elif the_rest.startswith('/'): pass else: return None, None if self.IGNORE_TRAILING_SLASH and the_rest == '/': the_rest = '' return match, the_rest
python
def path_split(self, path): """ Splits a path into the part matching this middleware and the part remaining. If path does not exist, it returns a pair of None values. If the regex matches the entire pair, the second item in returned tuple is None. Args: path (str): The url to split Returns: Tuple matching_path (str or None): The beginning of the path which matches this middleware or None if it does not match remaining_path (str or None): The 'rest' of the path, following the matching part """ match = self.path.match(path) if match is None: return None, None # split string at position the_rest = path[match.end():] # ensure we split at a '/' character if the_rest: if match.group().endswith('/'): pass elif the_rest.startswith('/'): pass else: return None, None if self.IGNORE_TRAILING_SLASH and the_rest == '/': the_rest = '' return match, the_rest
[ "def", "path_split", "(", "self", ",", "path", ")", ":", "match", "=", "self", ".", "path", ".", "match", "(", "path", ")", "if", "match", "is", "None", ":", "return", "None", ",", "None", "# split string at position", "the_rest", "=", "path", "[", "ma...
Splits a path into the part matching this middleware and the part remaining. If path does not exist, it returns a pair of None values. If the regex matches the entire pair, the second item in returned tuple is None. Args: path (str): The url to split Returns: Tuple matching_path (str or None): The beginning of the path which matches this middleware or None if it does not match remaining_path (str or None): The 'rest' of the path, following the matching part
[ "Splits", "a", "path", "into", "the", "part", "matching", "this", "middleware", "and", "the", "part", "remaining", ".", "If", "path", "does", "not", "exist", "it", "returns", "a", "pair", "of", "None", "values", ".", "If", "the", "regex", "matches", "the...
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/middleware_chain.py#L66-L101
train
24,064
pyGrowler/Growler
growler/core/middleware_chain.py
MiddlewareChain.find_matching_middleware
def find_matching_middleware(self, method, path): """ Iterator handling the matching of middleware against a method+path pair. Yields the middleware, and the """ for mw in self.mw_list: if not mw.matches_method(method): continue # get the path matching this middleware and the 'rest' of the url # (i.e. the part that comes AFTER the match) to be potentially # matched later by a subchain path_match, rest_url = mw.path_split(path) if self.should_skip_middleware(mw, path_match, rest_url): continue yield mw, path_match, rest_url
python
def find_matching_middleware(self, method, path): """ Iterator handling the matching of middleware against a method+path pair. Yields the middleware, and the """ for mw in self.mw_list: if not mw.matches_method(method): continue # get the path matching this middleware and the 'rest' of the url # (i.e. the part that comes AFTER the match) to be potentially # matched later by a subchain path_match, rest_url = mw.path_split(path) if self.should_skip_middleware(mw, path_match, rest_url): continue yield mw, path_match, rest_url
[ "def", "find_matching_middleware", "(", "self", ",", "method", ",", "path", ")", ":", "for", "mw", "in", "self", ".", "mw_list", ":", "if", "not", "mw", ".", "matches_method", "(", "method", ")", ":", "continue", "# get the path matching this middleware and the ...
Iterator handling the matching of middleware against a method+path pair. Yields the middleware, and the
[ "Iterator", "handling", "the", "matching", "of", "middleware", "against", "a", "method", "+", "path", "pair", ".", "Yields", "the", "middleware", "and", "the" ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/middleware_chain.py#L183-L199
train
24,065
pyGrowler/Growler
growler/core/middleware_chain.py
MiddlewareChain.add
def add(self, method_mask, path, func): """ Add a function to the middleware chain. This function is returned when iterating over the chain with matching method and path. Args: method_mask (growler.http.HTTPMethod): A bitwise mask intended to match specific request methods. path (str or regex): An object with which to compare request urls func (callable): The function to be yieled from the generator upon a request matching the method_mask and path """ is_err = len(signature(func).parameters) == 3 is_subchain = isinstance(func, MiddlewareChain) tup = MiddlewareNode(func=func, mask=method_mask, path=path, is_errorhandler=is_err, is_subchain=is_subchain,) self.mw_list.append(tup)
python
def add(self, method_mask, path, func): """ Add a function to the middleware chain. This function is returned when iterating over the chain with matching method and path. Args: method_mask (growler.http.HTTPMethod): A bitwise mask intended to match specific request methods. path (str or regex): An object with which to compare request urls func (callable): The function to be yieled from the generator upon a request matching the method_mask and path """ is_err = len(signature(func).parameters) == 3 is_subchain = isinstance(func, MiddlewareChain) tup = MiddlewareNode(func=func, mask=method_mask, path=path, is_errorhandler=is_err, is_subchain=is_subchain,) self.mw_list.append(tup)
[ "def", "add", "(", "self", ",", "method_mask", ",", "path", ",", "func", ")", ":", "is_err", "=", "len", "(", "signature", "(", "func", ")", ".", "parameters", ")", "==", "3", "is_subchain", "=", "isinstance", "(", "func", ",", "MiddlewareChain", ")", ...
Add a function to the middleware chain. This function is returned when iterating over the chain with matching method and path. Args: method_mask (growler.http.HTTPMethod): A bitwise mask intended to match specific request methods. path (str or regex): An object with which to compare request urls func (callable): The function to be yieled from the generator upon a request matching the method_mask and path
[ "Add", "a", "function", "to", "the", "middleware", "chain", ".", "This", "function", "is", "returned", "when", "iterating", "over", "the", "chain", "with", "matching", "method", "and", "path", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/middleware_chain.py#L232-L251
train
24,066
pyGrowler/Growler
growler/core/middleware_chain.py
MiddlewareChain.count_all
def count_all(self): """ Returns the total number of middleware in this chain and subchains. """ return sum(x.func.count_all() if x.is_subchain else 1 for x in self)
python
def count_all(self): """ Returns the total number of middleware in this chain and subchains. """ return sum(x.func.count_all() if x.is_subchain else 1 for x in self)
[ "def", "count_all", "(", "self", ")", ":", "return", "sum", "(", "x", ".", "func", ".", "count_all", "(", ")", "if", "x", ".", "is_subchain", "else", "1", "for", "x", "in", "self", ")" ]
Returns the total number of middleware in this chain and subchains.
[ "Returns", "the", "total", "number", "of", "middleware", "in", "this", "chain", "and", "subchains", "." ]
90c923ff204f28b86a01d741224987a22f69540f
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/middleware_chain.py#L268-L272
train
24,067
coleifer/django-relationships
relationships/templatetags/relationship_tags.py
if_relationship
def if_relationship(parser, token): """ Determine if a certain type of relationship exists between two users. The ``status`` parameter must be a slug matching either the from_slug, to_slug or symmetrical_slug of a RelationshipStatus. Example:: {% if_relationship from_user to_user "friends" %} Here are pictures of me drinking alcohol {% else %} Sorry coworkers {% endif_relationship %} {% if_relationship from_user to_user "blocking" %} damn seo experts {% endif_relationship %} """ bits = list(token.split_contents()) if len(bits) != 4: raise TemplateSyntaxError("%r takes 3 arguments:\n%s" % (bits[0], if_relationship.__doc__)) end_tag = 'end' + bits[0] nodelist_true = parser.parse(('else', end_tag)) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse((end_tag,)) parser.delete_first_token() else: nodelist_false = template.NodeList() return IfRelationshipNode(nodelist_true, nodelist_false, *bits[1:])
python
def if_relationship(parser, token): """ Determine if a certain type of relationship exists between two users. The ``status`` parameter must be a slug matching either the from_slug, to_slug or symmetrical_slug of a RelationshipStatus. Example:: {% if_relationship from_user to_user "friends" %} Here are pictures of me drinking alcohol {% else %} Sorry coworkers {% endif_relationship %} {% if_relationship from_user to_user "blocking" %} damn seo experts {% endif_relationship %} """ bits = list(token.split_contents()) if len(bits) != 4: raise TemplateSyntaxError("%r takes 3 arguments:\n%s" % (bits[0], if_relationship.__doc__)) end_tag = 'end' + bits[0] nodelist_true = parser.parse(('else', end_tag)) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse((end_tag,)) parser.delete_first_token() else: nodelist_false = template.NodeList() return IfRelationshipNode(nodelist_true, nodelist_false, *bits[1:])
[ "def", "if_relationship", "(", "parser", ",", "token", ")", ":", "bits", "=", "list", "(", "token", ".", "split_contents", "(", ")", ")", "if", "len", "(", "bits", ")", "!=", "4", ":", "raise", "TemplateSyntaxError", "(", "\"%r takes 3 arguments:\\n%s\"", ...
Determine if a certain type of relationship exists between two users. The ``status`` parameter must be a slug matching either the from_slug, to_slug or symmetrical_slug of a RelationshipStatus. Example:: {% if_relationship from_user to_user "friends" %} Here are pictures of me drinking alcohol {% else %} Sorry coworkers {% endif_relationship %} {% if_relationship from_user to_user "blocking" %} damn seo experts {% endif_relationship %}
[ "Determine", "if", "a", "certain", "type", "of", "relationship", "exists", "between", "two", "users", ".", "The", "status", "parameter", "must", "be", "a", "slug", "matching", "either", "the", "from_slug", "to_slug", "or", "symmetrical_slug", "of", "a", "Relat...
f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805
https://github.com/coleifer/django-relationships/blob/f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805/relationships/templatetags/relationship_tags.py#L45-L74
train
24,068
coleifer/django-relationships
relationships/templatetags/relationship_tags.py
add_relationship_url
def add_relationship_url(user, status): """ Generate a url for adding a relationship on a given user. ``user`` is a User object, and ``status`` is either a relationship_status object or a string denoting a RelationshipStatus Usage:: href="{{ user|add_relationship_url:"following" }}" """ if isinstance(status, RelationshipStatus): status = status.from_slug return reverse('relationship_add', args=[user.username, status])
python
def add_relationship_url(user, status): """ Generate a url for adding a relationship on a given user. ``user`` is a User object, and ``status`` is either a relationship_status object or a string denoting a RelationshipStatus Usage:: href="{{ user|add_relationship_url:"following" }}" """ if isinstance(status, RelationshipStatus): status = status.from_slug return reverse('relationship_add', args=[user.username, status])
[ "def", "add_relationship_url", "(", "user", ",", "status", ")", ":", "if", "isinstance", "(", "status", ",", "RelationshipStatus", ")", ":", "status", "=", "status", ".", "from_slug", "return", "reverse", "(", "'relationship_add'", ",", "args", "=", "[", "us...
Generate a url for adding a relationship on a given user. ``user`` is a User object, and ``status`` is either a relationship_status object or a string denoting a RelationshipStatus Usage:: href="{{ user|add_relationship_url:"following" }}"
[ "Generate", "a", "url", "for", "adding", "a", "relationship", "on", "a", "given", "user", ".", "user", "is", "a", "User", "object", "and", "status", "is", "either", "a", "relationship_status", "object", "or", "a", "string", "denoting", "a", "RelationshipStat...
f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805
https://github.com/coleifer/django-relationships/blob/f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805/relationships/templatetags/relationship_tags.py#L78-L90
train
24,069
googlefonts/ufo2ft
Lib/ufo2ft/postProcessor.py
PostProcessor._rename_glyphs_from_ufo
def _rename_glyphs_from_ufo(self): """Rename glyphs using ufo.lib.public.postscriptNames in UFO.""" rename_map = self._build_production_names() otf = self.otf otf.setGlyphOrder([rename_map.get(n, n) for n in otf.getGlyphOrder()]) # we need to compile format 2 'post' table so that the 'extraNames' # attribute is updated with the list of the names outside the # standard Macintosh glyph order; otherwise, if one dumps the font # to TTX directly before compiling first, the post table will not # contain the extraNames. if 'post' in otf and otf['post'].formatType == 2.0: otf['post'].compile(self.otf) if 'CFF ' in otf: cff = otf['CFF '].cff.topDictIndex[0] char_strings = cff.CharStrings.charStrings cff.CharStrings.charStrings = { rename_map.get(n, n): v for n, v in char_strings.items()} cff.charset = [rename_map.get(n, n) for n in cff.charset]
python
def _rename_glyphs_from_ufo(self): """Rename glyphs using ufo.lib.public.postscriptNames in UFO.""" rename_map = self._build_production_names() otf = self.otf otf.setGlyphOrder([rename_map.get(n, n) for n in otf.getGlyphOrder()]) # we need to compile format 2 'post' table so that the 'extraNames' # attribute is updated with the list of the names outside the # standard Macintosh glyph order; otherwise, if one dumps the font # to TTX directly before compiling first, the post table will not # contain the extraNames. if 'post' in otf and otf['post'].formatType == 2.0: otf['post'].compile(self.otf) if 'CFF ' in otf: cff = otf['CFF '].cff.topDictIndex[0] char_strings = cff.CharStrings.charStrings cff.CharStrings.charStrings = { rename_map.get(n, n): v for n, v in char_strings.items()} cff.charset = [rename_map.get(n, n) for n in cff.charset]
[ "def", "_rename_glyphs_from_ufo", "(", "self", ")", ":", "rename_map", "=", "self", ".", "_build_production_names", "(", ")", "otf", "=", "self", ".", "otf", "otf", ".", "setGlyphOrder", "(", "[", "rename_map", ".", "get", "(", "n", ",", "n", ")", "for",...
Rename glyphs using ufo.lib.public.postscriptNames in UFO.
[ "Rename", "glyphs", "using", "ufo", ".", "lib", ".", "public", ".", "postscriptNames", "in", "UFO", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/postProcessor.py#L73-L93
train
24,070
googlefonts/ufo2ft
Lib/ufo2ft/postProcessor.py
PostProcessor._unique_name
def _unique_name(name, seen): """Append incremental '.N' suffix if glyph is a duplicate.""" if name in seen: n = seen[name] while (name + ".%d" % n) in seen: n += 1 seen[name] = n + 1 name += ".%d" % n seen[name] = 1 return name
python
def _unique_name(name, seen): """Append incremental '.N' suffix if glyph is a duplicate.""" if name in seen: n = seen[name] while (name + ".%d" % n) in seen: n += 1 seen[name] = n + 1 name += ".%d" % n seen[name] = 1 return name
[ "def", "_unique_name", "(", "name", ",", "seen", ")", ":", "if", "name", "in", "seen", ":", "n", "=", "seen", "[", "name", "]", "while", "(", "name", "+", "\".%d\"", "%", "n", ")", "in", "seen", ":", "n", "+=", "1", "seen", "[", "name", "]", ...
Append incremental '.N' suffix if glyph is a duplicate.
[ "Append", "incremental", ".", "N", "suffix", "if", "glyph", "is", "a", "duplicate", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/postProcessor.py#L124-L133
train
24,071
googlefonts/ufo2ft
Lib/ufo2ft/postProcessor.py
PostProcessor._build_production_name
def _build_production_name(self, glyph): """Build a production name for a single glyph.""" # use PostScript names from UFO lib if available if self._postscriptNames: production_name = self._postscriptNames.get(glyph.name) return production_name if production_name else glyph.name # use name derived from unicode value unicode_val = glyph.unicode if glyph.unicode is not None: return '%s%04X' % ( 'u' if unicode_val > 0xffff else 'uni', unicode_val) # use production name + last (non-script) suffix if possible parts = glyph.name.rsplit('.', 1) if len(parts) == 2 and parts[0] in self.glyphSet: return '%s.%s' % ( self._build_production_name(self.glyphSet[parts[0]]), parts[1]) # use ligature name, making sure to look up components with suffixes parts = glyph.name.split('.', 1) if len(parts) == 2: liga_parts = ['%s.%s' % (n, parts[1]) for n in parts[0].split('_')] else: liga_parts = glyph.name.split('_') if len(liga_parts) > 1 and all(n in self.glyphSet for n in liga_parts): unicode_vals = [self.glyphSet[n].unicode for n in liga_parts] if all(v and v <= 0xffff for v in unicode_vals): return 'uni' + ''.join('%04X' % v for v in unicode_vals) return '_'.join( self._build_production_name(self.glyphSet[n]) for n in liga_parts) return glyph.name
python
def _build_production_name(self, glyph): """Build a production name for a single glyph.""" # use PostScript names from UFO lib if available if self._postscriptNames: production_name = self._postscriptNames.get(glyph.name) return production_name if production_name else glyph.name # use name derived from unicode value unicode_val = glyph.unicode if glyph.unicode is not None: return '%s%04X' % ( 'u' if unicode_val > 0xffff else 'uni', unicode_val) # use production name + last (non-script) suffix if possible parts = glyph.name.rsplit('.', 1) if len(parts) == 2 and parts[0] in self.glyphSet: return '%s.%s' % ( self._build_production_name(self.glyphSet[parts[0]]), parts[1]) # use ligature name, making sure to look up components with suffixes parts = glyph.name.split('.', 1) if len(parts) == 2: liga_parts = ['%s.%s' % (n, parts[1]) for n in parts[0].split('_')] else: liga_parts = glyph.name.split('_') if len(liga_parts) > 1 and all(n in self.glyphSet for n in liga_parts): unicode_vals = [self.glyphSet[n].unicode for n in liga_parts] if all(v and v <= 0xffff for v in unicode_vals): return 'uni' + ''.join('%04X' % v for v in unicode_vals) return '_'.join( self._build_production_name(self.glyphSet[n]) for n in liga_parts) return glyph.name
[ "def", "_build_production_name", "(", "self", ",", "glyph", ")", ":", "# use PostScript names from UFO lib if available", "if", "self", ".", "_postscriptNames", ":", "production_name", "=", "self", ".", "_postscriptNames", ".", "get", "(", "glyph", ".", "name", ")",...
Build a production name for a single glyph.
[ "Build", "a", "production", "name", "for", "a", "single", "glyph", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/postProcessor.py#L135-L168
train
24,072
googlefonts/ufo2ft
Lib/ufo2ft/featureWriters/ast.py
makeFeaClassName
def makeFeaClassName(name, existingClassNames=None): """Make a glyph class name which is legal to use in feature text. Ensures the name only includes characters in "A-Za-z0-9._", and isn't already defined. """ name = re.sub(r"[^A-Za-z0-9._]", r"", name) if existingClassNames is None: return name i = 1 origName = name while name in existingClassNames: name = "%s_%d" % (origName, i) i += 1 return name
python
def makeFeaClassName(name, existingClassNames=None): """Make a glyph class name which is legal to use in feature text. Ensures the name only includes characters in "A-Za-z0-9._", and isn't already defined. """ name = re.sub(r"[^A-Za-z0-9._]", r"", name) if existingClassNames is None: return name i = 1 origName = name while name in existingClassNames: name = "%s_%d" % (origName, i) i += 1 return name
[ "def", "makeFeaClassName", "(", "name", ",", "existingClassNames", "=", "None", ")", ":", "name", "=", "re", ".", "sub", "(", "r\"[^A-Za-z0-9._]\"", ",", "r\"\"", ",", "name", ")", "if", "existingClassNames", "is", "None", ":", "return", "name", "i", "=", ...
Make a glyph class name which is legal to use in feature text. Ensures the name only includes characters in "A-Za-z0-9._", and isn't already defined.
[ "Make", "a", "glyph", "class", "name", "which", "is", "legal", "to", "use", "in", "feature", "text", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureWriters/ast.py#L128-L142
train
24,073
googlefonts/ufo2ft
Lib/ufo2ft/featureWriters/ast.py
addLookupReference
def addLookupReference( feature, lookup, script=None, languages=None, exclude_dflt=False ): """Shortcut for addLookupReferences, but for a single lookup. """ return addLookupReferences( feature, (lookup,), script=script, languages=languages, exclude_dflt=exclude_dflt, )
python
def addLookupReference( feature, lookup, script=None, languages=None, exclude_dflt=False ): """Shortcut for addLookupReferences, but for a single lookup. """ return addLookupReferences( feature, (lookup,), script=script, languages=languages, exclude_dflt=exclude_dflt, )
[ "def", "addLookupReference", "(", "feature", ",", "lookup", ",", "script", "=", "None", ",", "languages", "=", "None", ",", "exclude_dflt", "=", "False", ")", ":", "return", "addLookupReferences", "(", "feature", ",", "(", "lookup", ",", ")", ",", "script"...
Shortcut for addLookupReferences, but for a single lookup.
[ "Shortcut", "for", "addLookupReferences", "but", "for", "a", "single", "lookup", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureWriters/ast.py#L184-L195
train
24,074
googlefonts/ufo2ft
Lib/ufo2ft/fontInfoData.py
openTypeHeadCreatedFallback
def openTypeHeadCreatedFallback(info): """ Fallback to the environment variable SOURCE_DATE_EPOCH if set, otherwise now. """ if "SOURCE_DATE_EPOCH" in os.environ: t = datetime.utcfromtimestamp(int(os.environ["SOURCE_DATE_EPOCH"])) return t.strftime(_date_format) else: return dateStringForNow()
python
def openTypeHeadCreatedFallback(info): """ Fallback to the environment variable SOURCE_DATE_EPOCH if set, otherwise now. """ if "SOURCE_DATE_EPOCH" in os.environ: t = datetime.utcfromtimestamp(int(os.environ["SOURCE_DATE_EPOCH"])) return t.strftime(_date_format) else: return dateStringForNow()
[ "def", "openTypeHeadCreatedFallback", "(", "info", ")", ":", "if", "\"SOURCE_DATE_EPOCH\"", "in", "os", ".", "environ", ":", "t", "=", "datetime", ".", "utcfromtimestamp", "(", "int", "(", "os", ".", "environ", "[", "\"SOURCE_DATE_EPOCH\"", "]", ")", ")", "r...
Fallback to the environment variable SOURCE_DATE_EPOCH if set, otherwise now.
[ "Fallback", "to", "the", "environment", "variable", "SOURCE_DATE_EPOCH", "if", "set", "otherwise", "now", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/fontInfoData.py#L79-L88
train
24,075
googlefonts/ufo2ft
Lib/ufo2ft/fontInfoData.py
preflightInfo
def preflightInfo(info): """ Returns a dict containing two items. The value for each item will be a list of info attribute names. ================== === missingRequired Required data that is missing. missingRecommended Recommended data that is missing. ================== === """ missingRequired = set() missingRecommended = set() for attr in requiredAttributes: if not hasattr(info, attr) or getattr(info, attr) is None: missingRequired.add(attr) for attr in recommendedAttributes: if not hasattr(info, attr) or getattr(info, attr) is None: missingRecommended.add(attr) return dict(missingRequired=missingRequired, missingRecommended=missingRecommended)
python
def preflightInfo(info): """ Returns a dict containing two items. The value for each item will be a list of info attribute names. ================== === missingRequired Required data that is missing. missingRecommended Recommended data that is missing. ================== === """ missingRequired = set() missingRecommended = set() for attr in requiredAttributes: if not hasattr(info, attr) or getattr(info, attr) is None: missingRequired.add(attr) for attr in recommendedAttributes: if not hasattr(info, attr) or getattr(info, attr) is None: missingRecommended.add(attr) return dict(missingRequired=missingRequired, missingRecommended=missingRecommended)
[ "def", "preflightInfo", "(", "info", ")", ":", "missingRequired", "=", "set", "(", ")", "missingRecommended", "=", "set", "(", ")", "for", "attr", "in", "requiredAttributes", ":", "if", "not", "hasattr", "(", "info", ",", "attr", ")", "or", "getattr", "(...
Returns a dict containing two items. The value for each item will be a list of info attribute names. ================== === missingRequired Required data that is missing. missingRecommended Recommended data that is missing. ================== ===
[ "Returns", "a", "dict", "containing", "two", "items", ".", "The", "value", "for", "each", "item", "will", "be", "a", "list", "of", "info", "attribute", "names", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/fontInfoData.py#L466-L484
train
24,076
coleifer/django-relationships
relationships/models.py
RelationshipManager.add
def add(self, user, status=None, symmetrical=False): """ Add a relationship from one user to another with the given status, which defaults to "following". Adding a relationship is by default asymmetrical (akin to following someone on twitter). Specify a symmetrical relationship (akin to being friends on facebook) by passing in :param:`symmetrical` = True .. note:: If :param:`symmetrical` is set, the function will return a tuple containing the two relationship objects created """ if not status: status = RelationshipStatus.objects.following() relationship, created = Relationship.objects.get_or_create( from_user=self.instance, to_user=user, status=status, site=Site.objects.get_current() ) if symmetrical: return (relationship, user.relationships.add(self.instance, status, False)) else: return relationship
python
def add(self, user, status=None, symmetrical=False): """ Add a relationship from one user to another with the given status, which defaults to "following". Adding a relationship is by default asymmetrical (akin to following someone on twitter). Specify a symmetrical relationship (akin to being friends on facebook) by passing in :param:`symmetrical` = True .. note:: If :param:`symmetrical` is set, the function will return a tuple containing the two relationship objects created """ if not status: status = RelationshipStatus.objects.following() relationship, created = Relationship.objects.get_or_create( from_user=self.instance, to_user=user, status=status, site=Site.objects.get_current() ) if symmetrical: return (relationship, user.relationships.add(self.instance, status, False)) else: return relationship
[ "def", "add", "(", "self", ",", "user", ",", "status", "=", "None", ",", "symmetrical", "=", "False", ")", ":", "if", "not", "status", ":", "status", "=", "RelationshipStatus", ".", "objects", ".", "following", "(", ")", "relationship", ",", "created", ...
Add a relationship from one user to another with the given status, which defaults to "following". Adding a relationship is by default asymmetrical (akin to following someone on twitter). Specify a symmetrical relationship (akin to being friends on facebook) by passing in :param:`symmetrical` = True .. note:: If :param:`symmetrical` is set, the function will return a tuple containing the two relationship objects created
[ "Add", "a", "relationship", "from", "one", "user", "to", "another", "with", "the", "given", "status", "which", "defaults", "to", "following", "." ]
f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805
https://github.com/coleifer/django-relationships/blob/f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805/relationships/models.py#L83-L110
train
24,077
coleifer/django-relationships
relationships/models.py
RelationshipManager.remove
def remove(self, user, status=None, symmetrical=False): """ Remove a relationship from one user to another, with the same caveats and behavior as adding a relationship. """ if not status: status = RelationshipStatus.objects.following() res = Relationship.objects.filter( from_user=self.instance, to_user=user, status=status, site__pk=settings.SITE_ID ).delete() if symmetrical: return (res, user.relationships.remove(self.instance, status, False)) else: return res
python
def remove(self, user, status=None, symmetrical=False): """ Remove a relationship from one user to another, with the same caveats and behavior as adding a relationship. """ if not status: status = RelationshipStatus.objects.following() res = Relationship.objects.filter( from_user=self.instance, to_user=user, status=status, site__pk=settings.SITE_ID ).delete() if symmetrical: return (res, user.relationships.remove(self.instance, status, False)) else: return res
[ "def", "remove", "(", "self", ",", "user", ",", "status", "=", "None", ",", "symmetrical", "=", "False", ")", ":", "if", "not", "status", ":", "status", "=", "RelationshipStatus", ".", "objects", ".", "following", "(", ")", "res", "=", "Relationship", ...
Remove a relationship from one user to another, with the same caveats and behavior as adding a relationship.
[ "Remove", "a", "relationship", "from", "one", "user", "to", "another", "with", "the", "same", "caveats", "and", "behavior", "as", "adding", "a", "relationship", "." ]
f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805
https://github.com/coleifer/django-relationships/blob/f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805/relationships/models.py#L112-L130
train
24,078
coleifer/django-relationships
relationships/models.py
RelationshipManager.get_relationships
def get_relationships(self, status, symmetrical=False): """ Returns a QuerySet of user objects with which the given user has established a relationship. """ query = self._get_from_query(status) if symmetrical: query.update(self._get_to_query(status)) return User.objects.filter(**query)
python
def get_relationships(self, status, symmetrical=False): """ Returns a QuerySet of user objects with which the given user has established a relationship. """ query = self._get_from_query(status) if symmetrical: query.update(self._get_to_query(status)) return User.objects.filter(**query)
[ "def", "get_relationships", "(", "self", ",", "status", ",", "symmetrical", "=", "False", ")", ":", "query", "=", "self", ".", "_get_from_query", "(", "status", ")", "if", "symmetrical", ":", "query", ".", "update", "(", "self", ".", "_get_to_query", "(", ...
Returns a QuerySet of user objects with which the given user has established a relationship.
[ "Returns", "a", "QuerySet", "of", "user", "objects", "with", "which", "the", "given", "user", "has", "established", "a", "relationship", "." ]
f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805
https://github.com/coleifer/django-relationships/blob/f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805/relationships/models.py#L146-L156
train
24,079
coleifer/django-relationships
relationships/models.py
RelationshipManager.only_to
def only_to(self, status): """ Returns a QuerySet of user objects who have created a relationship to the given user, but which the given user has not reciprocated """ from_relationships = self.get_relationships(status) to_relationships = self.get_related_to(status) return to_relationships.exclude(pk__in=from_relationships.values_list('pk'))
python
def only_to(self, status): """ Returns a QuerySet of user objects who have created a relationship to the given user, but which the given user has not reciprocated """ from_relationships = self.get_relationships(status) to_relationships = self.get_related_to(status) return to_relationships.exclude(pk__in=from_relationships.values_list('pk'))
[ "def", "only_to", "(", "self", ",", "status", ")", ":", "from_relationships", "=", "self", ".", "get_relationships", "(", "status", ")", "to_relationships", "=", "self", ".", "get_related_to", "(", "status", ")", "return", "to_relationships", ".", "exclude", "...
Returns a QuerySet of user objects who have created a relationship to the given user, but which the given user has not reciprocated
[ "Returns", "a", "QuerySet", "of", "user", "objects", "who", "have", "created", "a", "relationship", "to", "the", "given", "user", "but", "which", "the", "given", "user", "has", "not", "reciprocated" ]
f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805
https://github.com/coleifer/django-relationships/blob/f15d0a186d9cc5cc2ca3fb2b6ec4b498df951805/relationships/models.py#L165-L172
train
24,080
googlefonts/ufo2ft
Lib/ufo2ft/util.py
makeOfficialGlyphOrder
def makeOfficialGlyphOrder(font, glyphOrder=None): """ Make the final glyph order for 'font'. If glyphOrder is None, try getting the font.glyphOrder list. If not explicit glyphOrder is defined, sort glyphs alphabetically. If ".notdef" glyph is present in the font, force this to always be the first glyph (at index 0). """ if glyphOrder is None: glyphOrder = getattr(font, "glyphOrder", ()) names = set(font.keys()) order = [] if ".notdef" in names: names.remove(".notdef") order.append(".notdef") for name in glyphOrder: if name not in names: continue names.remove(name) order.append(name) order.extend(sorted(names)) return order
python
def makeOfficialGlyphOrder(font, glyphOrder=None): """ Make the final glyph order for 'font'. If glyphOrder is None, try getting the font.glyphOrder list. If not explicit glyphOrder is defined, sort glyphs alphabetically. If ".notdef" glyph is present in the font, force this to always be the first glyph (at index 0). """ if glyphOrder is None: glyphOrder = getattr(font, "glyphOrder", ()) names = set(font.keys()) order = [] if ".notdef" in names: names.remove(".notdef") order.append(".notdef") for name in glyphOrder: if name not in names: continue names.remove(name) order.append(name) order.extend(sorted(names)) return order
[ "def", "makeOfficialGlyphOrder", "(", "font", ",", "glyphOrder", "=", "None", ")", ":", "if", "glyphOrder", "is", "None", ":", "glyphOrder", "=", "getattr", "(", "font", ",", "\"glyphOrder\"", ",", "(", ")", ")", "names", "=", "set", "(", "font", ".", ...
Make the final glyph order for 'font'. If glyphOrder is None, try getting the font.glyphOrder list. If not explicit glyphOrder is defined, sort glyphs alphabetically. If ".notdef" glyph is present in the font, force this to always be the first glyph (at index 0).
[ "Make", "the", "final", "glyph", "order", "for", "font", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/util.py#L27-L49
train
24,081
googlefonts/ufo2ft
Lib/ufo2ft/util.py
_GlyphSet.from_layer
def from_layer(cls, font, layerName=None, copy=False, skipExportGlyphs=None): """Return a mapping of glyph names to glyph objects from `font`.""" if layerName is not None: layer = font.layers[layerName] else: layer = font.layers.defaultLayer if copy: self = _copyLayer(layer, obj_type=cls) self.lib = deepcopy(layer.lib) else: self = cls((g.name, g) for g in layer) self.lib = layer.lib # If any glyphs in the skipExportGlyphs list are used as components, decompose # them in the containing glyphs... if skipExportGlyphs: for glyph in self.values(): if any(c.baseGlyph in skipExportGlyphs for c in glyph.components): deepCopyContours(self, glyph, glyph, Transform(), skipExportGlyphs) if hasattr(glyph, "removeComponent"): # defcon for c in [ component for component in glyph.components if component.baseGlyph in skipExportGlyphs ]: glyph.removeComponent(c) else: # ufoLib2 glyph.components[:] = [ c for c in glyph.components if c.baseGlyph not in skipExportGlyphs ] # ... and then remove them from the glyph set, if even present. for glyph_name in skipExportGlyphs: if glyph_name in self: del self[glyph_name] self.name = layer.name if layerName is not None else None return self
python
def from_layer(cls, font, layerName=None, copy=False, skipExportGlyphs=None): """Return a mapping of glyph names to glyph objects from `font`.""" if layerName is not None: layer = font.layers[layerName] else: layer = font.layers.defaultLayer if copy: self = _copyLayer(layer, obj_type=cls) self.lib = deepcopy(layer.lib) else: self = cls((g.name, g) for g in layer) self.lib = layer.lib # If any glyphs in the skipExportGlyphs list are used as components, decompose # them in the containing glyphs... if skipExportGlyphs: for glyph in self.values(): if any(c.baseGlyph in skipExportGlyphs for c in glyph.components): deepCopyContours(self, glyph, glyph, Transform(), skipExportGlyphs) if hasattr(glyph, "removeComponent"): # defcon for c in [ component for component in glyph.components if component.baseGlyph in skipExportGlyphs ]: glyph.removeComponent(c) else: # ufoLib2 glyph.components[:] = [ c for c in glyph.components if c.baseGlyph not in skipExportGlyphs ] # ... and then remove them from the glyph set, if even present. for glyph_name in skipExportGlyphs: if glyph_name in self: del self[glyph_name] self.name = layer.name if layerName is not None else None return self
[ "def", "from_layer", "(", "cls", ",", "font", ",", "layerName", "=", "None", ",", "copy", "=", "False", ",", "skipExportGlyphs", "=", "None", ")", ":", "if", "layerName", "is", "not", "None", ":", "layer", "=", "font", ".", "layers", "[", "layerName", ...
Return a mapping of glyph names to glyph objects from `font`.
[ "Return", "a", "mapping", "of", "glyph", "names", "to", "glyph", "objects", "from", "font", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/util.py#L54-L93
train
24,082
googlefonts/ufo2ft
Lib/ufo2ft/featureCompiler.py
parseLayoutFeatures
def parseLayoutFeatures(font): """ Parse OpenType layout features in the UFO and return a feaLib.ast.FeatureFile instance. """ featxt = tounicode(font.features.text or "", "utf-8") if not featxt: return ast.FeatureFile() buf = UnicodeIO(featxt) # the path is used by the lexer to resolve 'include' statements # and print filename in error messages. For the UFO spec, this # should be the path of the UFO, not the inner features.fea: # https://github.com/unified-font-object/ufo-spec/issues/55 ufoPath = font.path if ufoPath is not None: buf.name = ufoPath glyphNames = set(font.keys()) try: parser = Parser(buf, glyphNames) doc = parser.parse() except IncludedFeaNotFound as e: if ufoPath and os.path.exists(os.path.join(ufoPath, e.args[0])): logger.warning( "Please change the file name in the include(...); " "statement to be relative to the UFO itself, " "instead of relative to the 'features.fea' file " "contained in it." ) raise return doc
python
def parseLayoutFeatures(font): """ Parse OpenType layout features in the UFO and return a feaLib.ast.FeatureFile instance. """ featxt = tounicode(font.features.text or "", "utf-8") if not featxt: return ast.FeatureFile() buf = UnicodeIO(featxt) # the path is used by the lexer to resolve 'include' statements # and print filename in error messages. For the UFO spec, this # should be the path of the UFO, not the inner features.fea: # https://github.com/unified-font-object/ufo-spec/issues/55 ufoPath = font.path if ufoPath is not None: buf.name = ufoPath glyphNames = set(font.keys()) try: parser = Parser(buf, glyphNames) doc = parser.parse() except IncludedFeaNotFound as e: if ufoPath and os.path.exists(os.path.join(ufoPath, e.args[0])): logger.warning( "Please change the file name in the include(...); " "statement to be relative to the UFO itself, " "instead of relative to the 'features.fea' file " "contained in it." ) raise return doc
[ "def", "parseLayoutFeatures", "(", "font", ")", ":", "featxt", "=", "tounicode", "(", "font", ".", "features", ".", "text", "or", "\"\"", ",", "\"utf-8\"", ")", "if", "not", "featxt", ":", "return", "ast", ".", "FeatureFile", "(", ")", "buf", "=", "Uni...
Parse OpenType layout features in the UFO and return a feaLib.ast.FeatureFile instance.
[ "Parse", "OpenType", "layout", "features", "in", "the", "UFO", "and", "return", "a", "feaLib", ".", "ast", ".", "FeatureFile", "instance", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureCompiler.py#L30-L58
train
24,083
googlefonts/ufo2ft
Lib/ufo2ft/featureCompiler.py
FeatureCompiler.setupFeatures
def setupFeatures(self): """ Make the features source. **This should not be called externally.** Subclasses may override this method to handle the file creation in a different way if desired. """ if self.featureWriters: featureFile = parseLayoutFeatures(self.ufo) for writer in self.featureWriters: writer.write(self.ufo, featureFile, compiler=self) # stringify AST to get correct line numbers in error messages self.features = featureFile.asFea() else: # no featureWriters, simply read existing features' text self.features = tounicode(self.ufo.features.text or "", "utf-8")
python
def setupFeatures(self): """ Make the features source. **This should not be called externally.** Subclasses may override this method to handle the file creation in a different way if desired. """ if self.featureWriters: featureFile = parseLayoutFeatures(self.ufo) for writer in self.featureWriters: writer.write(self.ufo, featureFile, compiler=self) # stringify AST to get correct line numbers in error messages self.features = featureFile.asFea() else: # no featureWriters, simply read existing features' text self.features = tounicode(self.ufo.features.text or "", "utf-8")
[ "def", "setupFeatures", "(", "self", ")", ":", "if", "self", ".", "featureWriters", ":", "featureFile", "=", "parseLayoutFeatures", "(", "self", ".", "ufo", ")", "for", "writer", "in", "self", ".", "featureWriters", ":", "writer", ".", "write", "(", "self"...
Make the features source. **This should not be called externally.** Subclasses may override this method to handle the file creation in a different way if desired.
[ "Make", "the", "features", "source", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureCompiler.py#L213-L231
train
24,084
googlefonts/ufo2ft
Lib/ufo2ft/featureCompiler.py
FeatureCompiler.buildTables
def buildTables(self): """ Compile OpenType feature tables from the source. Raises a FeaLibError if the feature compilation was unsuccessful. **This should not be called externally.** Subclasses may override this method to handle the table compilation in a different way if desired. """ if not self.features: return # the path is used by the lexer to follow 'include' statements; # if we generated some automatic features, includes have already been # resolved, and we work from a string which does't exist on disk path = self.ufo.path if not self.featureWriters else None try: addOpenTypeFeaturesFromString( self.ttFont, self.features, filename=path ) except FeatureLibError: if path is None: # if compilation fails, create temporary file for inspection data = tobytes(self.features, encoding="utf-8") with NamedTemporaryFile(delete=False) as tmp: tmp.write(data) logger.error( "Compilation failed! Inspect temporary file: %r", tmp.name ) raise
python
def buildTables(self): """ Compile OpenType feature tables from the source. Raises a FeaLibError if the feature compilation was unsuccessful. **This should not be called externally.** Subclasses may override this method to handle the table compilation in a different way if desired. """ if not self.features: return # the path is used by the lexer to follow 'include' statements; # if we generated some automatic features, includes have already been # resolved, and we work from a string which does't exist on disk path = self.ufo.path if not self.featureWriters else None try: addOpenTypeFeaturesFromString( self.ttFont, self.features, filename=path ) except FeatureLibError: if path is None: # if compilation fails, create temporary file for inspection data = tobytes(self.features, encoding="utf-8") with NamedTemporaryFile(delete=False) as tmp: tmp.write(data) logger.error( "Compilation failed! Inspect temporary file: %r", tmp.name ) raise
[ "def", "buildTables", "(", "self", ")", ":", "if", "not", "self", ".", "features", ":", "return", "# the path is used by the lexer to follow 'include' statements;", "# if we generated some automatic features, includes have already been", "# resolved, and we work from a string which doe...
Compile OpenType feature tables from the source. Raises a FeaLibError if the feature compilation was unsuccessful. **This should not be called externally.** Subclasses may override this method to handle the table compilation in a different way if desired.
[ "Compile", "OpenType", "feature", "tables", "from", "the", "source", ".", "Raises", "a", "FeaLibError", "if", "the", "feature", "compilation", "was", "unsuccessful", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureCompiler.py#L233-L263
train
24,085
googlefonts/ufo2ft
Lib/ufo2ft/maxContextCalc.py
maxCtxFont
def maxCtxFont(font): """Calculate the usMaxContext value for an entire font.""" maxCtx = 0 for tag in ('GSUB', 'GPOS'): if tag not in font: continue table = font[tag].table if table.LookupList is None: continue for lookup in table.LookupList.Lookup: for st in lookup.SubTable: maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st) return maxCtx
python
def maxCtxFont(font): """Calculate the usMaxContext value for an entire font.""" maxCtx = 0 for tag in ('GSUB', 'GPOS'): if tag not in font: continue table = font[tag].table if table.LookupList is None: continue for lookup in table.LookupList.Lookup: for st in lookup.SubTable: maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st) return maxCtx
[ "def", "maxCtxFont", "(", "font", ")", ":", "maxCtx", "=", "0", "for", "tag", "in", "(", "'GSUB'", ",", "'GPOS'", ")", ":", "if", "tag", "not", "in", "font", ":", "continue", "table", "=", "font", "[", "tag", "]", ".", "table", "if", "table", "."...
Calculate the usMaxContext value for an entire font.
[ "Calculate", "the", "usMaxContext", "value", "for", "an", "entire", "font", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/maxContextCalc.py#L6-L19
train
24,086
googlefonts/ufo2ft
Lib/ufo2ft/maxContextCalc.py
maxCtxContextualSubtable
def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=''): """Calculate usMaxContext based on a contextual feature subtable.""" if st.Format == 1: for ruleset in getattr(st, '%s%sRuleSet' % (chain, ruleType)): if ruleset is None: continue for rule in getattr(ruleset, '%s%sRule' % (chain, ruleType)): if rule is None: continue maxCtx = maxCtxContextualRule(maxCtx, rule, chain) elif st.Format == 2: for ruleset in getattr(st, '%s%sClassSet' % (chain, ruleType)): if ruleset is None: continue for rule in getattr(ruleset, '%s%sClassRule' % (chain, ruleType)): if rule is None: continue maxCtx = maxCtxContextualRule(maxCtx, rule, chain) elif st.Format == 3: maxCtx = maxCtxContextualRule(maxCtx, st, chain) return maxCtx
python
def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=''): """Calculate usMaxContext based on a contextual feature subtable.""" if st.Format == 1: for ruleset in getattr(st, '%s%sRuleSet' % (chain, ruleType)): if ruleset is None: continue for rule in getattr(ruleset, '%s%sRule' % (chain, ruleType)): if rule is None: continue maxCtx = maxCtxContextualRule(maxCtx, rule, chain) elif st.Format == 2: for ruleset in getattr(st, '%s%sClassSet' % (chain, ruleType)): if ruleset is None: continue for rule in getattr(ruleset, '%s%sClassRule' % (chain, ruleType)): if rule is None: continue maxCtx = maxCtxContextualRule(maxCtx, rule, chain) elif st.Format == 3: maxCtx = maxCtxContextualRule(maxCtx, st, chain) return maxCtx
[ "def", "maxCtxContextualSubtable", "(", "maxCtx", ",", "st", ",", "ruleType", ",", "chain", "=", "''", ")", ":", "if", "st", ".", "Format", "==", "1", ":", "for", "ruleset", "in", "getattr", "(", "st", ",", "'%s%sRuleSet'", "%", "(", "chain", ",", "r...
Calculate usMaxContext based on a contextual feature subtable.
[ "Calculate", "usMaxContext", "based", "on", "a", "contextual", "feature", "subtable", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/maxContextCalc.py#L67-L91
train
24,087
googlefonts/ufo2ft
Lib/ufo2ft/maxContextCalc.py
maxCtxContextualRule
def maxCtxContextualRule(maxCtx, st, chain): """Calculate usMaxContext based on a contextual feature rule.""" if not chain: return max(maxCtx, st.GlyphCount) elif chain == 'Reverse': return max(maxCtx, st.GlyphCount + st.LookAheadGlyphCount) return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount)
python
def maxCtxContextualRule(maxCtx, st, chain): """Calculate usMaxContext based on a contextual feature rule.""" if not chain: return max(maxCtx, st.GlyphCount) elif chain == 'Reverse': return max(maxCtx, st.GlyphCount + st.LookAheadGlyphCount) return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount)
[ "def", "maxCtxContextualRule", "(", "maxCtx", ",", "st", ",", "chain", ")", ":", "if", "not", "chain", ":", "return", "max", "(", "maxCtx", ",", "st", ".", "GlyphCount", ")", "elif", "chain", "==", "'Reverse'", ":", "return", "max", "(", "maxCtx", ",",...
Calculate usMaxContext based on a contextual feature rule.
[ "Calculate", "usMaxContext", "based", "on", "a", "contextual", "feature", "rule", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/maxContextCalc.py#L94-L101
train
24,088
googlefonts/ufo2ft
Lib/ufo2ft/__init__.py
compileOTF
def compileOTF( ufo, preProcessorClass=OTFPreProcessor, outlineCompilerClass=OutlineOTFCompiler, featureCompilerClass=None, featureWriters=None, glyphOrder=None, useProductionNames=None, optimizeCFF=CFFOptimization.SUBROUTINIZE, roundTolerance=None, removeOverlaps=False, overlapsBackend=None, inplace=False, layerName=None, skipExportGlyphs=None, _tables=None, ): """Create FontTools CFF font from a UFO. *removeOverlaps* performs a union operation on all the glyphs' contours. *optimizeCFF* (int) defines whether the CFF charstrings should be specialized and subroutinized. By default both optimization are enabled. A value of 0 disables both; 1 only enables the specialization; 2 (default) does both specialization and subroutinization. *roundTolerance* (float) controls the rounding of point coordinates. It is defined as the maximum absolute difference between the original float and the rounded integer value. By default, all floats are rounded to integer (tolerance 0.5); a value of 0 completely disables rounding; values in between only round floats which are close to their integral part within the tolerated range. *featureWriters* argument is a list of BaseFeatureWriter subclasses or pre-initialized instances. Features will be written by each feature writer in the given order. If featureWriters is None, the default feature writers [KernFeatureWriter, MarkFeatureWriter] are used. *useProductionNames* renames glyphs in TrueType 'post' or OpenType 'CFF ' tables based on the 'public.postscriptNames' mapping in the UFO lib, if present. Otherwise, uniXXXX names are generated from the glyphs' unicode values. The default value (None) will first check if the UFO lib has the 'com.github.googlei18n.ufo2ft.useProductionNames' key. If this is missing or True (default), the glyphs are renamed. Set to False to keep the original names. **inplace** (bool) specifies whether the filters should modify the input UFO's glyphs, a copy should be made first. 
*layerName* specifies which layer should be compiled. When compiling something other than the default layer, feature compilation is skipped. *skipExportGlyphs* is a list or set of glyph names to not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the parameter is not passed in, the UFO's "public.skipExportGlyphs" lib key will be consulted. If it doesn't exist, all glyphs are exported. UFO groups and kerning will be pruned of skipped glyphs. """ logger.info("Pre-processing glyphs") if skipExportGlyphs is None: skipExportGlyphs = ufo.lib.get("public.skipExportGlyphs", []) preProcessor = preProcessorClass( ufo, inplace=inplace, removeOverlaps=removeOverlaps, overlapsBackend=overlapsBackend, layerName=layerName, skipExportGlyphs=skipExportGlyphs, ) glyphSet = preProcessor.process() logger.info("Building OpenType tables") optimizeCFF = CFFOptimization(optimizeCFF) outlineCompiler = outlineCompilerClass( ufo, glyphSet=glyphSet, glyphOrder=glyphOrder, roundTolerance=roundTolerance, optimizeCFF=optimizeCFF >= CFFOptimization.SPECIALIZE, tables=_tables, ) otf = outlineCompiler.compile() # Only the default layer is likely to have all glyphs used in feature code. if layerName is None: compileFeatures( ufo, otf, glyphSet=glyphSet, featureWriters=featureWriters, featureCompilerClass=featureCompilerClass, ) postProcessor = PostProcessor(otf, ufo, glyphSet=glyphSet) otf = postProcessor.process( useProductionNames, optimizeCFF=optimizeCFF >= CFFOptimization.SUBROUTINIZE, ) return otf
python
def compileOTF( ufo, preProcessorClass=OTFPreProcessor, outlineCompilerClass=OutlineOTFCompiler, featureCompilerClass=None, featureWriters=None, glyphOrder=None, useProductionNames=None, optimizeCFF=CFFOptimization.SUBROUTINIZE, roundTolerance=None, removeOverlaps=False, overlapsBackend=None, inplace=False, layerName=None, skipExportGlyphs=None, _tables=None, ): """Create FontTools CFF font from a UFO. *removeOverlaps* performs a union operation on all the glyphs' contours. *optimizeCFF* (int) defines whether the CFF charstrings should be specialized and subroutinized. By default both optimization are enabled. A value of 0 disables both; 1 only enables the specialization; 2 (default) does both specialization and subroutinization. *roundTolerance* (float) controls the rounding of point coordinates. It is defined as the maximum absolute difference between the original float and the rounded integer value. By default, all floats are rounded to integer (tolerance 0.5); a value of 0 completely disables rounding; values in between only round floats which are close to their integral part within the tolerated range. *featureWriters* argument is a list of BaseFeatureWriter subclasses or pre-initialized instances. Features will be written by each feature writer in the given order. If featureWriters is None, the default feature writers [KernFeatureWriter, MarkFeatureWriter] are used. *useProductionNames* renames glyphs in TrueType 'post' or OpenType 'CFF ' tables based on the 'public.postscriptNames' mapping in the UFO lib, if present. Otherwise, uniXXXX names are generated from the glyphs' unicode values. The default value (None) will first check if the UFO lib has the 'com.github.googlei18n.ufo2ft.useProductionNames' key. If this is missing or True (default), the glyphs are renamed. Set to False to keep the original names. **inplace** (bool) specifies whether the filters should modify the input UFO's glyphs, a copy should be made first. 
*layerName* specifies which layer should be compiled. When compiling something other than the default layer, feature compilation is skipped. *skipExportGlyphs* is a list or set of glyph names to not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the parameter is not passed in, the UFO's "public.skipExportGlyphs" lib key will be consulted. If it doesn't exist, all glyphs are exported. UFO groups and kerning will be pruned of skipped glyphs. """ logger.info("Pre-processing glyphs") if skipExportGlyphs is None: skipExportGlyphs = ufo.lib.get("public.skipExportGlyphs", []) preProcessor = preProcessorClass( ufo, inplace=inplace, removeOverlaps=removeOverlaps, overlapsBackend=overlapsBackend, layerName=layerName, skipExportGlyphs=skipExportGlyphs, ) glyphSet = preProcessor.process() logger.info("Building OpenType tables") optimizeCFF = CFFOptimization(optimizeCFF) outlineCompiler = outlineCompilerClass( ufo, glyphSet=glyphSet, glyphOrder=glyphOrder, roundTolerance=roundTolerance, optimizeCFF=optimizeCFF >= CFFOptimization.SPECIALIZE, tables=_tables, ) otf = outlineCompiler.compile() # Only the default layer is likely to have all glyphs used in feature code. if layerName is None: compileFeatures( ufo, otf, glyphSet=glyphSet, featureWriters=featureWriters, featureCompilerClass=featureCompilerClass, ) postProcessor = PostProcessor(otf, ufo, glyphSet=glyphSet) otf = postProcessor.process( useProductionNames, optimizeCFF=optimizeCFF >= CFFOptimization.SUBROUTINIZE, ) return otf
[ "def", "compileOTF", "(", "ufo", ",", "preProcessorClass", "=", "OTFPreProcessor", ",", "outlineCompilerClass", "=", "OutlineOTFCompiler", ",", "featureCompilerClass", "=", "None", ",", "featureWriters", "=", "None", ",", "glyphOrder", "=", "None", ",", "useProducti...
Create FontTools CFF font from a UFO. *removeOverlaps* performs a union operation on all the glyphs' contours. *optimizeCFF* (int) defines whether the CFF charstrings should be specialized and subroutinized. By default both optimization are enabled. A value of 0 disables both; 1 only enables the specialization; 2 (default) does both specialization and subroutinization. *roundTolerance* (float) controls the rounding of point coordinates. It is defined as the maximum absolute difference between the original float and the rounded integer value. By default, all floats are rounded to integer (tolerance 0.5); a value of 0 completely disables rounding; values in between only round floats which are close to their integral part within the tolerated range. *featureWriters* argument is a list of BaseFeatureWriter subclasses or pre-initialized instances. Features will be written by each feature writer in the given order. If featureWriters is None, the default feature writers [KernFeatureWriter, MarkFeatureWriter] are used. *useProductionNames* renames glyphs in TrueType 'post' or OpenType 'CFF ' tables based on the 'public.postscriptNames' mapping in the UFO lib, if present. Otherwise, uniXXXX names are generated from the glyphs' unicode values. The default value (None) will first check if the UFO lib has the 'com.github.googlei18n.ufo2ft.useProductionNames' key. If this is missing or True (default), the glyphs are renamed. Set to False to keep the original names. **inplace** (bool) specifies whether the filters should modify the input UFO's glyphs, a copy should be made first. *layerName* specifies which layer should be compiled. When compiling something other than the default layer, feature compilation is skipped. *skipExportGlyphs* is a list or set of glyph names to not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. 
If the parameter is not passed in, the UFO's "public.skipExportGlyphs" lib key will be consulted. If it doesn't exist, all glyphs are exported. UFO groups and kerning will be pruned of skipped glyphs.
[ "Create", "FontTools", "CFF", "font", "from", "a", "UFO", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/__init__.py#L38-L140
train
24,089
googlefonts/ufo2ft
Lib/ufo2ft/__init__.py
compileTTF
def compileTTF( ufo, preProcessorClass=TTFPreProcessor, outlineCompilerClass=OutlineTTFCompiler, featureCompilerClass=None, featureWriters=None, glyphOrder=None, useProductionNames=None, convertCubics=True, cubicConversionError=None, reverseDirection=True, rememberCurveType=True, removeOverlaps=False, overlapsBackend=None, inplace=False, layerName=None, skipExportGlyphs=None, ): """Create FontTools TrueType font from a UFO. *removeOverlaps* performs a union operation on all the glyphs' contours. *convertCubics* and *cubicConversionError* specify how the conversion from cubic to quadratic curves should be handled. *layerName* specifies which layer should be compiled. When compiling something other than the default layer, feature compilation is skipped. *skipExportGlyphs* is a list or set of glyph names to not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the parameter is not passed in, the UFO's "public.skipExportGlyphs" lib key will be consulted. If it doesn't exist, all glyphs are exported. UFO groups and kerning will be pruned of skipped glyphs. """ logger.info("Pre-processing glyphs") if skipExportGlyphs is None: skipExportGlyphs = ufo.lib.get("public.skipExportGlyphs", []) preProcessor = preProcessorClass( ufo, inplace=inplace, removeOverlaps=removeOverlaps, overlapsBackend=overlapsBackend, convertCubics=convertCubics, conversionError=cubicConversionError, reverseDirection=reverseDirection, rememberCurveType=rememberCurveType, layerName=layerName, skipExportGlyphs=skipExportGlyphs, ) glyphSet = preProcessor.process() logger.info("Building OpenType tables") outlineCompiler = outlineCompilerClass( ufo, glyphSet=glyphSet, glyphOrder=glyphOrder ) otf = outlineCompiler.compile() # Only the default layer is likely to have all glyphs used in feature code. 
if layerName is None: compileFeatures( ufo, otf, glyphSet=glyphSet, featureWriters=featureWriters, featureCompilerClass=featureCompilerClass, ) postProcessor = PostProcessor(otf, ufo, glyphSet=glyphSet) otf = postProcessor.process(useProductionNames) return otf
python
def compileTTF( ufo, preProcessorClass=TTFPreProcessor, outlineCompilerClass=OutlineTTFCompiler, featureCompilerClass=None, featureWriters=None, glyphOrder=None, useProductionNames=None, convertCubics=True, cubicConversionError=None, reverseDirection=True, rememberCurveType=True, removeOverlaps=False, overlapsBackend=None, inplace=False, layerName=None, skipExportGlyphs=None, ): """Create FontTools TrueType font from a UFO. *removeOverlaps* performs a union operation on all the glyphs' contours. *convertCubics* and *cubicConversionError* specify how the conversion from cubic to quadratic curves should be handled. *layerName* specifies which layer should be compiled. When compiling something other than the default layer, feature compilation is skipped. *skipExportGlyphs* is a list or set of glyph names to not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the parameter is not passed in, the UFO's "public.skipExportGlyphs" lib key will be consulted. If it doesn't exist, all glyphs are exported. UFO groups and kerning will be pruned of skipped glyphs. """ logger.info("Pre-processing glyphs") if skipExportGlyphs is None: skipExportGlyphs = ufo.lib.get("public.skipExportGlyphs", []) preProcessor = preProcessorClass( ufo, inplace=inplace, removeOverlaps=removeOverlaps, overlapsBackend=overlapsBackend, convertCubics=convertCubics, conversionError=cubicConversionError, reverseDirection=reverseDirection, rememberCurveType=rememberCurveType, layerName=layerName, skipExportGlyphs=skipExportGlyphs, ) glyphSet = preProcessor.process() logger.info("Building OpenType tables") outlineCompiler = outlineCompilerClass( ufo, glyphSet=glyphSet, glyphOrder=glyphOrder ) otf = outlineCompiler.compile() # Only the default layer is likely to have all glyphs used in feature code. 
if layerName is None: compileFeatures( ufo, otf, glyphSet=glyphSet, featureWriters=featureWriters, featureCompilerClass=featureCompilerClass, ) postProcessor = PostProcessor(otf, ufo, glyphSet=glyphSet) otf = postProcessor.process(useProductionNames) return otf
[ "def", "compileTTF", "(", "ufo", ",", "preProcessorClass", "=", "TTFPreProcessor", ",", "outlineCompilerClass", "=", "OutlineTTFCompiler", ",", "featureCompilerClass", "=", "None", ",", "featureWriters", "=", "None", ",", "glyphOrder", "=", "None", ",", "useProducti...
Create FontTools TrueType font from a UFO. *removeOverlaps* performs a union operation on all the glyphs' contours. *convertCubics* and *cubicConversionError* specify how the conversion from cubic to quadratic curves should be handled. *layerName* specifies which layer should be compiled. When compiling something other than the default layer, feature compilation is skipped. *skipExportGlyphs* is a list or set of glyph names to not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the parameter is not passed in, the UFO's "public.skipExportGlyphs" lib key will be consulted. If it doesn't exist, all glyphs are exported. UFO groups and kerning will be pruned of skipped glyphs.
[ "Create", "FontTools", "TrueType", "font", "from", "a", "UFO", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/__init__.py#L143-L216
train
24,090
googlefonts/ufo2ft
Lib/ufo2ft/__init__.py
compileInterpolatableTTFs
def compileInterpolatableTTFs( ufos, preProcessorClass=TTFInterpolatablePreProcessor, outlineCompilerClass=OutlineTTFCompiler, featureCompilerClass=None, featureWriters=None, glyphOrder=None, useProductionNames=None, cubicConversionError=None, reverseDirection=True, inplace=False, layerNames=None, skipExportGlyphs=None, ): """Create FontTools TrueType fonts from a list of UFOs with interpolatable outlines. Cubic curves are converted compatibly to quadratic curves using the Cu2Qu conversion algorithm. Return an iterator object that yields a TTFont instance for each UFO. *layerNames* refers to the layer names to use glyphs from in the order of the UFOs in *ufos*. By default, this is a list of `[None]` times the number of UFOs, i.e. using the default layer from all the UFOs. When the layerName is not None for a given UFO, the corresponding TTFont object will contain only a minimum set of tables ("head", "hmtx", "glyf", "loca", "maxp", "post" and "vmtx"), and no OpenType layout tables. *skipExportGlyphs* is a list or set of glyph names to not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the parameter is not passed in, the union of all UFO's "public.skipExportGlyphs" lib keys will be used. If they don't exist, all glyphs are exported. UFO groups and kerning will be pruned of skipped glyphs. 
""" from ufo2ft.util import _LazyFontName if layerNames is None: layerNames = [None] * len(ufos) assert len(ufos) == len(layerNames) if skipExportGlyphs is None: skipExportGlyphs = set() for ufo in ufos: skipExportGlyphs.update(ufo.lib.get("public.skipExportGlyphs", [])) logger.info("Pre-processing glyphs") preProcessor = preProcessorClass( ufos, inplace=inplace, conversionError=cubicConversionError, reverseDirection=reverseDirection, layerNames=layerNames, skipExportGlyphs=skipExportGlyphs, ) glyphSets = preProcessor.process() for ufo, glyphSet, layerName in zip(ufos, glyphSets, layerNames): fontName = _LazyFontName(ufo) if layerName is not None: logger.info("Building OpenType tables for %s-%s", fontName, layerName) else: logger.info("Building OpenType tables for %s", fontName) outlineCompiler = outlineCompilerClass( ufo, glyphSet=glyphSet, glyphOrder=glyphOrder, tables=SPARSE_TTF_MASTER_TABLES if layerName else None, ) ttf = outlineCompiler.compile() # Only the default layer is likely to have all glyphs used in feature # code. if layerName is None: compileFeatures( ufo, ttf, glyphSet=glyphSet, featureWriters=featureWriters, featureCompilerClass=featureCompilerClass, ) postProcessor = PostProcessor(ttf, ufo, glyphSet=glyphSet) ttf = postProcessor.process(useProductionNames) if layerName is not None: # for sparse masters (i.e. containing only a subset of the glyphs), we # need to include the post table in order to store glyph names, so that # fontTools.varLib can interpolate glyphs with same name across masters. # However we want to prevent the underlinePosition/underlineThickness # fields in such sparse masters to be included when computing the deltas # for the MVAR table. Thus, we set them to this unlikely, limit value # (-36768) which is a signal varLib should ignore them when building MVAR. ttf["post"].underlinePosition = -0x8000 ttf["post"].underlineThickness = -0x8000 yield ttf
python
def compileInterpolatableTTFs( ufos, preProcessorClass=TTFInterpolatablePreProcessor, outlineCompilerClass=OutlineTTFCompiler, featureCompilerClass=None, featureWriters=None, glyphOrder=None, useProductionNames=None, cubicConversionError=None, reverseDirection=True, inplace=False, layerNames=None, skipExportGlyphs=None, ): """Create FontTools TrueType fonts from a list of UFOs with interpolatable outlines. Cubic curves are converted compatibly to quadratic curves using the Cu2Qu conversion algorithm. Return an iterator object that yields a TTFont instance for each UFO. *layerNames* refers to the layer names to use glyphs from in the order of the UFOs in *ufos*. By default, this is a list of `[None]` times the number of UFOs, i.e. using the default layer from all the UFOs. When the layerName is not None for a given UFO, the corresponding TTFont object will contain only a minimum set of tables ("head", "hmtx", "glyf", "loca", "maxp", "post" and "vmtx"), and no OpenType layout tables. *skipExportGlyphs* is a list or set of glyph names to not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the parameter is not passed in, the union of all UFO's "public.skipExportGlyphs" lib keys will be used. If they don't exist, all glyphs are exported. UFO groups and kerning will be pruned of skipped glyphs. 
""" from ufo2ft.util import _LazyFontName if layerNames is None: layerNames = [None] * len(ufos) assert len(ufos) == len(layerNames) if skipExportGlyphs is None: skipExportGlyphs = set() for ufo in ufos: skipExportGlyphs.update(ufo.lib.get("public.skipExportGlyphs", [])) logger.info("Pre-processing glyphs") preProcessor = preProcessorClass( ufos, inplace=inplace, conversionError=cubicConversionError, reverseDirection=reverseDirection, layerNames=layerNames, skipExportGlyphs=skipExportGlyphs, ) glyphSets = preProcessor.process() for ufo, glyphSet, layerName in zip(ufos, glyphSets, layerNames): fontName = _LazyFontName(ufo) if layerName is not None: logger.info("Building OpenType tables for %s-%s", fontName, layerName) else: logger.info("Building OpenType tables for %s", fontName) outlineCompiler = outlineCompilerClass( ufo, glyphSet=glyphSet, glyphOrder=glyphOrder, tables=SPARSE_TTF_MASTER_TABLES if layerName else None, ) ttf = outlineCompiler.compile() # Only the default layer is likely to have all glyphs used in feature # code. if layerName is None: compileFeatures( ufo, ttf, glyphSet=glyphSet, featureWriters=featureWriters, featureCompilerClass=featureCompilerClass, ) postProcessor = PostProcessor(ttf, ufo, glyphSet=glyphSet) ttf = postProcessor.process(useProductionNames) if layerName is not None: # for sparse masters (i.e. containing only a subset of the glyphs), we # need to include the post table in order to store glyph names, so that # fontTools.varLib can interpolate glyphs with same name across masters. # However we want to prevent the underlinePosition/underlineThickness # fields in such sparse masters to be included when computing the deltas # for the MVAR table. Thus, we set them to this unlikely, limit value # (-36768) which is a signal varLib should ignore them when building MVAR. ttf["post"].underlinePosition = -0x8000 ttf["post"].underlineThickness = -0x8000 yield ttf
[ "def", "compileInterpolatableTTFs", "(", "ufos", ",", "preProcessorClass", "=", "TTFInterpolatablePreProcessor", ",", "outlineCompilerClass", "=", "OutlineTTFCompiler", ",", "featureCompilerClass", "=", "None", ",", "featureWriters", "=", "None", ",", "glyphOrder", "=", ...
Create FontTools TrueType fonts from a list of UFOs with interpolatable outlines. Cubic curves are converted compatibly to quadratic curves using the Cu2Qu conversion algorithm. Return an iterator object that yields a TTFont instance for each UFO. *layerNames* refers to the layer names to use glyphs from in the order of the UFOs in *ufos*. By default, this is a list of `[None]` times the number of UFOs, i.e. using the default layer from all the UFOs. When the layerName is not None for a given UFO, the corresponding TTFont object will contain only a minimum set of tables ("head", "hmtx", "glyf", "loca", "maxp", "post" and "vmtx"), and no OpenType layout tables. *skipExportGlyphs* is a list or set of glyph names to not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the parameter is not passed in, the union of all UFO's "public.skipExportGlyphs" lib keys will be used. If they don't exist, all glyphs are exported. UFO groups and kerning will be pruned of skipped glyphs.
[ "Create", "FontTools", "TrueType", "fonts", "from", "a", "list", "of", "UFOs", "with", "interpolatable", "outlines", ".", "Cubic", "curves", "are", "converted", "compatibly", "to", "quadratic", "curves", "using", "the", "Cu2Qu", "conversion", "algorithm", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/__init__.py#L219-L316
train
24,091
googlefonts/ufo2ft
Lib/ufo2ft/__init__.py
compileInterpolatableTTFsFromDS
def compileInterpolatableTTFsFromDS( designSpaceDoc, preProcessorClass=TTFInterpolatablePreProcessor, outlineCompilerClass=OutlineTTFCompiler, featureCompilerClass=None, featureWriters=None, glyphOrder=None, useProductionNames=None, cubicConversionError=None, reverseDirection=True, inplace=False, ): """Create FontTools TrueType fonts from the DesignSpaceDocument UFO sources with interpolatable outlines. Cubic curves are converted compatibly to quadratic curves using the Cu2Qu conversion algorithm. If the Designspace contains a "public.skipExportGlyphs" lib key, these glyphs will not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the lib key doesn't exist in the Designspace, all glyphs are exported (keys in individual UFOs are ignored). UFO groups and kerning will be pruned of skipped glyphs. The DesignSpaceDocument should contain SourceDescriptor objects with 'font' attribute set to an already loaded defcon.Font object (or compatible UFO Font class). If 'font' attribute is unset or None, an AttributeError exception is thrown. Return a copy of the DesignSpaceDocument object (or the same one if inplace=True) with the source's 'font' attribute set to the corresponding TTFont instance. For sources that have the 'layerName' attribute defined, the corresponding TTFont object will contain only a minimum set of tables ("head", "hmtx", "glyf", "loca", "maxp", "post" and "vmtx"), and no OpenType layout tables. 
""" ufos, layerNames = [], [] for source in designSpaceDoc.sources: if source.font is None: raise AttributeError( "designspace source '%s' is missing required 'font' attribute" % getattr(source, "name", "<Unknown>") ) ufos.append(source.font) # 'layerName' is None for the default layer layerNames.append(source.layerName) skipExportGlyphs = designSpaceDoc.lib.get("public.skipExportGlyphs", []) ttfs = compileInterpolatableTTFs( ufos, preProcessorClass=preProcessorClass, outlineCompilerClass=outlineCompilerClass, featureCompilerClass=featureCompilerClass, featureWriters=featureWriters, glyphOrder=glyphOrder, useProductionNames=useProductionNames, cubicConversionError=cubicConversionError, reverseDirection=reverseDirection, inplace=inplace, layerNames=layerNames, skipExportGlyphs=skipExportGlyphs, ) if inplace: result = designSpaceDoc else: # TODO try a more efficient copy method that doesn't involve (de)serializing result = designSpaceDoc.__class__.fromstring(designSpaceDoc.tostring()) for source, ttf in zip(result.sources, ttfs): source.font = ttf return result
python
def compileInterpolatableTTFsFromDS( designSpaceDoc, preProcessorClass=TTFInterpolatablePreProcessor, outlineCompilerClass=OutlineTTFCompiler, featureCompilerClass=None, featureWriters=None, glyphOrder=None, useProductionNames=None, cubicConversionError=None, reverseDirection=True, inplace=False, ): """Create FontTools TrueType fonts from the DesignSpaceDocument UFO sources with interpolatable outlines. Cubic curves are converted compatibly to quadratic curves using the Cu2Qu conversion algorithm. If the Designspace contains a "public.skipExportGlyphs" lib key, these glyphs will not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the lib key doesn't exist in the Designspace, all glyphs are exported (keys in individual UFOs are ignored). UFO groups and kerning will be pruned of skipped glyphs. The DesignSpaceDocument should contain SourceDescriptor objects with 'font' attribute set to an already loaded defcon.Font object (or compatible UFO Font class). If 'font' attribute is unset or None, an AttributeError exception is thrown. Return a copy of the DesignSpaceDocument object (or the same one if inplace=True) with the source's 'font' attribute set to the corresponding TTFont instance. For sources that have the 'layerName' attribute defined, the corresponding TTFont object will contain only a minimum set of tables ("head", "hmtx", "glyf", "loca", "maxp", "post" and "vmtx"), and no OpenType layout tables. 
""" ufos, layerNames = [], [] for source in designSpaceDoc.sources: if source.font is None: raise AttributeError( "designspace source '%s' is missing required 'font' attribute" % getattr(source, "name", "<Unknown>") ) ufos.append(source.font) # 'layerName' is None for the default layer layerNames.append(source.layerName) skipExportGlyphs = designSpaceDoc.lib.get("public.skipExportGlyphs", []) ttfs = compileInterpolatableTTFs( ufos, preProcessorClass=preProcessorClass, outlineCompilerClass=outlineCompilerClass, featureCompilerClass=featureCompilerClass, featureWriters=featureWriters, glyphOrder=glyphOrder, useProductionNames=useProductionNames, cubicConversionError=cubicConversionError, reverseDirection=reverseDirection, inplace=inplace, layerNames=layerNames, skipExportGlyphs=skipExportGlyphs, ) if inplace: result = designSpaceDoc else: # TODO try a more efficient copy method that doesn't involve (de)serializing result = designSpaceDoc.__class__.fromstring(designSpaceDoc.tostring()) for source, ttf in zip(result.sources, ttfs): source.font = ttf return result
[ "def", "compileInterpolatableTTFsFromDS", "(", "designSpaceDoc", ",", "preProcessorClass", "=", "TTFInterpolatablePreProcessor", ",", "outlineCompilerClass", "=", "OutlineTTFCompiler", ",", "featureCompilerClass", "=", "None", ",", "featureWriters", "=", "None", ",", "glyph...
Create FontTools TrueType fonts from the DesignSpaceDocument UFO sources with interpolatable outlines. Cubic curves are converted compatibly to quadratic curves using the Cu2Qu conversion algorithm. If the Designspace contains a "public.skipExportGlyphs" lib key, these glyphs will not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the lib key doesn't exist in the Designspace, all glyphs are exported (keys in individual UFOs are ignored). UFO groups and kerning will be pruned of skipped glyphs. The DesignSpaceDocument should contain SourceDescriptor objects with 'font' attribute set to an already loaded defcon.Font object (or compatible UFO Font class). If 'font' attribute is unset or None, an AttributeError exception is thrown. Return a copy of the DesignSpaceDocument object (or the same one if inplace=True) with the source's 'font' attribute set to the corresponding TTFont instance. For sources that have the 'layerName' attribute defined, the corresponding TTFont object will contain only a minimum set of tables ("head", "hmtx", "glyf", "loca", "maxp", "post" and "vmtx"), and no OpenType layout tables.
[ "Create", "FontTools", "TrueType", "fonts", "from", "the", "DesignSpaceDocument", "UFO", "sources", "with", "interpolatable", "outlines", ".", "Cubic", "curves", "are", "converted", "compatibly", "to", "quadratic", "curves", "using", "the", "Cu2Qu", "conversion", "a...
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/__init__.py#L319-L390
train
24,092
googlefonts/ufo2ft
Lib/ufo2ft/__init__.py
compileInterpolatableOTFsFromDS
def compileInterpolatableOTFsFromDS( designSpaceDoc, preProcessorClass=OTFPreProcessor, outlineCompilerClass=OutlineOTFCompiler, featureCompilerClass=None, featureWriters=None, glyphOrder=None, useProductionNames=None, roundTolerance=None, inplace=False, ): """Create FontTools CFF fonts from the DesignSpaceDocument UFO sources with interpolatable outlines. Interpolatable means without subroutinization and specializer optimizations and no removal of overlaps. If the Designspace contains a "public.skipExportGlyphs" lib key, these glyphs will not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the lib key doesn't exist in the Designspace, all glyphs are exported (keys in individual UFOs are ignored). UFO groups and kerning will be pruned of skipped glyphs. The DesignSpaceDocument should contain SourceDescriptor objects with 'font' attribute set to an already loaded defcon.Font object (or compatible UFO Font class). If 'font' attribute is unset or None, an AttributeError exception is thrown. Return a copy of the DesignSpaceDocument object (or the same one if inplace=True) with the source's 'font' attribute set to the corresponding TTFont instance. For sources that have the 'layerName' attribute defined, the corresponding TTFont object will contain only a minimum set of tables ("head", "hmtx", "CFF ", "maxp", "vmtx" and "VORG"), and no OpenType layout tables. 
""" for source in designSpaceDoc.sources: if source.font is None: raise AttributeError( "designspace source '%s' is missing required 'font' attribute" % getattr(source, "name", "<Unknown>") ) skipExportGlyphs = designSpaceDoc.lib.get("public.skipExportGlyphs", []) otfs = [] for source in designSpaceDoc.sources: otfs.append( compileOTF( ufo=source.font, layerName=source.layerName, preProcessorClass=preProcessorClass, outlineCompilerClass=outlineCompilerClass, featureCompilerClass=featureCompilerClass, featureWriters=featureWriters, glyphOrder=glyphOrder, useProductionNames=useProductionNames, optimizeCFF=CFFOptimization.NONE, roundTolerance=roundTolerance, removeOverlaps=False, overlapsBackend=None, inplace=inplace, skipExportGlyphs=skipExportGlyphs, _tables=SPARSE_OTF_MASTER_TABLES if source.layerName else None, ) ) if inplace: result = designSpaceDoc else: # TODO try a more efficient copy method that doesn't involve (de)serializing result = designSpaceDoc.__class__.fromstring(designSpaceDoc.tostring()) for source, otf in zip(result.sources, otfs): source.font = otf return result
python
def compileInterpolatableOTFsFromDS( designSpaceDoc, preProcessorClass=OTFPreProcessor, outlineCompilerClass=OutlineOTFCompiler, featureCompilerClass=None, featureWriters=None, glyphOrder=None, useProductionNames=None, roundTolerance=None, inplace=False, ): """Create FontTools CFF fonts from the DesignSpaceDocument UFO sources with interpolatable outlines. Interpolatable means without subroutinization and specializer optimizations and no removal of overlaps. If the Designspace contains a "public.skipExportGlyphs" lib key, these glyphs will not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the lib key doesn't exist in the Designspace, all glyphs are exported (keys in individual UFOs are ignored). UFO groups and kerning will be pruned of skipped glyphs. The DesignSpaceDocument should contain SourceDescriptor objects with 'font' attribute set to an already loaded defcon.Font object (or compatible UFO Font class). If 'font' attribute is unset or None, an AttributeError exception is thrown. Return a copy of the DesignSpaceDocument object (or the same one if inplace=True) with the source's 'font' attribute set to the corresponding TTFont instance. For sources that have the 'layerName' attribute defined, the corresponding TTFont object will contain only a minimum set of tables ("head", "hmtx", "CFF ", "maxp", "vmtx" and "VORG"), and no OpenType layout tables. 
""" for source in designSpaceDoc.sources: if source.font is None: raise AttributeError( "designspace source '%s' is missing required 'font' attribute" % getattr(source, "name", "<Unknown>") ) skipExportGlyphs = designSpaceDoc.lib.get("public.skipExportGlyphs", []) otfs = [] for source in designSpaceDoc.sources: otfs.append( compileOTF( ufo=source.font, layerName=source.layerName, preProcessorClass=preProcessorClass, outlineCompilerClass=outlineCompilerClass, featureCompilerClass=featureCompilerClass, featureWriters=featureWriters, glyphOrder=glyphOrder, useProductionNames=useProductionNames, optimizeCFF=CFFOptimization.NONE, roundTolerance=roundTolerance, removeOverlaps=False, overlapsBackend=None, inplace=inplace, skipExportGlyphs=skipExportGlyphs, _tables=SPARSE_OTF_MASTER_TABLES if source.layerName else None, ) ) if inplace: result = designSpaceDoc else: # TODO try a more efficient copy method that doesn't involve (de)serializing result = designSpaceDoc.__class__.fromstring(designSpaceDoc.tostring()) for source, otf in zip(result.sources, otfs): source.font = otf return result
[ "def", "compileInterpolatableOTFsFromDS", "(", "designSpaceDoc", ",", "preProcessorClass", "=", "OTFPreProcessor", ",", "outlineCompilerClass", "=", "OutlineOTFCompiler", ",", "featureCompilerClass", "=", "None", ",", "featureWriters", "=", "None", ",", "glyphOrder", "=",...
Create FontTools CFF fonts from the DesignSpaceDocument UFO sources with interpolatable outlines. Interpolatable means without subroutinization and specializer optimizations and no removal of overlaps. If the Designspace contains a "public.skipExportGlyphs" lib key, these glyphs will not be exported to the final font. If these glyphs are used as components in any other glyph, those components get decomposed. If the lib key doesn't exist in the Designspace, all glyphs are exported (keys in individual UFOs are ignored). UFO groups and kerning will be pruned of skipped glyphs. The DesignSpaceDocument should contain SourceDescriptor objects with 'font' attribute set to an already loaded defcon.Font object (or compatible UFO Font class). If 'font' attribute is unset or None, an AttributeError exception is thrown. Return a copy of the DesignSpaceDocument object (or the same one if inplace=True) with the source's 'font' attribute set to the corresponding TTFont instance. For sources that have the 'layerName' attribute defined, the corresponding TTFont object will contain only a minimum set of tables ("head", "hmtx", "CFF ", "maxp", "vmtx" and "VORG"), and no OpenType layout tables.
[ "Create", "FontTools", "CFF", "fonts", "from", "the", "DesignSpaceDocument", "UFO", "sources", "with", "interpolatable", "outlines", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/__init__.py#L393-L470
train
24,093
googlefonts/ufo2ft
Lib/ufo2ft/__init__.py
compileFeatures
def compileFeatures( ufo, ttFont=None, glyphSet=None, featureWriters=None, featureCompilerClass=None, ): """ Compile OpenType Layout features from `ufo` into FontTools OTL tables. If `ttFont` is None, a new TTFont object is created containing the new tables, else the provided `ttFont` is updated with the new tables. If no explicit `featureCompilerClass` is provided, the one used will depend on whether the ufo contains any MTI feature files in its 'data' directory (thus the `MTIFeatureCompiler` is used) or not (then the default FeatureCompiler for Adobe FDK features is used). If skipExportGlyphs is provided (see description in the ``compile*`` functions), the feature compiler will prune groups (removing them if empty) and kerning of the UFO of these glyphs. The feature file is left untouched. """ if featureCompilerClass is None: if any( fn.startswith(MTI_FEATURES_PREFIX) and fn.endswith(".mti") for fn in ufo.data.fileNames ): featureCompilerClass = MtiFeatureCompiler else: featureCompilerClass = FeatureCompiler featureCompiler = featureCompilerClass( ufo, ttFont, glyphSet=glyphSet, featureWriters=featureWriters ) return featureCompiler.compile()
python
def compileFeatures( ufo, ttFont=None, glyphSet=None, featureWriters=None, featureCompilerClass=None, ): """ Compile OpenType Layout features from `ufo` into FontTools OTL tables. If `ttFont` is None, a new TTFont object is created containing the new tables, else the provided `ttFont` is updated with the new tables. If no explicit `featureCompilerClass` is provided, the one used will depend on whether the ufo contains any MTI feature files in its 'data' directory (thus the `MTIFeatureCompiler` is used) or not (then the default FeatureCompiler for Adobe FDK features is used). If skipExportGlyphs is provided (see description in the ``compile*`` functions), the feature compiler will prune groups (removing them if empty) and kerning of the UFO of these glyphs. The feature file is left untouched. """ if featureCompilerClass is None: if any( fn.startswith(MTI_FEATURES_PREFIX) and fn.endswith(".mti") for fn in ufo.data.fileNames ): featureCompilerClass = MtiFeatureCompiler else: featureCompilerClass = FeatureCompiler featureCompiler = featureCompilerClass( ufo, ttFont, glyphSet=glyphSet, featureWriters=featureWriters ) return featureCompiler.compile()
[ "def", "compileFeatures", "(", "ufo", ",", "ttFont", "=", "None", ",", "glyphSet", "=", "None", ",", "featureWriters", "=", "None", ",", "featureCompilerClass", "=", "None", ",", ")", ":", "if", "featureCompilerClass", "is", "None", ":", "if", "any", "(", ...
Compile OpenType Layout features from `ufo` into FontTools OTL tables. If `ttFont` is None, a new TTFont object is created containing the new tables, else the provided `ttFont` is updated with the new tables. If no explicit `featureCompilerClass` is provided, the one used will depend on whether the ufo contains any MTI feature files in its 'data' directory (thus the `MTIFeatureCompiler` is used) or not (then the default FeatureCompiler for Adobe FDK features is used). If skipExportGlyphs is provided (see description in the ``compile*`` functions), the feature compiler will prune groups (removing them if empty) and kerning of the UFO of these glyphs. The feature file is left untouched.
[ "Compile", "OpenType", "Layout", "features", "from", "ufo", "into", "FontTools", "OTL", "tables", ".", "If", "ttFont", "is", "None", "a", "new", "TTFont", "object", "is", "created", "containing", "the", "new", "tables", "else", "the", "provided", "ttFont", "...
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/__init__.py#L473-L504
train
24,094
googlefonts/ufo2ft
Lib/ufo2ft/filters/propagateAnchors.py
_propagate_glyph_anchors
def _propagate_glyph_anchors(glyphSet, composite, processed): """ Propagate anchors from base glyphs to a given composite glyph, and to all composite glyphs used in between. """ if composite.name in processed: return processed.add(composite.name) if not composite.components: return base_components = [] mark_components = [] anchor_names = set() to_add = {} for component in composite.components: try: glyph = glyphSet[component.baseGlyph] except KeyError: logger.warning( 'Anchors not propagated for inexistent component {} ' 'in glyph {}'.format(component.baseGlyph, composite.name)) else: _propagate_glyph_anchors(glyphSet, glyph, processed) if any(a.name.startswith('_') for a in glyph.anchors): mark_components.append(component) else: base_components.append(component) anchor_names |= {a.name for a in glyph.anchors} if mark_components and not base_components and _is_ligature_mark(composite): # The composite is a mark that is composed of other marks (E.g. # "circumflexcomb_tildecomb"). Promote the mark that is positioned closest # to the origin to a base. try: component = _component_closest_to_origin(mark_components, glyphSet) except Exception as e: raise Exception( "Error while determining which component of composite " "'{}' is the lowest: {}".format(composite.name, str(e)) ) mark_components.remove(component) base_components.append(component) glyph = glyphSet[component.baseGlyph] anchor_names |= {a.name for a in glyph.anchors} for anchor_name in anchor_names: # don't add if composite glyph already contains this anchor OR any # associated ligature anchors (e.g. 
"top_1, top_2" for "top") if not any(a.name.startswith(anchor_name) for a in composite.anchors): _get_anchor_data(to_add, glyphSet, base_components, anchor_name) for component in mark_components: _adjust_anchors(to_add, glyphSet, component) # we sort propagated anchors to append in a deterministic order for name, (x, y) in sorted(to_add.items()): anchor_dict = {'name': name, 'x': x, 'y': y} try: composite.appendAnchor(anchor_dict) except TypeError: # pragma: no cover # fontParts API composite.appendAnchor(name, (x, y))
python
def _propagate_glyph_anchors(glyphSet, composite, processed): """ Propagate anchors from base glyphs to a given composite glyph, and to all composite glyphs used in between. """ if composite.name in processed: return processed.add(composite.name) if not composite.components: return base_components = [] mark_components = [] anchor_names = set() to_add = {} for component in composite.components: try: glyph = glyphSet[component.baseGlyph] except KeyError: logger.warning( 'Anchors not propagated for inexistent component {} ' 'in glyph {}'.format(component.baseGlyph, composite.name)) else: _propagate_glyph_anchors(glyphSet, glyph, processed) if any(a.name.startswith('_') for a in glyph.anchors): mark_components.append(component) else: base_components.append(component) anchor_names |= {a.name for a in glyph.anchors} if mark_components and not base_components and _is_ligature_mark(composite): # The composite is a mark that is composed of other marks (E.g. # "circumflexcomb_tildecomb"). Promote the mark that is positioned closest # to the origin to a base. try: component = _component_closest_to_origin(mark_components, glyphSet) except Exception as e: raise Exception( "Error while determining which component of composite " "'{}' is the lowest: {}".format(composite.name, str(e)) ) mark_components.remove(component) base_components.append(component) glyph = glyphSet[component.baseGlyph] anchor_names |= {a.name for a in glyph.anchors} for anchor_name in anchor_names: # don't add if composite glyph already contains this anchor OR any # associated ligature anchors (e.g. 
"top_1, top_2" for "top") if not any(a.name.startswith(anchor_name) for a in composite.anchors): _get_anchor_data(to_add, glyphSet, base_components, anchor_name) for component in mark_components: _adjust_anchors(to_add, glyphSet, component) # we sort propagated anchors to append in a deterministic order for name, (x, y) in sorted(to_add.items()): anchor_dict = {'name': name, 'x': x, 'y': y} try: composite.appendAnchor(anchor_dict) except TypeError: # pragma: no cover # fontParts API composite.appendAnchor(name, (x, y))
[ "def", "_propagate_glyph_anchors", "(", "glyphSet", ",", "composite", ",", "processed", ")", ":", "if", "composite", ".", "name", "in", "processed", ":", "return", "processed", ".", "add", "(", "composite", ".", "name", ")", "if", "not", "composite", ".", ...
Propagate anchors from base glyphs to a given composite glyph, and to all composite glyphs used in between.
[ "Propagate", "anchors", "from", "base", "glyphs", "to", "a", "given", "composite", "glyph", "and", "to", "all", "composite", "glyphs", "used", "in", "between", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/filters/propagateAnchors.py#L51-L115
train
24,095
googlefonts/ufo2ft
Lib/ufo2ft/filters/propagateAnchors.py
_get_anchor_data
def _get_anchor_data(anchor_data, glyphSet, components, anchor_name): """Get data for an anchor from a list of components.""" anchors = [] for component in components: for anchor in glyphSet[component.baseGlyph].anchors: if anchor.name == anchor_name: anchors.append((anchor, component)) break if len(anchors) > 1: for i, (anchor, component) in enumerate(anchors): t = Transform(*component.transformation) name = '%s_%d' % (anchor.name, i + 1) anchor_data[name] = t.transformPoint((anchor.x, anchor.y)) elif anchors: anchor, component = anchors[0] t = Transform(*component.transformation) anchor_data[anchor.name] = t.transformPoint((anchor.x, anchor.y))
python
def _get_anchor_data(anchor_data, glyphSet, components, anchor_name): """Get data for an anchor from a list of components.""" anchors = [] for component in components: for anchor in glyphSet[component.baseGlyph].anchors: if anchor.name == anchor_name: anchors.append((anchor, component)) break if len(anchors) > 1: for i, (anchor, component) in enumerate(anchors): t = Transform(*component.transformation) name = '%s_%d' % (anchor.name, i + 1) anchor_data[name] = t.transformPoint((anchor.x, anchor.y)) elif anchors: anchor, component = anchors[0] t = Transform(*component.transformation) anchor_data[anchor.name] = t.transformPoint((anchor.x, anchor.y))
[ "def", "_get_anchor_data", "(", "anchor_data", ",", "glyphSet", ",", "components", ",", "anchor_name", ")", ":", "anchors", "=", "[", "]", "for", "component", "in", "components", ":", "for", "anchor", "in", "glyphSet", "[", "component", ".", "baseGlyph", "]"...
Get data for an anchor from a list of components.
[ "Get", "data", "for", "an", "anchor", "from", "a", "list", "of", "components", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/filters/propagateAnchors.py#L118-L135
train
24,096
googlefonts/ufo2ft
Lib/ufo2ft/featureWriters/baseFeatureWriter.py
BaseFeatureWriter.setContext
def setContext(self, font, feaFile, compiler=None): """ Populate a temporary `self.context` namespace, which is reset after each new call to `_write` method. Subclasses can override this to provide contextual information which depends on other data, or set any temporary attributes. The default implementation sets: - the current font; - the current FeatureFile object; - the current compiler instance (only present when this writer was instantiated from a FeatureCompiler); - a set of features (tags) to be generated. If self.mode is "skip", these are all the features which are _not_ already present. Returns the context namespace instance. """ todo = set(self.features) if self.mode == "skip": existing = ast.findFeatureTags(feaFile) todo.difference_update(existing) self.context = SimpleNamespace( font=font, feaFile=feaFile, compiler=compiler, todo=todo ) return self.context
python
def setContext(self, font, feaFile, compiler=None): """ Populate a temporary `self.context` namespace, which is reset after each new call to `_write` method. Subclasses can override this to provide contextual information which depends on other data, or set any temporary attributes. The default implementation sets: - the current font; - the current FeatureFile object; - the current compiler instance (only present when this writer was instantiated from a FeatureCompiler); - a set of features (tags) to be generated. If self.mode is "skip", these are all the features which are _not_ already present. Returns the context namespace instance. """ todo = set(self.features) if self.mode == "skip": existing = ast.findFeatureTags(feaFile) todo.difference_update(existing) self.context = SimpleNamespace( font=font, feaFile=feaFile, compiler=compiler, todo=todo ) return self.context
[ "def", "setContext", "(", "self", ",", "font", ",", "feaFile", ",", "compiler", "=", "None", ")", ":", "todo", "=", "set", "(", "self", ".", "features", ")", "if", "self", ".", "mode", "==", "\"skip\"", ":", "existing", "=", "ast", ".", "findFeatureT...
Populate a temporary `self.context` namespace, which is reset after each new call to `_write` method. Subclasses can override this to provide contextual information which depends on other data, or set any temporary attributes. The default implementation sets: - the current font; - the current FeatureFile object; - the current compiler instance (only present when this writer was instantiated from a FeatureCompiler); - a set of features (tags) to be generated. If self.mode is "skip", these are all the features which are _not_ already present. Returns the context namespace instance.
[ "Populate", "a", "temporary", "self", ".", "context", "namespace", "which", "is", "reset", "after", "each", "new", "call", "to", "_write", "method", ".", "Subclasses", "can", "override", "this", "to", "provide", "contextual", "information", "which", "depends", ...
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureWriters/baseFeatureWriter.py#L70-L95
train
24,097
googlefonts/ufo2ft
Lib/ufo2ft/featureWriters/baseFeatureWriter.py
BaseFeatureWriter.write
def write(self, font, feaFile, compiler=None): """Write features and class definitions for this font to a feaLib FeatureFile object. Returns True if feature file was modified, False if no new features were generated. """ self.setContext(font, feaFile, compiler=compiler) try: if self.shouldContinue(): return self._write() else: return False finally: del self.context
python
def write(self, font, feaFile, compiler=None): """Write features and class definitions for this font to a feaLib FeatureFile object. Returns True if feature file was modified, False if no new features were generated. """ self.setContext(font, feaFile, compiler=compiler) try: if self.shouldContinue(): return self._write() else: return False finally: del self.context
[ "def", "write", "(", "self", ",", "font", ",", "feaFile", ",", "compiler", "=", "None", ")", ":", "self", ".", "setContext", "(", "font", ",", "feaFile", ",", "compiler", "=", "compiler", ")", "try", ":", "if", "self", ".", "shouldContinue", "(", ")"...
Write features and class definitions for this font to a feaLib FeatureFile object. Returns True if feature file was modified, False if no new features were generated.
[ "Write", "features", "and", "class", "definitions", "for", "this", "font", "to", "a", "feaLib", "FeatureFile", "object", ".", "Returns", "True", "if", "feature", "file", "was", "modified", "False", "if", "no", "new", "features", "were", "generated", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureWriters/baseFeatureWriter.py#L109-L122
train
24,098
googlefonts/ufo2ft
Lib/ufo2ft/featureWriters/baseFeatureWriter.py
BaseFeatureWriter.makeUnicodeToGlyphNameMapping
def makeUnicodeToGlyphNameMapping(self): """Return the Unicode to glyph name mapping for the current font. """ # Try to get the "best" Unicode cmap subtable if this writer is running # in the context of a FeatureCompiler, else create a new mapping from # the UFO glyphs compiler = self.context.compiler cmap = None if compiler is not None: table = compiler.ttFont.get("cmap") if table is not None: cmap = table.getBestCmap() if cmap is None: from ufo2ft.util import makeUnicodeToGlyphNameMapping if compiler is not None: glyphSet = compiler.glyphSet else: glyphSet = self.context.font cmap = makeUnicodeToGlyphNameMapping(glyphSet) return cmap
python
def makeUnicodeToGlyphNameMapping(self): """Return the Unicode to glyph name mapping for the current font. """ # Try to get the "best" Unicode cmap subtable if this writer is running # in the context of a FeatureCompiler, else create a new mapping from # the UFO glyphs compiler = self.context.compiler cmap = None if compiler is not None: table = compiler.ttFont.get("cmap") if table is not None: cmap = table.getBestCmap() if cmap is None: from ufo2ft.util import makeUnicodeToGlyphNameMapping if compiler is not None: glyphSet = compiler.glyphSet else: glyphSet = self.context.font cmap = makeUnicodeToGlyphNameMapping(glyphSet) return cmap
[ "def", "makeUnicodeToGlyphNameMapping", "(", "self", ")", ":", "# Try to get the \"best\" Unicode cmap subtable if this writer is running", "# in the context of a FeatureCompiler, else create a new mapping from", "# the UFO glyphs", "compiler", "=", "self", ".", "context", ".", "compil...
Return the Unicode to glyph name mapping for the current font.
[ "Return", "the", "Unicode", "to", "glyph", "name", "mapping", "for", "the", "current", "font", "." ]
915b986558e87bee288765d9218cc1cd4ebf7f4c
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureWriters/baseFeatureWriter.py#L128-L148
train
24,099