repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
LogicalDash/LiSE
ELiDE/ELiDE/stores.py
munge_source
def munge_source(v):
    """Take Python source code, return a pair of its parameters and the rest
    of it dedented.

    Returns ``(params, body)`` where ``params`` is a tuple of parameter-name
    strings extracted from the signature line and ``body`` is the dedented
    source below it.
    """
    lines = v.split('\n')
    # Skip leading blank lines and decorators. Guard on ``lines`` so input
    # consisting only of such lines returns empty results instead of
    # raising IndexError when the list runs out.
    while lines:
        stripped = lines[0].lstrip()
        if stripped == '' or stripped[0] == '@':
            del lines[0]
        else:
            break
    if not lines:
        return tuple(), ''
    # The first remaining line is the signature; pull its parameter list out.
    params = tuple(
        parm.strip() for parm in
        sig_ex.match(lines[0]).group(1).split(',')
    )
    del lines[0]
    if not lines:
        return params, ''
    # hack to allow 'empty' functions
    if lines and lines[-1].strip() == 'pass':
        del lines[-1]
    return params, dedent('\n'.join(lines))
python
def munge_source(v): """Take Python source code, return a pair of its parameters and the rest of it dedented""" lines = v.split('\n') if not lines: return tuple(), '' firstline = lines[0].lstrip() while firstline == '' or firstline[0] == '@': del lines[0] firstline = lines[0].lstrip() if not lines: return tuple(), '' params = tuple( parm.strip() for parm in sig_ex.match(lines[0]).group(1).split(',') ) del lines[0] if not lines: return params, '' # hack to allow 'empty' functions if lines and lines[-1].strip() == 'pass': del lines[-1] return params, dedent('\n'.join(lines))
[ "def", "munge_source", "(", "v", ")", ":", "lines", "=", "v", ".", "split", "(", "'\\n'", ")", "if", "not", "lines", ":", "return", "tuple", "(", ")", ",", "''", "firstline", "=", "lines", "[", "0", "]", ".", "lstrip", "(", ")", "while", "firstli...
Take Python source code, return a pair of its parameters and the rest of it dedented
[ "Take", "Python", "source", "code", "return", "a", "pair", "of", "its", "parameters", "and", "the", "rest", "of", "it", "dedented" ]
fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/stores.py#L440-L461
train
33,000
LogicalDash/LiSE
ELiDE/ELiDE/stores.py
StoreList.redata
def redata(self, *args, **kwargs):
    """Rebuild ``data`` from the contents of ``store``.

    If the store isn't available yet, reschedule for the next frame.
    Pass ``select_name`` as a keyword to highlight that entry afterward.
    """
    select_name = kwargs.get('select_name')
    if not self.store:
        # Store not wired up yet; retry on the next Clock tick.
        Clock.schedule_once(self.redata)
        return
    self.data = [self.munge(pair) for pair in enumerate(self._iter_keys())]
    if select_name:
        self._trigger_select_name(select_name)
python
def redata(self, *args, **kwargs): """Update my ``data`` to match what's in my ``store``""" select_name = kwargs.get('select_name') if not self.store: Clock.schedule_once(self.redata) return self.data = list(map(self.munge, enumerate(self._iter_keys()))) if select_name: self._trigger_select_name(select_name)
[ "def", "redata", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "select_name", "=", "kwargs", ".", "get", "(", "'select_name'", ")", "if", "not", "self", ".", "store", ":", "Clock", ".", "schedule_once", "(", "self", ".", "redata",...
Update my ``data`` to match what's in my ``store``
[ "Update", "my", "data", "to", "match", "what", "s", "in", "my", "store" ]
fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/stores.py#L133-L141
train
33,001
LogicalDash/LiSE
ELiDE/ELiDE/stores.py
StoreList.select_name
def select_name(self, name, *args):
    """Highlight the item whose name is ``name``."""
    node_index = self._name2i[name]
    self.boxl.select_node(node_index)
python
def select_name(self, name, *args): """Select an item by its name, highlighting""" self.boxl.select_node(self._name2i[name])
[ "def", "select_name", "(", "self", ",", "name", ",", "*", "args", ")", ":", "self", ".", "boxl", ".", "select_node", "(", "self", ".", "_name2i", "[", "name", "]", ")" ]
Select an item by its name, highlighting
[ "Select", "an", "item", "by", "its", "name", "highlighting" ]
fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/stores.py#L148-L150
train
33,002
LogicalDash/LiSE
ELiDE/ELiDE/stores.py
Editor.save
def save(self, *args):
    """Put text in my store, return True if it changed"""
    # Bail out early when the editor isn't fully wired up.
    if self.name_wid is None or self.store is None:
        Logger.debug("{}: Not saving, missing name_wid or store".format(type(self).__name__))
        return
    # Need either an explicit name or a hint name to save under.
    if not (self.name_wid.text or self.name_wid.hint_text):
        Logger.debug("{}: Not saving, no name".format(type(self).__name__))
        return
    # Names may not begin with a digit, whitespace, or punctuation
    # (they become attributes on the store).
    if self.name_wid.text and self.name_wid.text[0] in string.digits + string.whitespace + string.punctuation:
        # TODO alert the user to invalid name
        Logger.warning("{}: Not saving, invalid name".format(type(self).__name__))
        return
    # Editors that set ``_do_parse`` require the source to be valid Python.
    if hasattr(self, '_do_parse'):
        try:
            parse(self.source)
        except SyntaxError:
            # TODO alert user to invalid source
            Logger.debug("{}: Not saving, couldn't parse".format(type(self).__name__))
            return
    do_redata = False
    if self.name_wid.text:
        # The item was renamed: drop the store entry under the old
        # (hint) name before writing under the new one.
        if (
            self.name_wid.hint_text and
            self.name_wid.hint_text != self.name_wid.text and
            hasattr(self.store, self.name_wid.hint_text)
        ):
            delattr(self.store, self.name_wid.hint_text)
            do_redata = True
        # Only write when the stored source actually differs.
        if (
            not hasattr(self.store, self.name_wid.text) or
            getattr(self.store, self.name_wid.text) != self.source
        ):
            Logger.debug("{}: Saving!".format(type(self).__name__))
            setattr(self.store, self.name_wid.text, self.source)
            do_redata = True
    elif self.name_wid.hint_text:
        # No explicit name typed; fall back to the hint name.
        if (
            not hasattr(self.store, self.name_wid.hint_text) or
            getattr(self.store, self.name_wid.hint_text) != self.source
        ):
            Logger.debug("{}: Saving!".format(type(self).__name__))
            setattr(self.store, self.name_wid.hint_text, self.source)
            do_redata = True
    return do_redata
python
def save(self, *args): """Put text in my store, return True if it changed""" if self.name_wid is None or self.store is None: Logger.debug("{}: Not saving, missing name_wid or store".format(type(self).__name__)) return if not (self.name_wid.text or self.name_wid.hint_text): Logger.debug("{}: Not saving, no name".format(type(self).__name__)) return if self.name_wid.text and self.name_wid.text[0] in string.digits + string.whitespace + string.punctuation: # TODO alert the user to invalid name Logger.warning("{}: Not saving, invalid name".format(type(self).__name__)) return if hasattr(self, '_do_parse'): try: parse(self.source) except SyntaxError: # TODO alert user to invalid source Logger.debug("{}: Not saving, couldn't parse".format(type(self).__name__)) return do_redata = False if self.name_wid.text: if ( self.name_wid.hint_text and self.name_wid.hint_text != self.name_wid.text and hasattr(self.store, self.name_wid.hint_text) ): delattr(self.store, self.name_wid.hint_text) do_redata = True if ( not hasattr(self.store, self.name_wid.text) or getattr(self.store, self.name_wid.text) != self.source ): Logger.debug("{}: Saving!".format(type(self).__name__)) setattr(self.store, self.name_wid.text, self.source) do_redata = True elif self.name_wid.hint_text: if ( not hasattr(self.store, self.name_wid.hint_text) or getattr(self.store, self.name_wid.hint_text) != self.source ): Logger.debug("{}: Saving!".format(type(self).__name__)) setattr(self.store, self.name_wid.hint_text, self.source) do_redata = True return do_redata
[ "def", "save", "(", "self", ",", "*", "args", ")", ":", "if", "self", ".", "name_wid", "is", "None", "or", "self", ".", "store", "is", "None", ":", "Logger", ".", "debug", "(", "\"{}: Not saving, missing name_wid or store\"", ".", "format", "(", "type", ...
Put text in my store, return True if it changed
[ "Put", "text", "in", "my", "store", "return", "True", "if", "it", "changed" ]
fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/stores.py#L213-L256
train
33,003
LogicalDash/LiSE
ELiDE/ELiDE/stores.py
Editor.delete
def delete(self, *args):
    """Remove the currently selected item from my store.

    Returns the next key after the deleted one, ``'+'`` when none follows,
    or ``None`` when the key wasn't present.
    """
    key = self.name_wid.text or self.name_wid.hint_text
    if not hasattr(self.store, key):
        # TODO feedback about missing key
        return
    delattr(self.store, key)
    following = [kee for kee in dir(self.store) if kee > key]
    if following:
        return min(following)
    return '+'
python
def delete(self, *args): """Remove the currently selected item from my store""" key = self.name_wid.text or self.name_wid.hint_text if not hasattr(self.store, key): # TODO feedback about missing key return delattr(self.store, key) try: return min(kee for kee in dir(self.store) if kee > key) except ValueError: return '+'
[ "def", "delete", "(", "self", ",", "*", "args", ")", ":", "key", "=", "self", ".", "name_wid", ".", "text", "or", "self", ".", "name_wid", ".", "hint_text", "if", "not", "hasattr", "(", "self", ".", "store", ",", "key", ")", ":", "# TODO feedback abo...
Remove the currently selected item from my store
[ "Remove", "the", "currently", "selected", "item", "from", "my", "store" ]
fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/stores.py#L258-L268
train
33,004
LogicalDash/LiSE
ELiDE/ELiDE/dialog.py
DialogLayout.advance_dialog
def advance_dialog(self, *args):
    """Try to display the next dialog described in my ``todo``.

    Does nothing (beyond clearing widgets) when ``idx`` is past the end.
    """
    self.clear_widgets()
    # Keep the try minimal: only the subscript may legitimately raise
    # IndexError; an IndexError from _update_dialog itself is a bug and
    # should propagate, not be silently swallowed.
    try:
        todo_now = self.todo[self.idx]
    except IndexError:
        return
    self._update_dialog(todo_now)
python
def advance_dialog(self, *args): """Try to display the next dialog described in my ``todo``.""" self.clear_widgets() try: self._update_dialog(self.todo[self.idx]) except IndexError: pass
[ "def", "advance_dialog", "(", "self", ",", "*", "args", ")", ":", "self", ".", "clear_widgets", "(", ")", "try", ":", "self", ".", "_update_dialog", "(", "self", ".", "todo", "[", "self", ".", "idx", "]", ")", "except", "IndexError", ":", "pass" ]
Try to display the next dialog described in my ``todo``.
[ "Try", "to", "display", "the", "next", "dialog", "described", "in", "my", "todo", "." ]
fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/dialog.py#L186-L192
train
33,005
LogicalDash/LiSE
ELiDE/ELiDE/dialog.py
DialogLayout.ok
def ok(self, *args, cb=None):
    """Dismiss the current dialog: clear its widgets, run ``cb`` when
    given, bump ``idx``, and show whatever dialog comes next."""
    self.clear_widgets()
    if cb:
        cb()
    self.idx += 1
    self.advance_dialog()
python
def ok(self, *args, cb=None): """Clear dialog widgets, call ``cb`` if provided, and advance the dialog queue""" self.clear_widgets() if cb: cb() self.idx += 1 self.advance_dialog()
[ "def", "ok", "(", "self", ",", "*", "args", ",", "cb", "=", "None", ")", ":", "self", ".", "clear_widgets", "(", ")", "if", "cb", ":", "cb", "(", ")", "self", ".", "idx", "+=", "1", "self", ".", "advance_dialog", "(", ")" ]
Clear dialog widgets, call ``cb`` if provided, and advance the dialog queue
[ "Clear", "dialog", "widgets", "call", "cb", "if", "provided", "and", "advance", "the", "dialog", "queue" ]
fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/dialog.py#L233-L239
train
33,006
niemasd/TreeSwift
treeswift/Node.py
Node.add_child
def add_child(self, child):
    '''Append ``child`` to this node's children, making self its parent.

    Args:
        ``child`` (``Node``): The child ``Node`` to be added
    '''
    if not isinstance(child, Node):
        raise TypeError("child must be a Node")
    child.parent = self
    self.children.append(child)
python
def add_child(self, child): '''Add child to ``Node`` object Args: ``child`` (``Node``): The child ``Node`` to be added ''' if not isinstance(child, Node): raise TypeError("child must be a Node") self.children.append(child); child.parent = self
[ "def", "add_child", "(", "self", ",", "child", ")", ":", "if", "not", "isinstance", "(", "child", ",", "Node", ")", ":", "raise", "TypeError", "(", "\"child must be a Node\"", ")", "self", ".", "children", ".", "append", "(", "child", ")", "child", ".", ...
Add child to ``Node`` object Args: ``child`` (``Node``): The child ``Node`` to be added
[ "Add", "child", "to", "Node", "object" ]
7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L60-L68
train
33,007
niemasd/TreeSwift
treeswift/Node.py
Node.contract
def contract(self):
    '''Contract this ``Node`` by directly connecting its children to its parent'''
    if self.is_root():
        return
    parent = self.parent
    for child in self.children:
        # Fold this node's edge length into each child's, when both are known.
        if self.edge_length is not None and child.edge_length is not None:
            child.edge_length += self.edge_length
        parent.add_child(child)
    parent.remove_child(self)
python
def contract(self): '''Contract this ``Node`` by directly connecting its children to its parent''' if self.is_root(): return for c in self.children: if self.edge_length is not None and c.edge_length is not None: c.edge_length += self.edge_length self.parent.add_child(c) self.parent.remove_child(self)
[ "def", "contract", "(", "self", ")", ":", "if", "self", ".", "is_root", "(", ")", ":", "return", "for", "c", "in", "self", ".", "children", ":", "if", "self", ".", "edge_length", "is", "not", "None", "and", "c", ".", "edge_length", "is", "not", "No...
Contract this ``Node`` by directly connecting its children to its parent
[ "Contract", "this", "Node", "by", "directly", "connecting", "its", "children", "to", "its", "parent" ]
7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L78-L86
train
33,008
niemasd/TreeSwift
treeswift/Node.py
Node.newick
def newick(self):
    '''Newick string conversion starting at this ``Node`` object

    Returns:
        ``str``: Newick string conversion starting at this ``Node`` object
    '''
    # Build strings bottom-up (postorder) so each internal node can join
    # its children's already-computed substrings.
    node_to_str = dict()
    for node in self.traverse_postorder():
        if node.is_leaf():
            if node.label is None:
                node_to_str[node] = ''
            else:
                node_to_str[node] = str(node.label)
        else:
            out = ['(']
            for c in node.children:
                out.append(node_to_str[c])
                if c.edge_length is not None:
                    # Render integral floats without the trailing '.0'.
                    if isinstance(c.edge_length, int):
                        l_str = str(c.edge_length)
                    elif isinstance(c.edge_length, float) and c.edge_length.is_integer():
                        l_str = str(int(c.edge_length))
                    else:
                        l_str = str(c.edge_length)
                    out.append(':%s' % l_str)
                out.append(',')
                # Child substring is no longer needed; free it to bound memory.
                del node_to_str[c]
            out.pop()  # trailing comma
            out.append(')')
            if node.label is not None:
                out.append(str(node.label))
            node_to_str[node] = ''.join(out)
    return node_to_str[self]
python
def newick(self): '''Newick string conversion starting at this ``Node`` object Returns: ``str``: Newick string conversion starting at this ``Node`` object ''' node_to_str = dict() for node in self.traverse_postorder(): if node.is_leaf(): if node.label is None: node_to_str[node] = '' else: node_to_str[node] = str(node.label) else: out = ['('] for c in node.children: out.append(node_to_str[c]) if c.edge_length is not None: if isinstance(c.edge_length,int): l_str = str(c.edge_length) elif isinstance(c.edge_length,float) and c.edge_length.is_integer(): l_str = str(int(c.edge_length)) else: l_str = str(c.edge_length) out.append(':%s' % l_str) out.append(',') del node_to_str[c] out.pop() # trailing comma out.append(')') if node.label is not None: out.append(str(node.label)) node_to_str[node] = ''.join(out) return node_to_str[self]
[ "def", "newick", "(", "self", ")", ":", "node_to_str", "=", "dict", "(", ")", "for", "node", "in", "self", ".", "traverse_postorder", "(", ")", ":", "if", "node", ".", "is_leaf", "(", ")", ":", "if", "node", ".", "label", "is", "None", ":", "node_t...
Newick string conversion starting at this ``Node`` object Returns: ``str``: Newick string conversion starting at this ``Node`` object
[ "Newick", "string", "conversion", "starting", "at", "this", "Node", "object" ]
7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L128-L160
train
33,009
niemasd/TreeSwift
treeswift/Node.py
Node.remove_child
def remove_child(self, child):
    '''Remove child from ``Node`` object

    Args:
        ``child`` (``Node``): The child to remove

    Raises:
        ``TypeError``: if ``child`` is not a ``Node``
        ``RuntimeError``: if ``child`` is not a child of this node
    '''
    if not isinstance(child, Node):
        raise TypeError("child must be a Node")
    try:
        # list.remove raises ValueError when the element is absent; catch
        # only that (a bare except would also swallow KeyboardInterrupt etc.).
        self.children.remove(child)
    except ValueError:
        raise RuntimeError("Attempting to remove non-existent child")
    child.parent = None
python
def remove_child(self, child): '''Remove child from ``Node`` object Args: ``child`` (``Node``): The child to remove ''' if not isinstance(child, Node): raise TypeError("child must be a Node") try: self.children.remove(child); child.parent = None except: raise RuntimeError("Attempting to remove non-existent child")
[ "def", "remove_child", "(", "self", ",", "child", ")", ":", "if", "not", "isinstance", "(", "child", ",", "Node", ")", ":", "raise", "TypeError", "(", "\"child must be a Node\"", ")", "try", ":", "self", ".", "children", ".", "remove", "(", "child", ")",...
Remove child from ``Node`` object Args: ``child`` (``Node``): The child to remove
[ "Remove", "child", "from", "Node", "object" ]
7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L170-L181
train
33,010
niemasd/TreeSwift
treeswift/Node.py
Node.resolve_polytomies
def resolve_polytomies(self):
    '''Arbitrarily resolve polytomies below this ``Node`` with 0-lengthed edges.'''
    to_visit = deque([self])
    while to_visit:
        node = to_visit.popleft()
        while len(node.children) > 2:
            # Pair off the last two children under a fresh zero-length node,
            # reducing the child count by one each pass.
            first = node.children.pop()
            second = node.children.pop()
            joiner = Node(edge_length=0)
            node.add_child(joiner)
            joiner.add_child(first)
            joiner.add_child(second)
        to_visit.extend(node.children)
python
def resolve_polytomies(self): '''Arbitrarily resolve polytomies below this ``Node`` with 0-lengthed edges.''' q = deque(); q.append(self) while len(q) != 0: node = q.popleft() while len(node.children) > 2: c1 = node.children.pop(); c2 = node.children.pop() nn = Node(edge_length=0); node.add_child(nn) nn.add_child(c1); nn.add_child(c2) q.extend(node.children)
[ "def", "resolve_polytomies", "(", "self", ")", ":", "q", "=", "deque", "(", ")", "q", ".", "append", "(", "self", ")", "while", "len", "(", "q", ")", "!=", "0", ":", "node", "=", "q", ".", "popleft", "(", ")", "while", "len", "(", "node", ".", ...
Arbitrarily resolve polytomies below this ``Node`` with 0-lengthed edges.
[ "Arbitrarily", "resolve", "polytomies", "below", "this", "Node", "with", "0", "-", "lengthed", "edges", "." ]
7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L183-L192
train
33,011
niemasd/TreeSwift
treeswift/Node.py
Node.set_parent
def set_parent(self, parent):
    '''Set the parent of this ``Node`` object. Use this carefully, otherwise you may damage the structure of this ``Tree`` object.

    Args:
        ``Node``: The new parent of this ``Node``
    '''
    if isinstance(parent, Node):
        self.parent = parent
    else:
        raise TypeError("parent must be a Node")
python
def set_parent(self, parent): '''Set the parent of this ``Node`` object. Use this carefully, otherwise you may damage the structure of this ``Tree`` object. Args: ``Node``: The new parent of this ``Node`` ''' if not isinstance(parent, Node): raise TypeError("parent must be a Node") self.parent = parent
[ "def", "set_parent", "(", "self", ",", "parent", ")", ":", "if", "not", "isinstance", "(", "parent", ",", "Node", ")", ":", "raise", "TypeError", "(", "\"parent must be a Node\"", ")", "self", ".", "parent", "=", "parent" ]
Set the parent of this ``Node`` object. Use this carefully, otherwise you may damage the structure of this ``Tree`` object. Args: ``Node``: The new parent of this ``Node``
[ "Set", "the", "parent", "of", "this", "Node", "object", ".", "Use", "this", "carefully", "otherwise", "you", "may", "damage", "the", "structure", "of", "this", "Tree", "object", "." ]
7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L213-L221
train
33,012
niemasd/TreeSwift
treeswift/Node.py
Node.traverse_ancestors
def traverse_ancestors(self, include_self=True):
    '''Traverse over the ancestors of this ``Node``

    Args:
        ``include_self`` (``bool``): ``True`` to include self in the traversal, otherwise ``False``
    '''
    if not isinstance(include_self, bool):
        raise TypeError("include_self must be a bool")
    current = self if include_self else self.parent
    while current is not None:
        yield current
        current = current.parent
python
def traverse_ancestors(self, include_self=True): '''Traverse over the ancestors of this ``Node`` Args: ``include_self`` (``bool``): ``True`` to include self in the traversal, otherwise ``False`` ''' if not isinstance(include_self, bool): raise TypeError("include_self must be a bool") if include_self: c = self else: c = self.parent while c is not None: yield c; c = c.parent
[ "def", "traverse_ancestors", "(", "self", ",", "include_self", "=", "True", ")", ":", "if", "not", "isinstance", "(", "include_self", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"include_self must be a bool\"", ")", "if", "include_self", ":", "c", "=",...
Traverse over the ancestors of this ``Node`` Args: ``include_self`` (``bool``): ``True`` to include self in the traversal, otherwise ``False``
[ "Traverse", "over", "the", "ancestors", "of", "this", "Node" ]
7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L223-L236
train
33,013
niemasd/TreeSwift
treeswift/Node.py
Node.traverse_inorder
def traverse_inorder(self, leaves=True, internal=True):
    '''Perform an inorder traversal starting at this ``Node`` object

    Args:
        ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``

        ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``

    Raises ``RuntimeError`` when a node has a child count other than 0 or 2
    (inorder is only defined on binary trees).
    '''
    stack = deque()
    node = self
    while True:
        # Descend along left children, stacking each node on the way down.
        while node is not None:
            stack.append(node)
            if not node.children:
                node = None
            elif len(node.children) == 2:
                node = node.children[0]
            else:
                raise RuntimeError(INORDER_NONBINARY)
        if not stack:
            return
        node = stack.pop()
        if (leaves and node.is_leaf()) or (internal and not node.is_leaf()):
            yield node
        # Continue with the right subtree of the node just visited.
        if not node.children:
            node = None
        elif len(node.children) == 2:
            node = node.children[1]
        else:
            raise RuntimeError(INORDER_NONBINARY)
python
def traverse_inorder(self, leaves=True, internal=True): '''Perform an inorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False`` ''' c = self; s = deque(); done = False while not done: if c is None: if len(s) == 0: done = True else: c = s.pop() if (leaves and c.is_leaf()) or (internal and not c.is_leaf()): yield c if len(c.children) == 0: c = None elif len(c.children) == 2: c = c.children[1] else: raise RuntimeError(INORDER_NONBINARY) else: s.append(c) if len(c.children) == 0: c = None elif len(c.children) == 2: c = c.children[0] else: raise RuntimeError(INORDER_NONBINARY)
[ "def", "traverse_inorder", "(", "self", ",", "leaves", "=", "True", ",", "internal", "=", "True", ")", ":", "c", "=", "self", "s", "=", "deque", "(", ")", "done", "=", "False", "while", "not", "done", ":", "if", "c", "is", "None", ":", "if", "len...
Perform an inorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
[ "Perform", "an", "inorder", "traversal", "starting", "at", "this", "Node", "object" ]
7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L263-L293
train
33,014
niemasd/TreeSwift
treeswift/Node.py
Node.traverse_levelorder
def traverse_levelorder(self, leaves=True, internal=True):
    '''Perform a levelorder traversal starting at this ``Node`` object

    Args:
        ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``

        ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
    '''
    queue = deque([self])
    while queue:
        node = queue.popleft()
        at_leaf = node.is_leaf()
        if (leaves and at_leaf) or (internal and not at_leaf):
            yield node
        queue.extend(node.children)
python
def traverse_levelorder(self, leaves=True, internal=True): '''Perform a levelorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False`` ''' q = deque(); q.append(self) while len(q) != 0: n = q.popleft() if (leaves and n.is_leaf()) or (internal and not n.is_leaf()): yield n q.extend(n.children)
[ "def", "traverse_levelorder", "(", "self", ",", "leaves", "=", "True", ",", "internal", "=", "True", ")", ":", "q", "=", "deque", "(", ")", "q", ".", "append", "(", "self", ")", "while", "len", "(", "q", ")", "!=", "0", ":", "n", "=", "q", ".",...
Perform a levelorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
[ "Perform", "a", "levelorder", "traversal", "starting", "at", "this", "Node", "object" ]
7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L305-L318
train
33,015
niemasd/TreeSwift
treeswift/Node.py
Node.traverse_postorder
def traverse_postorder(self, leaves=True, internal=True):
    '''Perform a postorder traversal starting at this ``Node`` object

    Args:
        ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``

        ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
    '''
    # First pass: reverse-preorder into visit_order; popping it back out
    # yields nodes in postorder.
    pending = deque([self])
    visit_order = deque()
    while pending:
        node = pending.pop()
        visit_order.append(node)
        pending.extend(node.children)
    while visit_order:
        node = visit_order.pop()
        if (leaves and node.is_leaf()) or (internal and not node.is_leaf()):
            yield node
python
def traverse_postorder(self, leaves=True, internal=True): '''Perform a postorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False`` ''' s1 = deque(); s2 = deque(); s1.append(self) while len(s1) != 0: n = s1.pop(); s2.append(n); s1.extend(n.children) while len(s2) != 0: n = s2.pop() if (leaves and n.is_leaf()) or (internal and not n.is_leaf()): yield n
[ "def", "traverse_postorder", "(", "self", ",", "leaves", "=", "True", ",", "internal", "=", "True", ")", ":", "s1", "=", "deque", "(", ")", "s2", "=", "deque", "(", ")", "s1", ".", "append", "(", "self", ")", "while", "len", "(", "s1", ")", "!=",...
Perform a postorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
[ "Perform", "a", "postorder", "traversal", "starting", "at", "this", "Node", "object" ]
7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L320-L334
train
33,016
niemasd/TreeSwift
treeswift/Node.py
Node.traverse_preorder
def traverse_preorder(self, leaves=True, internal=True):
    '''Perform a preorder traversal starting at this ``Node`` object

    Args:
        ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``

        ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
    '''
    stack = deque([self])
    while stack:
        node = stack.pop()
        at_leaf = node.is_leaf()
        if (leaves and at_leaf) or (internal and not at_leaf):
            yield node
        stack.extend(node.children)
python
def traverse_preorder(self, leaves=True, internal=True): '''Perform a preorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False`` ''' s = deque(); s.append(self) while len(s) != 0: n = s.pop() if (leaves and n.is_leaf()) or (internal and not n.is_leaf()): yield n s.extend(n.children)
[ "def", "traverse_preorder", "(", "self", ",", "leaves", "=", "True", ",", "internal", "=", "True", ")", ":", "s", "=", "deque", "(", ")", "s", ".", "append", "(", "self", ")", "while", "len", "(", "s", ")", "!=", "0", ":", "n", "=", "s", ".", ...
Perform a preorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
[ "Perform", "a", "preorder", "traversal", "starting", "at", "this", "Node", "object" ]
7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L336-L349
train
33,017
piglei/uwsgi-sloth
uwsgi_sloth/structures.py
ValuesAggregation.merge_with
def merge_with(self, other):
    """Merge this ``ValuesAggregation`` with another one"""
    merged = ValuesAggregation()
    merged.count = self.count + other.count
    merged.total = self.total + other.total
    merged.max = max(self.max, other.max)
    merged.min = min(self.min, other.min)
    return merged
python
def merge_with(self, other): """Merge this ``ValuesAggregation`` with another one""" result = ValuesAggregation() result.total = self.total + other.total result.count = self.count + other.count result.min = min(self.min, other.min) result.max = max(self.max, other.max) return result
[ "def", "merge_with", "(", "self", ",", "other", ")", ":", "result", "=", "ValuesAggregation", "(", ")", "result", ".", "total", "=", "self", ".", "total", "+", "other", ".", "total", "result", ".", "count", "=", "self", ".", "count", "+", "other", "....
Merge this ``ValuesAggregation`` with another one
[ "Merge", "this", "ValuesAggregation", "with", "another", "one" ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/structures.py#L35-L42
train
33,018
piglei/uwsgi-sloth
uwsgi_sloth/utils.py
parse_url_rules
def parse_url_rules(urls_fp): """URL rules from given fp""" url_rules = [] for line in urls_fp: re_url = line.strip() if re_url: url_rules.append({'str': re_url, 're': re.compile(re_url)}) return url_rules
python
def parse_url_rules(urls_fp): """URL rules from given fp""" url_rules = [] for line in urls_fp: re_url = line.strip() if re_url: url_rules.append({'str': re_url, 're': re.compile(re_url)}) return url_rules
[ "def", "parse_url_rules", "(", "urls_fp", ")", ":", "url_rules", "=", "[", "]", "for", "line", "in", "urls_fp", ":", "re_url", "=", "line", ".", "strip", "(", ")", "if", "re_url", ":", "url_rules", ".", "append", "(", "{", "'str'", ":", "re_url", ","...
URL rules from given fp
[ "URL", "rules", "from", "given", "fp" ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/utils.py#L5-L12
train
33,019
piglei/uwsgi-sloth
uwsgi_sloth/utils.py
force_bytes
def force_bytes(s, encoding='utf-8', errors='strict'):
    """A function turns "s" into bytes object, similar to
    django.utils.encoding.force_bytes
    """
    if not isinstance(s, bytes):
        return s.encode(encoding, errors)
    # Already bytes: re-encode only when a different target encoding is
    # requested (input is assumed to be utf-8 in that case).
    if encoding == 'utf-8':
        return s
    return s.decode('utf-8', errors).encode(encoding, errors)
python
def force_bytes(s, encoding='utf-8', errors='strict'): """A function turns "s" into bytes object, similar to django.utils.encoding.force_bytes """ # Handle the common case first for performance reasons. if isinstance(s, bytes): if encoding == 'utf-8': return s else: return s.decode('utf-8', errors).encode(encoding, errors) else: return s.encode(encoding, errors)
[ "def", "force_bytes", "(", "s", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'strict'", ")", ":", "# Handle the common case first for performance reasons.", "if", "isinstance", "(", "s", ",", "bytes", ")", ":", "if", "encoding", "==", "'utf-8'", ":", ...
A function turns "s" into bytes object, similar to django.utils.encoding.force_bytes
[ "A", "function", "turns", "s", "into", "bytes", "object", "similar", "to", "django", ".", "utils", ".", "encoding", ".", "force_bytes" ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/utils.py#L25-L35
train
33,020
piglei/uwsgi-sloth
uwsgi_sloth/utils.py
force_text
def force_text(s, encoding='utf-8', errors='strict'): """A function turns "s" into text type, similar to django.utils.encoding.force_text """ if issubclass(type(s), str): return s try: if isinstance(s, bytes): s = str(s, encoding, errors) else: s = str(s) except UnicodeDecodeError as e: raise DjangoUnicodeDecodeError(s, *e.args) return s
python
def force_text(s, encoding='utf-8', errors='strict'): """A function turns "s" into text type, similar to django.utils.encoding.force_text """ if issubclass(type(s), str): return s try: if isinstance(s, bytes): s = str(s, encoding, errors) else: s = str(s) except UnicodeDecodeError as e: raise DjangoUnicodeDecodeError(s, *e.args) return s
[ "def", "force_text", "(", "s", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'strict'", ")", ":", "if", "issubclass", "(", "type", "(", "s", ")", ",", "str", ")", ":", "return", "s", "try", ":", "if", "isinstance", "(", "s", ",", "bytes", ...
A function turns "s" into text type, similar to django.utils.encoding.force_text
[ "A", "function", "turns", "s", "into", "text", "type", "similar", "to", "django", ".", "utils", ".", "encoding", ".", "force_text" ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/utils.py#L38-L50
train
33,021
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.set_XRef
def set_XRef(self, X=None, indtX=None, indtXlamb=None): """ Reset the reference X Useful if to replace channel indices by a time-vraying quantity e.g.: distance to the magnetic axis """ out = self._checkformat_inputs_XRef(X=X, indtX=indtX, indXlamb=indtXlamb) X, nnch, indtX, indXlamb, indtXlamb = out self._ddataRef['X'] = X self._ddataRef['nnch'] = nnch self._ddataRef['indtX'] = indtX self._ddataRef['indtXlamb'] = indtXlamb self._ddata['uptodate'] = False
python
def set_XRef(self, X=None, indtX=None, indtXlamb=None): """ Reset the reference X Useful if to replace channel indices by a time-vraying quantity e.g.: distance to the magnetic axis """ out = self._checkformat_inputs_XRef(X=X, indtX=indtX, indXlamb=indtXlamb) X, nnch, indtX, indXlamb, indtXlamb = out self._ddataRef['X'] = X self._ddataRef['nnch'] = nnch self._ddataRef['indtX'] = indtX self._ddataRef['indtXlamb'] = indtXlamb self._ddata['uptodate'] = False
[ "def", "set_XRef", "(", "self", ",", "X", "=", "None", ",", "indtX", "=", "None", ",", "indtXlamb", "=", "None", ")", ":", "out", "=", "self", ".", "_checkformat_inputs_XRef", "(", "X", "=", "X", ",", "indtX", "=", "indtX", ",", "indXlamb", "=", "i...
Reset the reference X Useful if to replace channel indices by a time-vraying quantity e.g.: distance to the magnetic axis
[ "Reset", "the", "reference", "X" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L888-L902
train
33,022
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.set_dtreat_indt
def set_dtreat_indt(self, t=None, indt=None): """ Store the desired index array for the time vector If an array of indices (refering to self.ddataRef['t'] is not provided, uses self.select_t(t=t) to produce it """ lC = [indt is not None, t is not None] if all(lC): msg = "Please provide either t or indt (or none)!" raise Exception(msg) if lC[1]: ind = self.select_t(t=t, out=bool) else: ind = _format_ind(indt, n=self._ddataRef['nt']) self._dtreat['indt'] = ind self._ddata['uptodate'] = False
python
def set_dtreat_indt(self, t=None, indt=None): """ Store the desired index array for the time vector If an array of indices (refering to self.ddataRef['t'] is not provided, uses self.select_t(t=t) to produce it """ lC = [indt is not None, t is not None] if all(lC): msg = "Please provide either t or indt (or none)!" raise Exception(msg) if lC[1]: ind = self.select_t(t=t, out=bool) else: ind = _format_ind(indt, n=self._ddataRef['nt']) self._dtreat['indt'] = ind self._ddata['uptodate'] = False
[ "def", "set_dtreat_indt", "(", "self", ",", "t", "=", "None", ",", "indt", "=", "None", ")", ":", "lC", "=", "[", "indt", "is", "not", "None", ",", "t", "is", "not", "None", "]", "if", "all", "(", "lC", ")", ":", "msg", "=", "\"Please provide eit...
Store the desired index array for the time vector If an array of indices (refering to self.ddataRef['t'] is not provided, uses self.select_t(t=t) to produce it
[ "Store", "the", "desired", "index", "array", "for", "the", "time", "vector" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L904-L921
train
33,023
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.set_dtreat_indch
def set_dtreat_indch(self, indch=None): """ Store the desired index array for the channels If None => all channels Must be a 1d array """ if indch is not None: indch = np.asarray(indch) assert indch.ndim==1 indch = _format_ind(indch, n=self._ddataRef['nch']) self._dtreat['indch'] = indch self._ddata['uptodate'] = False
python
def set_dtreat_indch(self, indch=None): """ Store the desired index array for the channels If None => all channels Must be a 1d array """ if indch is not None: indch = np.asarray(indch) assert indch.ndim==1 indch = _format_ind(indch, n=self._ddataRef['nch']) self._dtreat['indch'] = indch self._ddata['uptodate'] = False
[ "def", "set_dtreat_indch", "(", "self", ",", "indch", "=", "None", ")", ":", "if", "indch", "is", "not", "None", ":", "indch", "=", "np", ".", "asarray", "(", "indch", ")", "assert", "indch", ".", "ndim", "==", "1", "indch", "=", "_format_ind", "(", ...
Store the desired index array for the channels If None => all channels Must be a 1d array
[ "Store", "the", "desired", "index", "array", "for", "the", "channels" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L923-L935
train
33,024
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.set_dtreat_indlamb
def set_dtreat_indlamb(self, indlamb=None): """ Store the desired index array for the wavelength If None => all wavelengths Must be a 1d array """ if not self._isSpectral(): msg = "The wavelength can only be set with DataSpectral object !" raise Exception(msg) if indlamb is not None: indlamb = np.asarray(indlamb) assert indlamb.ndim==1 indlamb = _format_ind(indlamb, n=self._ddataRef['nlamb']) self._dtreat['indlamb'] = indlamb self._ddata['uptodate'] = False
python
def set_dtreat_indlamb(self, indlamb=None): """ Store the desired index array for the wavelength If None => all wavelengths Must be a 1d array """ if not self._isSpectral(): msg = "The wavelength can only be set with DataSpectral object !" raise Exception(msg) if indlamb is not None: indlamb = np.asarray(indlamb) assert indlamb.ndim==1 indlamb = _format_ind(indlamb, n=self._ddataRef['nlamb']) self._dtreat['indlamb'] = indlamb self._ddata['uptodate'] = False
[ "def", "set_dtreat_indlamb", "(", "self", ",", "indlamb", "=", "None", ")", ":", "if", "not", "self", ".", "_isSpectral", "(", ")", ":", "msg", "=", "\"The wavelength can only be set with DataSpectral object !\"", "raise", "Exception", "(", "msg", ")", "if", "in...
Store the desired index array for the wavelength If None => all wavelengths Must be a 1d array
[ "Store", "the", "desired", "index", "array", "for", "the", "wavelength" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L937-L952
train
33,025
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.set_dtreat_interp_indt
def set_dtreat_interp_indt(self, indt=None): """ Set the indices of the times for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices => interpolate data at these times for all channels - A dict with: * keys = int indices of channels * values = array of int indices of times at which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X'] """ lC = [indt is None, type(indt) in [np.ndarray,list], type(indt) is dict] assert any(lC) if lC[2]: lc = [type(k) is int and k<self._ddataRef['nch'] for k in indt.keys()] assert all(lc) for k in indt.keys(): assert hasattr(indt[k],'__iter__') indt[k] = _format_ind(indt[k], n=self._ddataRef['nt']) elif lC[1]: indt = np.asarray(indt) assert indt.ndim==1 indt = _format_ind(indt, n=self._ddataRef['nt']) self._dtreat['interp-indt'] = indt self._ddata['uptodate'] = False
python
def set_dtreat_interp_indt(self, indt=None): """ Set the indices of the times for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices => interpolate data at these times for all channels - A dict with: * keys = int indices of channels * values = array of int indices of times at which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X'] """ lC = [indt is None, type(indt) in [np.ndarray,list], type(indt) is dict] assert any(lC) if lC[2]: lc = [type(k) is int and k<self._ddataRef['nch'] for k in indt.keys()] assert all(lc) for k in indt.keys(): assert hasattr(indt[k],'__iter__') indt[k] = _format_ind(indt[k], n=self._ddataRef['nt']) elif lC[1]: indt = np.asarray(indt) assert indt.ndim==1 indt = _format_ind(indt, n=self._ddataRef['nt']) self._dtreat['interp-indt'] = indt self._ddata['uptodate'] = False
[ "def", "set_dtreat_interp_indt", "(", "self", ",", "indt", "=", "None", ")", ":", "lC", "=", "[", "indt", "is", "None", ",", "type", "(", "indt", ")", "in", "[", "np", ".", "ndarray", ",", "list", "]", ",", "type", "(", "indt", ")", "is", "dict",...
Set the indices of the times for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices => interpolate data at these times for all channels - A dict with: * keys = int indices of channels * values = array of int indices of times at which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X']
[ "Set", "the", "indices", "of", "the", "times", "for", "which", "to", "interpolate", "data" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L998-L1024
train
33,026
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.set_dtreat_interp_indch
def set_dtreat_interp_indch(self, indch=None): """ Set the indices of the channels for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices of channels => interpolate data at these channels for all times - A dict with: * keys = int indices of times * values = array of int indices of chan. for which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X'] """ lC = [indch is None, type(indch) in [np.ndarray,list], type(indch) is dict] assert any(lC) if lC[2]: lc = [type(k) is int and k<self._ddataRef['nt'] for k in indch.keys()] assert all(lc) for k in indch.keys(): assert hasattr(indch[k],'__iter__') indch[k] = _format_ind(indch[k], n=self._ddataRef['nch']) elif lC[1]: indch = np.asarray(indch) assert indch.ndim==1 indch = _format_ind(indch, n=self._ddataRef['nch']) self._dtreat['interp-indch'] = indch self._ddata['uptodate'] = False
python
def set_dtreat_interp_indch(self, indch=None): """ Set the indices of the channels for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices of channels => interpolate data at these channels for all times - A dict with: * keys = int indices of times * values = array of int indices of chan. for which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X'] """ lC = [indch is None, type(indch) in [np.ndarray,list], type(indch) is dict] assert any(lC) if lC[2]: lc = [type(k) is int and k<self._ddataRef['nt'] for k in indch.keys()] assert all(lc) for k in indch.keys(): assert hasattr(indch[k],'__iter__') indch[k] = _format_ind(indch[k], n=self._ddataRef['nch']) elif lC[1]: indch = np.asarray(indch) assert indch.ndim==1 indch = _format_ind(indch, n=self._ddataRef['nch']) self._dtreat['interp-indch'] = indch self._ddata['uptodate'] = False
[ "def", "set_dtreat_interp_indch", "(", "self", ",", "indch", "=", "None", ")", ":", "lC", "=", "[", "indch", "is", "None", ",", "type", "(", "indch", ")", "in", "[", "np", ".", "ndarray", ",", "list", "]", ",", "type", "(", "indch", ")", "is", "d...
Set the indices of the channels for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices of channels => interpolate data at these channels for all times - A dict with: * keys = int indices of times * values = array of int indices of chan. for which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X']
[ "Set", "the", "indices", "of", "the", "channels", "for", "which", "to", "interpolate", "data" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1026-L1052
train
33,027
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.set_dtreat_dfit
def set_dtreat_dfit(self, dfit=None): """ Set the fitting dictionnary A dict contaning all parameters for fitting the data Valid dict content includes: - 'type': str 'fft': A fourier filtering 'svd': A svd filtering """ warnings.warn("Not implemented yet !, dfit forced to None") dfit = None assert dfit is None or isinstance(dfit,dict) if isinstance(dfit,dict): assert 'type' in dfit.keys() assert dfit['type'] in ['svd','fft'] self._dtreat['dfit'] = dfit self._ddata['uptodate'] = False
python
def set_dtreat_dfit(self, dfit=None): """ Set the fitting dictionnary A dict contaning all parameters for fitting the data Valid dict content includes: - 'type': str 'fft': A fourier filtering 'svd': A svd filtering """ warnings.warn("Not implemented yet !, dfit forced to None") dfit = None assert dfit is None or isinstance(dfit,dict) if isinstance(dfit,dict): assert 'type' in dfit.keys() assert dfit['type'] in ['svd','fft'] self._dtreat['dfit'] = dfit self._ddata['uptodate'] = False
[ "def", "set_dtreat_dfit", "(", "self", ",", "dfit", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"Not implemented yet !, dfit forced to None\"", ")", "dfit", "=", "None", "assert", "dfit", "is", "None", "or", "isinstance", "(", "dfit", ",", "dict", ...
Set the fitting dictionnary A dict contaning all parameters for fitting the data Valid dict content includes: - 'type': str 'fft': A fourier filtering 'svd': A svd filtering
[ "Set", "the", "fitting", "dictionnary" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1054-L1073
train
33,028
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.set_dtreat_interpt
def set_dtreat_interpt(self, t=None): """ Set the time vector on which to interpolate the data """ if t is not None: t = np.unique(np.asarray(t, dtype=float).ravel()) self._dtreat['interp-t'] = t
python
def set_dtreat_interpt(self, t=None): """ Set the time vector on which to interpolate the data """ if t is not None: t = np.unique(np.asarray(t, dtype=float).ravel()) self._dtreat['interp-t'] = t
[ "def", "set_dtreat_interpt", "(", "self", ",", "t", "=", "None", ")", ":", "if", "t", "is", "not", "None", ":", "t", "=", "np", ".", "unique", "(", "np", ".", "asarray", "(", "t", ",", "dtype", "=", "float", ")", ".", "ravel", "(", ")", ")", ...
Set the time vector on which to interpolate the data
[ "Set", "the", "time", "vector", "on", "which", "to", "interpolate", "the", "data" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1075-L1079
train
33,029
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.set_dtreat_order
def set_dtreat_order(self, order=None): """ Set the order in which the data treatment should be performed Provide an ordered list of keywords indicating the order in which you wish the data treatment steps to be performed. Each keyword corresponds to a step. Available steps are (in default order): - 'mask' : - 'interp_indt' : - 'interp_indch' : - 'data0' : - 'dfit' : - 'indt' : - 'indch' : - 'interp_t': All steps are performed on the stored reference self.dataRef['data'] Thus, the time and channels restriction must be the last 2 steps before interpolating on an external time vector """ if order is None: order = list(self._ddef['dtreat']['order']) assert type(order) is list and all([type(ss) is str for ss in order]) if not all([ss in ['indt','indch','indlamb'] for ss in order][-4:-1]): msg = "indt and indch must be the treatment steps -2 and -3 !" raise Exception(msg) if not order[-1]=='interp-t': msg = "interp-t must be the last treatment step !" raise Exception(msg) self._dtreat['order'] = order self._ddata['uptodate'] = False
python
def set_dtreat_order(self, order=None): """ Set the order in which the data treatment should be performed Provide an ordered list of keywords indicating the order in which you wish the data treatment steps to be performed. Each keyword corresponds to a step. Available steps are (in default order): - 'mask' : - 'interp_indt' : - 'interp_indch' : - 'data0' : - 'dfit' : - 'indt' : - 'indch' : - 'interp_t': All steps are performed on the stored reference self.dataRef['data'] Thus, the time and channels restriction must be the last 2 steps before interpolating on an external time vector """ if order is None: order = list(self._ddef['dtreat']['order']) assert type(order) is list and all([type(ss) is str for ss in order]) if not all([ss in ['indt','indch','indlamb'] for ss in order][-4:-1]): msg = "indt and indch must be the treatment steps -2 and -3 !" raise Exception(msg) if not order[-1]=='interp-t': msg = "interp-t must be the last treatment step !" raise Exception(msg) self._dtreat['order'] = order self._ddata['uptodate'] = False
[ "def", "set_dtreat_order", "(", "self", ",", "order", "=", "None", ")", ":", "if", "order", "is", "None", ":", "order", "=", "list", "(", "self", ".", "_ddef", "[", "'dtreat'", "]", "[", "'order'", "]", ")", "assert", "type", "(", "order", ")", "is...
Set the order in which the data treatment should be performed Provide an ordered list of keywords indicating the order in which you wish the data treatment steps to be performed. Each keyword corresponds to a step. Available steps are (in default order): - 'mask' : - 'interp_indt' : - 'interp_indch' : - 'data0' : - 'dfit' : - 'indt' : - 'indch' : - 'interp_t': All steps are performed on the stored reference self.dataRef['data'] Thus, the time and channels restriction must be the last 2 steps before interpolating on an external time vector
[ "Set", "the", "order", "in", "which", "the", "data", "treatment", "should", "be", "performed" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1208-L1238
train
33,030
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.clear_ddata
def clear_ddata(self): """ Clear the working copy of data Harmless, as it preserves the reference copy and the treatment dict Use only to free some memory """ self._ddata = dict.fromkeys(self._get_keys_ddata()) self._ddata['uptodate'] = False
python
def clear_ddata(self): """ Clear the working copy of data Harmless, as it preserves the reference copy and the treatment dict Use only to free some memory """ self._ddata = dict.fromkeys(self._get_keys_ddata()) self._ddata['uptodate'] = False
[ "def", "clear_ddata", "(", "self", ")", ":", "self", ".", "_ddata", "=", "dict", ".", "fromkeys", "(", "self", ".", "_get_keys_ddata", "(", ")", ")", "self", ".", "_ddata", "[", "'uptodate'", "]", "=", "False" ]
Clear the working copy of data Harmless, as it preserves the reference copy and the treatment dict Use only to free some memory
[ "Clear", "the", "working", "copy", "of", "data" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1341-L1349
train
33,031
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.dchans
def dchans(self, key=None): """ Return the dchans updated with indch Return a dict with all keys if key=None """ if self._dtreat['indch'] is None or np.all(self._dtreat['indch']): dch = dict(self._dchans) if key is None else self._dchans[key] else: dch = {} lk = self._dchans.keys() if key is None else [key] for kk in lk: if self._dchans[kk].ndim==1: dch[kk] = self._dchans[kk][self._dtreat['indch']] elif self._dchans[kk].ndim==2: dch[kk] = self._dchans[kk][:,self._dtreat['indch']] else: msg = "Don't know how to treat self._dchans[%s]:"%kk msg += "\n shape = %s"%(kk,str(self._dchans[kk].shape)) warnings.warn(msg) if key is not None: dch = dch[key] return dch
python
def dchans(self, key=None): """ Return the dchans updated with indch Return a dict with all keys if key=None """ if self._dtreat['indch'] is None or np.all(self._dtreat['indch']): dch = dict(self._dchans) if key is None else self._dchans[key] else: dch = {} lk = self._dchans.keys() if key is None else [key] for kk in lk: if self._dchans[kk].ndim==1: dch[kk] = self._dchans[kk][self._dtreat['indch']] elif self._dchans[kk].ndim==2: dch[kk] = self._dchans[kk][:,self._dtreat['indch']] else: msg = "Don't know how to treat self._dchans[%s]:"%kk msg += "\n shape = %s"%(kk,str(self._dchans[kk].shape)) warnings.warn(msg) if key is not None: dch = dch[key] return dch
[ "def", "dchans", "(", "self", ",", "key", "=", "None", ")", ":", "if", "self", ".", "_dtreat", "[", "'indch'", "]", "is", "None", "or", "np", ".", "all", "(", "self", ".", "_dtreat", "[", "'indch'", "]", ")", ":", "dch", "=", "dict", "(", "self...
Return the dchans updated with indch Return a dict with all keys if key=None
[ "Return", "the", "dchans", "updated", "with", "indch" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1369-L1391
train
33,032
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.select_t
def select_t(self, t=None, out=bool): """ Return a time index array Return a boolean or integer index array, hereafter called 'ind' The array refers to the reference time vector self.ddataRef['t'] Parameters ---------- t : None / float / np.ndarray / list / tuple The time values to be selected: - None : ind matches all time values - float : ind is True only for the time closest to t - np.ndarray : ind is True only for the times closest to t - list (len()==2): ind is True for the times inside [t[0],t[1]] - tuple (len()==2): ind is True for times outside ]t[0];t[1][ out : type Specifies the type of the output index array: - bool : return a boolean array of shape (self.ddataRef['nt'],) - int : return the array as integers indices Return ------ ind : np.ndarray The array of indices, of dtype specified by keywordarg out """ assert out in [bool,int] ind = _select_ind(t, self._ddataRef['t'], self._ddataRef['nt']) if out is int: ind = ind.nonzero()[0] return ind
python
def select_t(self, t=None, out=bool): """ Return a time index array Return a boolean or integer index array, hereafter called 'ind' The array refers to the reference time vector self.ddataRef['t'] Parameters ---------- t : None / float / np.ndarray / list / tuple The time values to be selected: - None : ind matches all time values - float : ind is True only for the time closest to t - np.ndarray : ind is True only for the times closest to t - list (len()==2): ind is True for the times inside [t[0],t[1]] - tuple (len()==2): ind is True for times outside ]t[0];t[1][ out : type Specifies the type of the output index array: - bool : return a boolean array of shape (self.ddataRef['nt'],) - int : return the array as integers indices Return ------ ind : np.ndarray The array of indices, of dtype specified by keywordarg out """ assert out in [bool,int] ind = _select_ind(t, self._ddataRef['t'], self._ddataRef['nt']) if out is int: ind = ind.nonzero()[0] return ind
[ "def", "select_t", "(", "self", ",", "t", "=", "None", ",", "out", "=", "bool", ")", ":", "assert", "out", "in", "[", "bool", ",", "int", "]", "ind", "=", "_select_ind", "(", "t", ",", "self", ".", "_ddataRef", "[", "'t'", "]", ",", "self", "."...
Return a time index array Return a boolean or integer index array, hereafter called 'ind' The array refers to the reference time vector self.ddataRef['t'] Parameters ---------- t : None / float / np.ndarray / list / tuple The time values to be selected: - None : ind matches all time values - float : ind is True only for the time closest to t - np.ndarray : ind is True only for the times closest to t - list (len()==2): ind is True for the times inside [t[0],t[1]] - tuple (len()==2): ind is True for times outside ]t[0];t[1][ out : type Specifies the type of the output index array: - bool : return a boolean array of shape (self.ddataRef['nt'],) - int : return the array as integers indices Return ------ ind : np.ndarray The array of indices, of dtype specified by keywordarg out
[ "Return", "a", "time", "index", "array" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1398-L1428
train
33,033
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.select_lamb
def select_lamb(self, lamb=None, out=bool): """ Return a wavelength index array Return a boolean or integer index array, hereafter called 'ind' The array refers to the reference time vector self.ddataRef['lamb'] Parameters ---------- lamb : None / float / np.ndarray / list / tuple The time values to be selected: - None : ind matches all wavelength values - float : ind is True only for the wavelength closest to lamb - np.ndarray : ind True only for the wavelength closest to lamb - list (len()==2): ind True for wavelength in [lamb[0],lamb[1]] - tuple (len()==2): ind True for wavelength outside ]t[0];t[1][ out : type Specifies the type of the output index array: - bool : return a boolean array of shape (self.ddataRef['nlamb'],) - int : return the array as integers indices Return ------ ind : np.ndarray The array of indices, of dtype specified by keywordarg out """ if not self._isSpectral(): msg = "" raise Exception(msg) assert out in [bool,int] ind = _select_ind(lamb, self._ddataRef['lamb'], self._ddataRef['nlamb']) if out is int: ind = ind.nonzero()[0] return ind
python
def select_lamb(self, lamb=None, out=bool): """ Return a wavelength index array Return a boolean or integer index array, hereafter called 'ind' The array refers to the reference time vector self.ddataRef['lamb'] Parameters ---------- lamb : None / float / np.ndarray / list / tuple The time values to be selected: - None : ind matches all wavelength values - float : ind is True only for the wavelength closest to lamb - np.ndarray : ind True only for the wavelength closest to lamb - list (len()==2): ind True for wavelength in [lamb[0],lamb[1]] - tuple (len()==2): ind True for wavelength outside ]t[0];t[1][ out : type Specifies the type of the output index array: - bool : return a boolean array of shape (self.ddataRef['nlamb'],) - int : return the array as integers indices Return ------ ind : np.ndarray The array of indices, of dtype specified by keywordarg out """ if not self._isSpectral(): msg = "" raise Exception(msg) assert out in [bool,int] ind = _select_ind(lamb, self._ddataRef['lamb'], self._ddataRef['nlamb']) if out is int: ind = ind.nonzero()[0] return ind
[ "def", "select_lamb", "(", "self", ",", "lamb", "=", "None", ",", "out", "=", "bool", ")", ":", "if", "not", "self", ".", "_isSpectral", "(", ")", ":", "msg", "=", "\"\"", "raise", "Exception", "(", "msg", ")", "assert", "out", "in", "[", "bool", ...
Return a wavelength index array Return a boolean or integer index array, hereafter called 'ind' The array refers to the reference time vector self.ddataRef['lamb'] Parameters ---------- lamb : None / float / np.ndarray / list / tuple The time values to be selected: - None : ind matches all wavelength values - float : ind is True only for the wavelength closest to lamb - np.ndarray : ind True only for the wavelength closest to lamb - list (len()==2): ind True for wavelength in [lamb[0],lamb[1]] - tuple (len()==2): ind True for wavelength outside ]t[0];t[1][ out : type Specifies the type of the output index array: - bool : return a boolean array of shape (self.ddataRef['nlamb'],) - int : return the array as integers indices Return ------ ind : np.ndarray The array of indices, of dtype specified by keywordarg out
[ "Return", "a", "wavelength", "index", "array" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1545-L1578
train
33,034
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.plot
def plot(self, key=None, cmap=None, ms=4, vmin=None, vmax=None, vmin_map=None, vmax_map=None, cmap_map=None, normt_map=False, ntMax=None, nchMax=None, nlbdMax=3, lls=None, lct=None, lcch=None, lclbd=None, cbck=None, inct=[1,10], incX=[1,5], inclbd=[1,10], fmt_t='06.3f', fmt_X='01.0f', invert=True, Lplot='In', dmarker=None, Bck=True, fs=None, dmargin=None, wintit=None, tit=None, fontsize=None, labelpad=None, draw=True, connect=True): """ Plot the data content in a generic interactive figure """ kh = _plot.Data_plot(self, key=key, indref=0, cmap=cmap, ms=ms, vmin=vmin, vmax=vmax, vmin_map=vmin_map, vmax_map=vmax_map, cmap_map=cmap_map, normt_map=normt_map, ntMax=ntMax, nchMax=nchMax, nlbdMax=nlbdMax, lls=lls, lct=lct, lcch=lcch, lclbd=lclbd, cbck=cbck, inct=inct, incX=incX, inclbd=inclbd, fmt_t=fmt_t, fmt_X=fmt_X, Lplot=Lplot, invert=invert, dmarker=dmarker, Bck=Bck, fs=fs, dmargin=dmargin, wintit=wintit, tit=tit, fontsize=fontsize, labelpad=labelpad, draw=draw, connect=connect) return kh
python
def plot(self, key=None, cmap=None, ms=4, vmin=None, vmax=None, vmin_map=None, vmax_map=None, cmap_map=None, normt_map=False, ntMax=None, nchMax=None, nlbdMax=3, lls=None, lct=None, lcch=None, lclbd=None, cbck=None, inct=[1,10], incX=[1,5], inclbd=[1,10], fmt_t='06.3f', fmt_X='01.0f', invert=True, Lplot='In', dmarker=None, Bck=True, fs=None, dmargin=None, wintit=None, tit=None, fontsize=None, labelpad=None, draw=True, connect=True): """ Plot the data content in a generic interactive figure """ kh = _plot.Data_plot(self, key=key, indref=0, cmap=cmap, ms=ms, vmin=vmin, vmax=vmax, vmin_map=vmin_map, vmax_map=vmax_map, cmap_map=cmap_map, normt_map=normt_map, ntMax=ntMax, nchMax=nchMax, nlbdMax=nlbdMax, lls=lls, lct=lct, lcch=lcch, lclbd=lclbd, cbck=cbck, inct=inct, incX=incX, inclbd=inclbd, fmt_t=fmt_t, fmt_X=fmt_X, Lplot=Lplot, invert=invert, dmarker=dmarker, Bck=Bck, fs=fs, dmargin=dmargin, wintit=wintit, tit=tit, fontsize=fontsize, labelpad=labelpad, draw=draw, connect=connect) return kh
[ "def", "plot", "(", "self", ",", "key", "=", "None", ",", "cmap", "=", "None", ",", "ms", "=", "4", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "vmin_map", "=", "None", ",", "vmax_map", "=", "None", ",", "cmap_map", "=", "None", ",...
Plot the data content in a generic interactive figure
[ "Plot", "the", "data", "content", "in", "a", "generic", "interactive", "figure" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1582-L1605
train
33,035
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.plot_compare
def plot_compare(self, lD, key=None, cmap=None, ms=4, vmin=None, vmax=None, vmin_map=None, vmax_map=None, cmap_map=None, normt_map=False, ntMax=None, nchMax=None, nlbdMax=3, lls=None, lct=None, lcch=None, lclbd=None, cbck=None, inct=[1,10], incX=[1,5], inclbd=[1,10], fmt_t='06.3f', fmt_X='01.0f', fmt_l='07.3f', invert=True, Lplot='In', dmarker=None, sharey=True, sharelamb=True, Bck=True, fs=None, dmargin=None, wintit=None, tit=None, fontsize=None, labelpad=None, draw=True, connect=True): """ Plot several Data instances of the same diag Useful to compare : - the diag data for 2 different shots - experimental vs synthetic data for the same shot """ C0 = isinstance(lD,list) C0 = C0 and all([issubclass(dd.__class__,DataAbstract) for dd in lD]) C1 = issubclass(lD.__class__,DataAbstract) assert C0 or C1, 'Provided first arg. must be a tf.data.DataAbstract or list !' lD = [lD] if C1 else lD kh = _plot.Data_plot([self]+lD, key=key, indref=0, cmap=cmap, ms=ms, vmin=vmin, vmax=vmax, vmin_map=vmin_map, vmax_map=vmax_map, cmap_map=cmap_map, normt_map=normt_map, ntMax=ntMax, nchMax=nchMax, nlbdMax=nlbdMax, lls=lls, lct=lct, lcch=lcch, lclbd=lclbd, cbck=cbck, inct=inct, incX=incX, inclbd=inclbd, fmt_t=fmt_t, fmt_X=fmt_X, fmt_l=fmt_l, Lplot=Lplot, invert=invert, dmarker=dmarker, Bck=Bck, sharey=sharey, sharelamb=sharelamb, fs=fs, dmargin=dmargin, wintit=wintit, tit=tit, fontsize=fontsize, labelpad=labelpad, draw=draw, connect=connect) return kh
python
def plot_compare(self, lD, key=None, cmap=None, ms=4, vmin=None, vmax=None, vmin_map=None, vmax_map=None, cmap_map=None, normt_map=False, ntMax=None, nchMax=None, nlbdMax=3, lls=None, lct=None, lcch=None, lclbd=None, cbck=None, inct=[1,10], incX=[1,5], inclbd=[1,10], fmt_t='06.3f', fmt_X='01.0f', fmt_l='07.3f', invert=True, Lplot='In', dmarker=None, sharey=True, sharelamb=True, Bck=True, fs=None, dmargin=None, wintit=None, tit=None, fontsize=None, labelpad=None, draw=True, connect=True): """ Plot several Data instances of the same diag Useful to compare : - the diag data for 2 different shots - experimental vs synthetic data for the same shot """ C0 = isinstance(lD,list) C0 = C0 and all([issubclass(dd.__class__,DataAbstract) for dd in lD]) C1 = issubclass(lD.__class__,DataAbstract) assert C0 or C1, 'Provided first arg. must be a tf.data.DataAbstract or list !' lD = [lD] if C1 else lD kh = _plot.Data_plot([self]+lD, key=key, indref=0, cmap=cmap, ms=ms, vmin=vmin, vmax=vmax, vmin_map=vmin_map, vmax_map=vmax_map, cmap_map=cmap_map, normt_map=normt_map, ntMax=ntMax, nchMax=nchMax, nlbdMax=nlbdMax, lls=lls, lct=lct, lcch=lcch, lclbd=lclbd, cbck=cbck, inct=inct, incX=incX, inclbd=inclbd, fmt_t=fmt_t, fmt_X=fmt_X, fmt_l=fmt_l, Lplot=Lplot, invert=invert, dmarker=dmarker, Bck=Bck, sharey=sharey, sharelamb=sharelamb, fs=fs, dmargin=dmargin, wintit=wintit, tit=tit, fontsize=fontsize, labelpad=labelpad, draw=draw, connect=connect) return kh
[ "def", "plot_compare", "(", "self", ",", "lD", ",", "key", "=", "None", ",", "cmap", "=", "None", ",", "ms", "=", "4", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "vmin_map", "=", "None", ",", "vmax_map", "=", "None", ",", "cmap_map"...
Plot several Data instances of the same diag Useful to compare : - the diag data for 2 different shots - experimental vs synthetic data for the same shot
[ "Plot", "several", "Data", "instances", "of", "the", "same", "diag" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1607-L1643
train
33,036
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.calc_spectrogram
def calc_spectrogram(self, fmin=None, method='scipy-fourier', deg=False, window='hann', detrend='linear', nperseg=None, noverlap=None, boundary='constant', padded=True, wave='morlet', warn=True): """ Return the power spectrum density for each channel The power spectrum density is computed with the chosen method Parameters ---------- fmin : None / float The minimum frequency of interest If None, set to 5/T, where T is the whole time interval Used to constrain the number of points per window deg : bool Flag indicating whether to return the phase in deg (vs rad) method : str Flag indicating which method to use for computation: - 'scipy-fourier': uses scipy.signal.spectrogram() (windowed fast fourier transform) - 'scipy-stft': uses scipy.signal.stft() (short time fourier transform) - 'scipy-wavelet': uses scipy.signal.cwt() (continuous wavelet transform) The following keyword args are fed to one of these scipy functions See the corresponding online scipy documentation for details on each function and its arguments window : None / str / tuple If method='scipy-fourier' Flag indicating which type of window to use detrend : None / str If method='scipy-fourier' Flag indicating whether and how to remove the trend of the signal nperseg : None / int If method='scipy-fourier' Number of points to the used for each window If None, deduced from fmin noverlap: If method='scipy-fourier' Number of points on which successive windows should overlap If None, nperseg-1 boundary: If method='scipy-stft' padded : If method='scipy-stft' d wave: None / str If method='scipy-wavelet' Return ------ tf : np.ndarray Time vector of the spectrogram (1D) f: np.ndarray frequency vector of the spectrogram (1D) lspect: list of np.ndarrays list of () spectrograms """ if self._isSpectral(): msg = "spectrogram not implemented yet for spectral data class" raise Exception(msg) tf, f, lpsd, lang = _comp.spectrogram(self.data, self.t, fmin=fmin, deg=deg, method=method, window=window, detrend=detrend, 
nperseg=nperseg, noverlap=noverlap, boundary=boundary, padded=padded, wave=wave, warn=warn) return tf, f, lpsd, lang
python
def calc_spectrogram(self, fmin=None, method='scipy-fourier', deg=False, window='hann', detrend='linear', nperseg=None, noverlap=None, boundary='constant', padded=True, wave='morlet', warn=True): """ Return the power spectrum density for each channel The power spectrum density is computed with the chosen method Parameters ---------- fmin : None / float The minimum frequency of interest If None, set to 5/T, where T is the whole time interval Used to constrain the number of points per window deg : bool Flag indicating whether to return the phase in deg (vs rad) method : str Flag indicating which method to use for computation: - 'scipy-fourier': uses scipy.signal.spectrogram() (windowed fast fourier transform) - 'scipy-stft': uses scipy.signal.stft() (short time fourier transform) - 'scipy-wavelet': uses scipy.signal.cwt() (continuous wavelet transform) The following keyword args are fed to one of these scipy functions See the corresponding online scipy documentation for details on each function and its arguments window : None / str / tuple If method='scipy-fourier' Flag indicating which type of window to use detrend : None / str If method='scipy-fourier' Flag indicating whether and how to remove the trend of the signal nperseg : None / int If method='scipy-fourier' Number of points to the used for each window If None, deduced from fmin noverlap: If method='scipy-fourier' Number of points on which successive windows should overlap If None, nperseg-1 boundary: If method='scipy-stft' padded : If method='scipy-stft' d wave: None / str If method='scipy-wavelet' Return ------ tf : np.ndarray Time vector of the spectrogram (1D) f: np.ndarray frequency vector of the spectrogram (1D) lspect: list of np.ndarrays list of () spectrograms """ if self._isSpectral(): msg = "spectrogram not implemented yet for spectral data class" raise Exception(msg) tf, f, lpsd, lang = _comp.spectrogram(self.data, self.t, fmin=fmin, deg=deg, method=method, window=window, detrend=detrend, 
nperseg=nperseg, noverlap=noverlap, boundary=boundary, padded=padded, wave=wave, warn=warn) return tf, f, lpsd, lang
[ "def", "calc_spectrogram", "(", "self", ",", "fmin", "=", "None", ",", "method", "=", "'scipy-fourier'", ",", "deg", "=", "False", ",", "window", "=", "'hann'", ",", "detrend", "=", "'linear'", ",", "nperseg", "=", "None", ",", "noverlap", "=", "None", ...
Return the power spectrum density for each channel The power spectrum density is computed with the chosen method Parameters ---------- fmin : None / float The minimum frequency of interest If None, set to 5/T, where T is the whole time interval Used to constrain the number of points per window deg : bool Flag indicating whether to return the phase in deg (vs rad) method : str Flag indicating which method to use for computation: - 'scipy-fourier': uses scipy.signal.spectrogram() (windowed fast fourier transform) - 'scipy-stft': uses scipy.signal.stft() (short time fourier transform) - 'scipy-wavelet': uses scipy.signal.cwt() (continuous wavelet transform) The following keyword args are fed to one of these scipy functions See the corresponding online scipy documentation for details on each function and its arguments window : None / str / tuple If method='scipy-fourier' Flag indicating which type of window to use detrend : None / str If method='scipy-fourier' Flag indicating whether and how to remove the trend of the signal nperseg : None / int If method='scipy-fourier' Number of points to the used for each window If None, deduced from fmin noverlap: If method='scipy-fourier' Number of points on which successive windows should overlap If None, nperseg-1 boundary: If method='scipy-stft' padded : If method='scipy-stft' d wave: None / str If method='scipy-wavelet' Return ------ tf : np.ndarray Time vector of the spectrogram (1D) f: np.ndarray frequency vector of the spectrogram (1D) lspect: list of np.ndarrays list of () spectrograms
[ "Return", "the", "power", "spectrum", "density", "for", "each", "channel" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1682-L1754
train
33,037
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.plot_spectrogram
def plot_spectrogram(self, fmin=None, fmax=None, method='scipy-fourier', deg=False, window='hann', detrend='linear', nperseg=None, noverlap=None, boundary='constant', padded=True, wave='morlet', invert=True, plotmethod='imshow', cmap_f=None, cmap_img=None, ms=4, ntMax=None, nfMax=None, Bck=True, fs=None, dmargin=None, wintit=None, tit=None, vmin=None, vmax=None, normt=False, draw=True, connect=True, returnspect=False, warn=True): """ Plot the spectrogram of all channels with chosen method All non-plotting arguments are fed to self.calc_spectrogram() see self.calc_spectrogram? for details Parameters ---------- Return ------ kh : tofu.utils.HeyHandler The tofu KeyHandler object handling figure interactivity """ if self._isSpectral(): msg = "spectrogram not implemented yet for spectral data class" raise Exception(msg) tf, f, lpsd, lang = _comp.spectrogram(self.data, self.t, fmin=fmin, deg=deg, method=method, window=window, detrend=detrend, nperseg=nperseg, noverlap=noverlap, boundary=boundary, padded=padded, wave=wave, warn=warn) kh = _plot.Data_plot_spectrogram(self, tf, f, lpsd, lang, fmax=fmax, invert=invert, plotmethod=plotmethod, cmap_f=cmap_f, cmap_img=cmap_img, ms=ms, ntMax=ntMax, nfMax=nfMax, Bck=Bck, fs=fs, dmargin=dmargin, wintit=wintit, tit=tit, vmin=vmin, vmax=vmax, normt=normt, draw=draw, connect=connect) if returnspect: return kh, tf, f, lpsd, lang else: return kh
python
def plot_spectrogram(self, fmin=None, fmax=None, method='scipy-fourier', deg=False, window='hann', detrend='linear', nperseg=None, noverlap=None, boundary='constant', padded=True, wave='morlet', invert=True, plotmethod='imshow', cmap_f=None, cmap_img=None, ms=4, ntMax=None, nfMax=None, Bck=True, fs=None, dmargin=None, wintit=None, tit=None, vmin=None, vmax=None, normt=False, draw=True, connect=True, returnspect=False, warn=True): """ Plot the spectrogram of all channels with chosen method All non-plotting arguments are fed to self.calc_spectrogram() see self.calc_spectrogram? for details Parameters ---------- Return ------ kh : tofu.utils.HeyHandler The tofu KeyHandler object handling figure interactivity """ if self._isSpectral(): msg = "spectrogram not implemented yet for spectral data class" raise Exception(msg) tf, f, lpsd, lang = _comp.spectrogram(self.data, self.t, fmin=fmin, deg=deg, method=method, window=window, detrend=detrend, nperseg=nperseg, noverlap=noverlap, boundary=boundary, padded=padded, wave=wave, warn=warn) kh = _plot.Data_plot_spectrogram(self, tf, f, lpsd, lang, fmax=fmax, invert=invert, plotmethod=plotmethod, cmap_f=cmap_f, cmap_img=cmap_img, ms=ms, ntMax=ntMax, nfMax=nfMax, Bck=Bck, fs=fs, dmargin=dmargin, wintit=wintit, tit=tit, vmin=vmin, vmax=vmax, normt=normt, draw=draw, connect=connect) if returnspect: return kh, tf, f, lpsd, lang else: return kh
[ "def", "plot_spectrogram", "(", "self", ",", "fmin", "=", "None", ",", "fmax", "=", "None", ",", "method", "=", "'scipy-fourier'", ",", "deg", "=", "False", ",", "window", "=", "'hann'", ",", "detrend", "=", "'linear'", ",", "nperseg", "=", "None", ","...
Plot the spectrogram of all channels with chosen method All non-plotting arguments are fed to self.calc_spectrogram() see self.calc_spectrogram? for details Parameters ---------- Return ------ kh : tofu.utils.HeyHandler The tofu KeyHandler object handling figure interactivity
[ "Plot", "the", "spectrogram", "of", "all", "channels", "with", "chosen", "method" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1756-L1802
train
33,038
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.calc_svd
def calc_svd(self, lapack_driver='gesdd'): """ Return the SVD decomposition of data The input data np.ndarray shall be of dimension 2, with time as the first dimension, and the channels in the second Hence data should be of shape (nt, nch) Uses scipy.linalg.svd(), with: full_matrices = True compute_uv = True overwrite_a = False check_finite = True See scipy online doc for details Return ------ chronos: np.ndarray First arg (u) returned by scipy.linalg.svd() Contains the so-called 'chronos', of shape (nt, nt) i.e.: the time-dependent part of the decoposition s: np.ndarray Second arg (s) returned by scipy.linalg.svd() Contains the singular values, of shape (nch,) i.e.: the channel-dependent part of the decoposition topos: np.ndarray Third arg (v) returned by scipy.linalg.svd() Contains the so-called 'topos', of shape (nch, nch) i.e.: the channel-dependent part of the decoposition """ if self._isSpectral(): msg = "svd not implemented yet for spectral data class" raise Exception(msg) chronos, s, topos = _comp.calc_svd(self.data, lapack_driver=lapack_driver) return u, s, v
python
def calc_svd(self, lapack_driver='gesdd'): """ Return the SVD decomposition of data The input data np.ndarray shall be of dimension 2, with time as the first dimension, and the channels in the second Hence data should be of shape (nt, nch) Uses scipy.linalg.svd(), with: full_matrices = True compute_uv = True overwrite_a = False check_finite = True See scipy online doc for details Return ------ chronos: np.ndarray First arg (u) returned by scipy.linalg.svd() Contains the so-called 'chronos', of shape (nt, nt) i.e.: the time-dependent part of the decoposition s: np.ndarray Second arg (s) returned by scipy.linalg.svd() Contains the singular values, of shape (nch,) i.e.: the channel-dependent part of the decoposition topos: np.ndarray Third arg (v) returned by scipy.linalg.svd() Contains the so-called 'topos', of shape (nch, nch) i.e.: the channel-dependent part of the decoposition """ if self._isSpectral(): msg = "svd not implemented yet for spectral data class" raise Exception(msg) chronos, s, topos = _comp.calc_svd(self.data, lapack_driver=lapack_driver) return u, s, v
[ "def", "calc_svd", "(", "self", ",", "lapack_driver", "=", "'gesdd'", ")", ":", "if", "self", ".", "_isSpectral", "(", ")", ":", "msg", "=", "\"svd not implemented yet for spectral data class\"", "raise", "Exception", "(", "msg", ")", "chronos", ",", "s", ",",...
Return the SVD decomposition of data The input data np.ndarray shall be of dimension 2, with time as the first dimension, and the channels in the second Hence data should be of shape (nt, nch) Uses scipy.linalg.svd(), with: full_matrices = True compute_uv = True overwrite_a = False check_finite = True See scipy online doc for details Return ------ chronos: np.ndarray First arg (u) returned by scipy.linalg.svd() Contains the so-called 'chronos', of shape (nt, nt) i.e.: the time-dependent part of the decoposition s: np.ndarray Second arg (s) returned by scipy.linalg.svd() Contains the singular values, of shape (nch,) i.e.: the channel-dependent part of the decoposition topos: np.ndarray Third arg (v) returned by scipy.linalg.svd() Contains the so-called 'topos', of shape (nch, nch) i.e.: the channel-dependent part of the decoposition
[ "Return", "the", "SVD", "decomposition", "of", "data" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1804-L1839
train
33,039
ToFuProject/tofu
tofu/data/_core.py
DataAbstract.plot_svd
def plot_svd(self, lapack_driver='gesdd', modes=None, key=None, Bck=True, Lplot='In', cmap=None, vmin=None, vmax=None, cmap_topos=None, vmin_topos=None, vmax_topos=None, ntMax=None, nchMax=None, ms=4, inct=[1,10], incX=[1,5], incm=[1,5], lls=None, lct=None, lcch=None, lcm=None, cbck=None, invert=False, fmt_t='06.3f', fmt_X='01.0f', fmt_m='03.0f', fs=None, dmargin=None, labelpad=None, wintit=None, tit=None, fontsize=None, draw=True, connect=True): """ Plot the chosen modes of the svd decomposition All modes will be plotted, the keyword 'modes' is only used to determine the reference modes for computing a common scale for vizualisation Runs self.calc_svd() and then plots the result in an interactive figure """ if self._isSpectral(): msg = "svd not implemented yet for spectral data class" raise Exception(msg) # Computing (~0.2 s for 50 channels 1D and 1000 times) chronos, s, topos = _comp.calc_svd(self.data, lapack_driver=lapack_driver) # Plotting (~11 s for 50 channels 1D and 1000 times) kh = _plot.Data_plot_svd(self, chronos, s, topos, modes=modes, key=key, Bck=Bck, Lplot=Lplot, cmap=cmap, vmin=vmin, vmax=vmax, cmap_topos=cmap_topos, vmin_topos=vmin_topos, vmax_topos=vmax_topos, ntMax=ntMax, nchMax=nchMax, ms=ms, inct=inct, incX=incX, incm=incm, lls=lls, lct=lct, lcch=lcch, lcm=lcm, cbck=cbck, invert=invert, fmt_t=fmt_t, fmt_X=fmt_X, fmt_m=fmt_m, fs=fs, dmargin=dmargin, labelpad=labelpad, wintit=wintit, tit=tit, fontsize=fontsize, draw=draw, connect=connect) return kh
python
def plot_svd(self, lapack_driver='gesdd', modes=None, key=None, Bck=True, Lplot='In', cmap=None, vmin=None, vmax=None, cmap_topos=None, vmin_topos=None, vmax_topos=None, ntMax=None, nchMax=None, ms=4, inct=[1,10], incX=[1,5], incm=[1,5], lls=None, lct=None, lcch=None, lcm=None, cbck=None, invert=False, fmt_t='06.3f', fmt_X='01.0f', fmt_m='03.0f', fs=None, dmargin=None, labelpad=None, wintit=None, tit=None, fontsize=None, draw=True, connect=True): """ Plot the chosen modes of the svd decomposition All modes will be plotted, the keyword 'modes' is only used to determine the reference modes for computing a common scale for vizualisation Runs self.calc_svd() and then plots the result in an interactive figure """ if self._isSpectral(): msg = "svd not implemented yet for spectral data class" raise Exception(msg) # Computing (~0.2 s for 50 channels 1D and 1000 times) chronos, s, topos = _comp.calc_svd(self.data, lapack_driver=lapack_driver) # Plotting (~11 s for 50 channels 1D and 1000 times) kh = _plot.Data_plot_svd(self, chronos, s, topos, modes=modes, key=key, Bck=Bck, Lplot=Lplot, cmap=cmap, vmin=vmin, vmax=vmax, cmap_topos=cmap_topos, vmin_topos=vmin_topos, vmax_topos=vmax_topos, ntMax=ntMax, nchMax=nchMax, ms=ms, inct=inct, incX=incX, incm=incm, lls=lls, lct=lct, lcch=lcch, lcm=lcm, cbck=cbck, invert=invert, fmt_t=fmt_t, fmt_X=fmt_X, fmt_m=fmt_m, fs=fs, dmargin=dmargin, labelpad=labelpad, wintit=wintit, tit=tit, fontsize=fontsize, draw=draw, connect=connect) return kh
[ "def", "plot_svd", "(", "self", ",", "lapack_driver", "=", "'gesdd'", ",", "modes", "=", "None", ",", "key", "=", "None", ",", "Bck", "=", "True", ",", "Lplot", "=", "'In'", ",", "cmap", "=", "None", ",", "vmin", "=", "None", ",", "vmax", "=", "N...
Plot the chosen modes of the svd decomposition All modes will be plotted, the keyword 'modes' is only used to determine the reference modes for computing a common scale for vizualisation Runs self.calc_svd() and then plots the result in an interactive figure
[ "Plot", "the", "chosen", "modes", "of", "the", "svd", "decomposition" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1841-L1878
train
33,040
ToFuProject/tofu
tofu/data/_core.py
DataSpectro.plot
def plot(self, key=None, invert=None, plotmethod='imshow', cmap=plt.cm.gray, ms=4, Max=None, fs=None, dmargin=None, wintit=None, draw=True, connect=True): """ Plot the data content in a predefined figure """ dax, KH = _plot.Data_plot(self, key=key, invert=invert, Max=Max, plotmethod=plotmethod, cmap=cmap, ms=ms, fs=fs, dmargin=dmargin, wintit=wintit, draw=draw, connect=connect) return dax, KH
python
def plot(self, key=None, invert=None, plotmethod='imshow', cmap=plt.cm.gray, ms=4, Max=None, fs=None, dmargin=None, wintit=None, draw=True, connect=True): """ Plot the data content in a predefined figure """ dax, KH = _plot.Data_plot(self, key=key, invert=invert, Max=Max, plotmethod=plotmethod, cmap=cmap, ms=ms, fs=fs, dmargin=dmargin, wintit=wintit, draw=draw, connect=connect) return dax, KH
[ "def", "plot", "(", "self", ",", "key", "=", "None", ",", "invert", "=", "None", ",", "plotmethod", "=", "'imshow'", ",", "cmap", "=", "plt", ".", "cm", ".", "gray", ",", "ms", "=", "4", ",", "Max", "=", "None", ",", "fs", "=", "None", ",", "...
Plot the data content in a predefined figure
[ "Plot", "the", "data", "content", "in", "a", "predefined", "figure" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L2446-L2455
train
33,041
ToFuProject/tofu
tofu/geom/_comp.py
LOS_PRMin
def LOS_PRMin(Ds, dus, kPOut=None, Eps=1.e-12, Test=True): """ Compute the point on the LOS where the major radius is minimum """ if Test: assert Ds.ndim in [1,2] and 3 in Ds.shape and Ds.shape==dus.shape assert kPOut is None or (Ds.ndim==1 and not hasattr(kPOut,'__iter__')) or (Ds.ndim==2 and kPOut.shape==(Ds.size/3,)) v = Ds.ndim==1 if v: Ds = Ds.reshape((3,1)) dus = dus.reshape((3,1)) if kPOut is not None: kPOut = np.array([kPOut]) kRMin = np.nan*np.ones((Ds.shape[1],)) uparN = np.sqrt(dus[0,:]**2 + dus[1,:]**2) # Case with u vertical ind = uparN>Eps kRMin[~ind] = 0. # Else kRMin[ind] = -(dus[0,ind]*Ds[0,ind]+dus[1,ind]*Ds[1,ind])/uparN[ind]**2 # Check kRMin[kRMin<=0.] = 0. if kPOut is not None: kRMin[kRMin>kPOut] = kPOut[kRMin>kPOut] if v: kRMin = kRMin[0] return kRMin
python
def LOS_PRMin(Ds, dus, kPOut=None, Eps=1.e-12, Test=True): """ Compute the point on the LOS where the major radius is minimum """ if Test: assert Ds.ndim in [1,2] and 3 in Ds.shape and Ds.shape==dus.shape assert kPOut is None or (Ds.ndim==1 and not hasattr(kPOut,'__iter__')) or (Ds.ndim==2 and kPOut.shape==(Ds.size/3,)) v = Ds.ndim==1 if v: Ds = Ds.reshape((3,1)) dus = dus.reshape((3,1)) if kPOut is not None: kPOut = np.array([kPOut]) kRMin = np.nan*np.ones((Ds.shape[1],)) uparN = np.sqrt(dus[0,:]**2 + dus[1,:]**2) # Case with u vertical ind = uparN>Eps kRMin[~ind] = 0. # Else kRMin[ind] = -(dus[0,ind]*Ds[0,ind]+dus[1,ind]*Ds[1,ind])/uparN[ind]**2 # Check kRMin[kRMin<=0.] = 0. if kPOut is not None: kRMin[kRMin>kPOut] = kPOut[kRMin>kPOut] if v: kRMin = kRMin[0] return kRMin
[ "def", "LOS_PRMin", "(", "Ds", ",", "dus", ",", "kPOut", "=", "None", ",", "Eps", "=", "1.e-12", ",", "Test", "=", "True", ")", ":", "if", "Test", ":", "assert", "Ds", ".", "ndim", "in", "[", "1", ",", "2", "]", "and", "3", "in", "Ds", ".", ...
Compute the point on the LOS where the major radius is minimum
[ "Compute", "the", "point", "on", "the", "LOS", "where", "the", "major", "radius", "is", "minimum" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_comp.py#L306-L336
train
33,042
ToFuProject/tofu
tofu/geom/_comp.py
LOS_get_sample
def LOS_get_sample(D, u, dL, DL=None, dLMode='abs', method='sum', Test=True): """ Return the sampled line, with the specified method 'linspace': return the N+1 edges, including the first and last point 'sum' : return the N middle of the segments 'simps': return the N+1 egdes, where N has to be even (scipy.simpson requires an even number of intervals) 'romb' : return the N+1 edges, where N+1 = 2**k+1 (fed to scipy.romb for integration) """ if Test: assert all([type(dd) is np.ndarray and dd.shape==(3,) for dd in [D,u]]) assert not hasattr(dL,'__iter__') assert DL is None or all([hasattr(DL,'__iter__'), len(DL)==2, all([not hasattr(dd,'__iter__') for dd in DL])]) assert dLMode in ['abs','rel'] assert type(method) is str and method in ['linspace','sum','simps','romb'] # Compute the minimum number of intervals to satisfy the specified resolution N = int(np.ceil((DL[1]-DL[0])/dL)) if dLMode=='abs' else int(np.ceil(1./dL)) # Modify N according to the desired method if method=='simps': N = N if N%2==0 else N+1 elif method=='romb': N = 2**int(np.ceil(np.log(N)/np.log(2.))) # Derive k and dLr if method=='sum': dLr = (DL[1]-DL[0])/N k = DL[0] + (0.5+np.arange(0,N))*dLr else: k, dLr = np.linspace(DL[0], DL[1], N+1, endpoint=True, retstep=True, dtype=float) Pts = D[:,np.newaxis] + k[np.newaxis,:]*u[:,np.newaxis] return Pts, k, dLr
python
def LOS_get_sample(D, u, dL, DL=None, dLMode='abs', method='sum', Test=True): """ Return the sampled line, with the specified method 'linspace': return the N+1 edges, including the first and last point 'sum' : return the N middle of the segments 'simps': return the N+1 egdes, where N has to be even (scipy.simpson requires an even number of intervals) 'romb' : return the N+1 edges, where N+1 = 2**k+1 (fed to scipy.romb for integration) """ if Test: assert all([type(dd) is np.ndarray and dd.shape==(3,) for dd in [D,u]]) assert not hasattr(dL,'__iter__') assert DL is None or all([hasattr(DL,'__iter__'), len(DL)==2, all([not hasattr(dd,'__iter__') for dd in DL])]) assert dLMode in ['abs','rel'] assert type(method) is str and method in ['linspace','sum','simps','romb'] # Compute the minimum number of intervals to satisfy the specified resolution N = int(np.ceil((DL[1]-DL[0])/dL)) if dLMode=='abs' else int(np.ceil(1./dL)) # Modify N according to the desired method if method=='simps': N = N if N%2==0 else N+1 elif method=='romb': N = 2**int(np.ceil(np.log(N)/np.log(2.))) # Derive k and dLr if method=='sum': dLr = (DL[1]-DL[0])/N k = DL[0] + (0.5+np.arange(0,N))*dLr else: k, dLr = np.linspace(DL[0], DL[1], N+1, endpoint=True, retstep=True, dtype=float) Pts = D[:,np.newaxis] + k[np.newaxis,:]*u[:,np.newaxis] return Pts, k, dLr
[ "def", "LOS_get_sample", "(", "D", ",", "u", ",", "dL", ",", "DL", "=", "None", ",", "dLMode", "=", "'abs'", ",", "method", "=", "'sum'", ",", "Test", "=", "True", ")", ":", "if", "Test", ":", "assert", "all", "(", "[", "type", "(", "dd", ")", ...
Return the sampled line, with the specified method 'linspace': return the N+1 edges, including the first and last point 'sum' : return the N middle of the segments 'simps': return the N+1 egdes, where N has to be even (scipy.simpson requires an even number of intervals) 'romb' : return the N+1 edges, where N+1 = 2**k+1 (fed to scipy.romb for integration)
[ "Return", "the", "sampled", "line", "with", "the", "specified", "method" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_comp.py#L426-L456
train
33,043
ToFuProject/tofu
tofu/geom/_core.py
Struct.isInside
def isInside(self, pts, In='(X,Y,Z)'): """ Return an array of booleans indicating whether each point lies inside the Struct volume Tests for each point whether it lies inside the Struct object. The points coordinates can be provided in 2D or 3D You must specify which coordinate system is used with 'In' kwdarg. An array of boolean flags is returned. Parameters ---------- pts : np.ndarray (2,N) or (3,N) array, coordinates of the points to be tested In : str Flag indicating the coordinate system in which pts are provided e.g.: '(X,Y,Z)' or '(R,Z)' Returns ------- ind : np.ndarray (N,) array of booleans, True if a point is inside the volume """ ind = _GG._Ves_isInside(pts, self.Poly, Lim=self.Lim, nLim=self._dgeom['noccur'], VType=self.Id.Type, In=In, Test=True) return ind
python
def isInside(self, pts, In='(X,Y,Z)'): """ Return an array of booleans indicating whether each point lies inside the Struct volume Tests for each point whether it lies inside the Struct object. The points coordinates can be provided in 2D or 3D You must specify which coordinate system is used with 'In' kwdarg. An array of boolean flags is returned. Parameters ---------- pts : np.ndarray (2,N) or (3,N) array, coordinates of the points to be tested In : str Flag indicating the coordinate system in which pts are provided e.g.: '(X,Y,Z)' or '(R,Z)' Returns ------- ind : np.ndarray (N,) array of booleans, True if a point is inside the volume """ ind = _GG._Ves_isInside(pts, self.Poly, Lim=self.Lim, nLim=self._dgeom['noccur'], VType=self.Id.Type, In=In, Test=True) return ind
[ "def", "isInside", "(", "self", ",", "pts", ",", "In", "=", "'(X,Y,Z)'", ")", ":", "ind", "=", "_GG", ".", "_Ves_isInside", "(", "pts", ",", "self", ".", "Poly", ",", "Lim", "=", "self", ".", "Lim", ",", "nLim", "=", "self", ".", "_dgeom", "[", ...
Return an array of booleans indicating whether each point lies inside the Struct volume Tests for each point whether it lies inside the Struct object. The points coordinates can be provided in 2D or 3D You must specify which coordinate system is used with 'In' kwdarg. An array of boolean flags is returned. Parameters ---------- pts : np.ndarray (2,N) or (3,N) array, coordinates of the points to be tested In : str Flag indicating the coordinate system in which pts are provided e.g.: '(X,Y,Z)' or '(R,Z)' Returns ------- ind : np.ndarray (N,) array of booleans, True if a point is inside the volume
[ "Return", "an", "array", "of", "booleans", "indicating", "whether", "each", "point", "lies", "inside", "the", "Struct", "volume" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L691-L718
train
33,044
ToFuProject/tofu
tofu/geom/_core.py
Struct.get_InsideConvexPoly
def get_InsideConvexPoly(self, RelOff=_def.TorRelOff, ZLim='Def', Spline=True, Splprms=_def.TorSplprms, NP=_def.TorInsideNP, Plot=False, Test=True): """ Return a polygon that is a smaller and smoothed approximation of Ves.Poly, useful for excluding the divertor region in a Tokamak For some uses, it can be practical to approximate the polygon defining the Ves object (which can be non-convex, like with a divertor), by a simpler, sligthly smaller and convex polygon. This method provides a fast solution for computing such a proxy. Parameters ---------- RelOff : float Fraction by which an homothetic polygon should be reduced (1.-RelOff)*(Poly-BaryS) ZLim : None / str / tuple Flag indicating what limits shall be put to the height of the polygon (used for excluding divertor) Spline : bool Flag indiating whether the reduced and truncated polygon shall be smoothed by 2D b-spline curves Splprms : list List of 3 parameters to be used for the smoothing [weights,smoothness,b-spline order], fed to scipy.interpolate.splprep() NP : int Number of points to be used to define the smoothed polygon Plot : bool Flag indicating whether the result shall be plotted for visual inspection Test : bool Flag indicating whether the inputs should be tested for conformity Returns ------- Poly : np.ndarray (2,N) polygon resulting from homothetic transform, truncating and optional smoothing """ return _comp._Ves_get_InsideConvexPoly(self.Poly_closed, self.dgeom['P2Min'], self.dgeom['P2Max'], self.dgeom['BaryS'], RelOff=RelOff, ZLim=ZLim, Spline=Spline, Splprms=Splprms, NP=NP, Plot=Plot, Test=Test)
python
def get_InsideConvexPoly(self, RelOff=_def.TorRelOff, ZLim='Def', Spline=True, Splprms=_def.TorSplprms, NP=_def.TorInsideNP, Plot=False, Test=True): """ Return a polygon that is a smaller and smoothed approximation of Ves.Poly, useful for excluding the divertor region in a Tokamak For some uses, it can be practical to approximate the polygon defining the Ves object (which can be non-convex, like with a divertor), by a simpler, sligthly smaller and convex polygon. This method provides a fast solution for computing such a proxy. Parameters ---------- RelOff : float Fraction by which an homothetic polygon should be reduced (1.-RelOff)*(Poly-BaryS) ZLim : None / str / tuple Flag indicating what limits shall be put to the height of the polygon (used for excluding divertor) Spline : bool Flag indiating whether the reduced and truncated polygon shall be smoothed by 2D b-spline curves Splprms : list List of 3 parameters to be used for the smoothing [weights,smoothness,b-spline order], fed to scipy.interpolate.splprep() NP : int Number of points to be used to define the smoothed polygon Plot : bool Flag indicating whether the result shall be plotted for visual inspection Test : bool Flag indicating whether the inputs should be tested for conformity Returns ------- Poly : np.ndarray (2,N) polygon resulting from homothetic transform, truncating and optional smoothing """ return _comp._Ves_get_InsideConvexPoly(self.Poly_closed, self.dgeom['P2Min'], self.dgeom['P2Max'], self.dgeom['BaryS'], RelOff=RelOff, ZLim=ZLim, Spline=Spline, Splprms=Splprms, NP=NP, Plot=Plot, Test=Test)
[ "def", "get_InsideConvexPoly", "(", "self", ",", "RelOff", "=", "_def", ".", "TorRelOff", ",", "ZLim", "=", "'Def'", ",", "Spline", "=", "True", ",", "Splprms", "=", "_def", ".", "TorSplprms", ",", "NP", "=", "_def", ".", "TorInsideNP", ",", "Plot", "=...
Return a polygon that is a smaller and smoothed approximation of Ves.Poly, useful for excluding the divertor region in a Tokamak For some uses, it can be practical to approximate the polygon defining the Ves object (which can be non-convex, like with a divertor), by a simpler, sligthly smaller and convex polygon. This method provides a fast solution for computing such a proxy. Parameters ---------- RelOff : float Fraction by which an homothetic polygon should be reduced (1.-RelOff)*(Poly-BaryS) ZLim : None / str / tuple Flag indicating what limits shall be put to the height of the polygon (used for excluding divertor) Spline : bool Flag indiating whether the reduced and truncated polygon shall be smoothed by 2D b-spline curves Splprms : list List of 3 parameters to be used for the smoothing [weights,smoothness,b-spline order], fed to scipy.interpolate.splprep() NP : int Number of points to be used to define the smoothed polygon Plot : bool Flag indicating whether the result shall be plotted for visual inspection Test : bool Flag indicating whether the inputs should be tested for conformity Returns ------- Poly : np.ndarray (2,N) polygon resulting from homothetic transform, truncating and optional smoothing
[ "Return", "a", "polygon", "that", "is", "a", "smaller", "and", "smoothed", "approximation", "of", "Ves", ".", "Poly", "useful", "for", "excluding", "the", "divertor", "region", "in", "a", "Tokamak" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L721-L758
train
33,045
ToFuProject/tofu
tofu/geom/_core.py
Struct.get_sampleEdge
def get_sampleEdge(self, res, DS=None, resMode='abs', offsetIn=0.): """ Sample the polygon edges, with resolution res Sample each segment of the 2D polygon Sampling can be limited to a subdomain defined by DS """ pts, dlr, ind = _comp._Ves_get_sampleEdge(self.Poly, res, DS=DS, dLMode=resMode, DIn=offsetIn, VIn=self.dgeom['VIn'], margin=1.e-9) return pts, dlr, ind
python
def get_sampleEdge(self, res, DS=None, resMode='abs', offsetIn=0.): """ Sample the polygon edges, with resolution res Sample each segment of the 2D polygon Sampling can be limited to a subdomain defined by DS """ pts, dlr, ind = _comp._Ves_get_sampleEdge(self.Poly, res, DS=DS, dLMode=resMode, DIn=offsetIn, VIn=self.dgeom['VIn'], margin=1.e-9) return pts, dlr, ind
[ "def", "get_sampleEdge", "(", "self", ",", "res", ",", "DS", "=", "None", ",", "resMode", "=", "'abs'", ",", "offsetIn", "=", "0.", ")", ":", "pts", ",", "dlr", ",", "ind", "=", "_comp", ".", "_Ves_get_sampleEdge", "(", "self", ".", "Poly", ",", "r...
Sample the polygon edges, with resolution res Sample each segment of the 2D polygon Sampling can be limited to a subdomain defined by DS
[ "Sample", "the", "polygon", "edges", "with", "resolution", "res" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L760-L770
train
33,046
ToFuProject/tofu
tofu/geom/_core.py
Struct.get_sampleCross
def get_sampleCross(self, res, DS=None, resMode='abs', ind=None): """ Sample, with resolution res, the 2D cross-section The sampling domain can be limited by DS or ind """ args = [self.Poly, self.dgeom['P1Min'][0], self.dgeom['P1Max'][0], self.dgeom['P2Min'][1], self.dgeom['P2Max'][1], res] kwdargs = dict(DS=DS, dSMode=resMode, ind=ind, margin=1.e-9) pts, dS, ind, reseff = _comp._Ves_get_sampleCross(*args, **kwdargs) return pts, dS, ind, reseff
python
def get_sampleCross(self, res, DS=None, resMode='abs', ind=None): """ Sample, with resolution res, the 2D cross-section The sampling domain can be limited by DS or ind """ args = [self.Poly, self.dgeom['P1Min'][0], self.dgeom['P1Max'][0], self.dgeom['P2Min'][1], self.dgeom['P2Max'][1], res] kwdargs = dict(DS=DS, dSMode=resMode, ind=ind, margin=1.e-9) pts, dS, ind, reseff = _comp._Ves_get_sampleCross(*args, **kwdargs) return pts, dS, ind, reseff
[ "def", "get_sampleCross", "(", "self", ",", "res", ",", "DS", "=", "None", ",", "resMode", "=", "'abs'", ",", "ind", "=", "None", ")", ":", "args", "=", "[", "self", ".", "Poly", ",", "self", ".", "dgeom", "[", "'P1Min'", "]", "[", "0", "]", ",...
Sample, with resolution res, the 2D cross-section The sampling domain can be limited by DS or ind
[ "Sample", "with", "resolution", "res", "the", "2D", "cross", "-", "section" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L772-L781
train
33,047
ToFuProject/tofu
tofu/geom/_core.py
Struct.get_sampleS
def get_sampleS(self, res, DS=None, resMode='abs', ind=None, offsetIn=0., Out='(X,Y,Z)', Ind=None): """ Sample, with resolution res, the surface defined by DS or ind An optionnal offset perpendicular to the surface can be used (offsetIn>0 => inwards) Parameters ---------- res : float / list of 2 floats Desired resolution of the surfacic sample float : same resolution for all directions of the sample list : [dl,dXPhi] where: dl : res. along polygon contours (cross-section) dXPhi : res. along axis (toroidal/linear direction) DS : None / list of 3 lists of 2 floats Limits of the domain in which the sample should be computed None : whole surface of the object list : [D1,D2,D3], where Di is a len()=2 list (increasing floats, setting limits along coordinate i) [DR,DZ,DPhi]: in toroidal geometry (self.Id.Type=='Tor') [DX,DY,DZ] : in linear geometry (self.Id.Type=='Lin') resMode : str Flag, specifies if res is absolute or relative to element sizes 'abs' : res is an absolute distance 'rel' : if res=0.1, each polygon segment is divided in 10, as is the toroidal/linear length ind : None / np.ndarray of int If provided, DS is ignored and the sample points corresponding to the provided indices are returned Example (assuming obj is a Ves object) > # We create a 5x5 cm2 sample of the whole surface > pts, dS, ind, reseff = obj.get_sample(0.05) > # Perform operations, save only the points indices (save space) > ... > # Retrieve the points from their indices (requires same res) > pts2, dS2, ind2, reseff2 = obj.get_sample(0.05, ind=ind) > np.allclose(pts,pts2) True offsetIn: float Offset distance from the actual surface of the object Inwards if positive Useful to avoid numerical errors Out : str Flag indicating the coordinate system of returned points e.g. 
: '(X,Y,Z)' or '(R,Z,Phi)' Ind : None / iterable of ints Array of indices of the entities to be considered (only when multiple entities, i.e.: self.nLim>1) Returns ------- pts : np.ndarray / list of np.ndarrays Sample points coordinates, as a (3,N) array. A list is returned if the object has multiple entities dS : np.ndarray / list of np.ndarrays The surface (in m^2) associated to each point ind : np.ndarray / list of np.ndarrays The index of each point reseff : np.ndarray / list of np.ndarrays Effective resolution in both directions after sample computation """ if Ind is not None: assert self.dgeom['Multi'] kwdargs = dict(DS=DS, dSMode=resMode, ind=ind, DIn=offsetIn, VIn=self.dgeom['VIn'], VType=self.Id.Type, VLim=np.ascontiguousarray(self.Lim), nVLim=self.noccur, Out=Out, margin=1.e-9, Multi=self.dgeom['Multi'], Ind=Ind) args = [self.Poly, self.dgeom['P1Min'][0], self.dgeom['P1Max'][0], self.dgeom['P2Min'][1], self.dgeom['P2Max'][1], res] pts, dS, ind, reseff = _comp._Ves_get_sampleS(*args, **kwdargs) return pts, dS, ind, reseff
python
def get_sampleS(self, res, DS=None, resMode='abs', ind=None, offsetIn=0., Out='(X,Y,Z)', Ind=None): """ Sample, with resolution res, the surface defined by DS or ind An optionnal offset perpendicular to the surface can be used (offsetIn>0 => inwards) Parameters ---------- res : float / list of 2 floats Desired resolution of the surfacic sample float : same resolution for all directions of the sample list : [dl,dXPhi] where: dl : res. along polygon contours (cross-section) dXPhi : res. along axis (toroidal/linear direction) DS : None / list of 3 lists of 2 floats Limits of the domain in which the sample should be computed None : whole surface of the object list : [D1,D2,D3], where Di is a len()=2 list (increasing floats, setting limits along coordinate i) [DR,DZ,DPhi]: in toroidal geometry (self.Id.Type=='Tor') [DX,DY,DZ] : in linear geometry (self.Id.Type=='Lin') resMode : str Flag, specifies if res is absolute or relative to element sizes 'abs' : res is an absolute distance 'rel' : if res=0.1, each polygon segment is divided in 10, as is the toroidal/linear length ind : None / np.ndarray of int If provided, DS is ignored and the sample points corresponding to the provided indices are returned Example (assuming obj is a Ves object) > # We create a 5x5 cm2 sample of the whole surface > pts, dS, ind, reseff = obj.get_sample(0.05) > # Perform operations, save only the points indices (save space) > ... > # Retrieve the points from their indices (requires same res) > pts2, dS2, ind2, reseff2 = obj.get_sample(0.05, ind=ind) > np.allclose(pts,pts2) True offsetIn: float Offset distance from the actual surface of the object Inwards if positive Useful to avoid numerical errors Out : str Flag indicating the coordinate system of returned points e.g. 
: '(X,Y,Z)' or '(R,Z,Phi)' Ind : None / iterable of ints Array of indices of the entities to be considered (only when multiple entities, i.e.: self.nLim>1) Returns ------- pts : np.ndarray / list of np.ndarrays Sample points coordinates, as a (3,N) array. A list is returned if the object has multiple entities dS : np.ndarray / list of np.ndarrays The surface (in m^2) associated to each point ind : np.ndarray / list of np.ndarrays The index of each point reseff : np.ndarray / list of np.ndarrays Effective resolution in both directions after sample computation """ if Ind is not None: assert self.dgeom['Multi'] kwdargs = dict(DS=DS, dSMode=resMode, ind=ind, DIn=offsetIn, VIn=self.dgeom['VIn'], VType=self.Id.Type, VLim=np.ascontiguousarray(self.Lim), nVLim=self.noccur, Out=Out, margin=1.e-9, Multi=self.dgeom['Multi'], Ind=Ind) args = [self.Poly, self.dgeom['P1Min'][0], self.dgeom['P1Max'][0], self.dgeom['P2Min'][1], self.dgeom['P2Max'][1], res] pts, dS, ind, reseff = _comp._Ves_get_sampleS(*args, **kwdargs) return pts, dS, ind, reseff
[ "def", "get_sampleS", "(", "self", ",", "res", ",", "DS", "=", "None", ",", "resMode", "=", "'abs'", ",", "ind", "=", "None", ",", "offsetIn", "=", "0.", ",", "Out", "=", "'(X,Y,Z)'", ",", "Ind", "=", "None", ")", ":", "if", "Ind", "is", "not", ...
Sample, with resolution res, the surface defined by DS or ind An optionnal offset perpendicular to the surface can be used (offsetIn>0 => inwards) Parameters ---------- res : float / list of 2 floats Desired resolution of the surfacic sample float : same resolution for all directions of the sample list : [dl,dXPhi] where: dl : res. along polygon contours (cross-section) dXPhi : res. along axis (toroidal/linear direction) DS : None / list of 3 lists of 2 floats Limits of the domain in which the sample should be computed None : whole surface of the object list : [D1,D2,D3], where Di is a len()=2 list (increasing floats, setting limits along coordinate i) [DR,DZ,DPhi]: in toroidal geometry (self.Id.Type=='Tor') [DX,DY,DZ] : in linear geometry (self.Id.Type=='Lin') resMode : str Flag, specifies if res is absolute or relative to element sizes 'abs' : res is an absolute distance 'rel' : if res=0.1, each polygon segment is divided in 10, as is the toroidal/linear length ind : None / np.ndarray of int If provided, DS is ignored and the sample points corresponding to the provided indices are returned Example (assuming obj is a Ves object) > # We create a 5x5 cm2 sample of the whole surface > pts, dS, ind, reseff = obj.get_sample(0.05) > # Perform operations, save only the points indices (save space) > ... > # Retrieve the points from their indices (requires same res) > pts2, dS2, ind2, reseff2 = obj.get_sample(0.05, ind=ind) > np.allclose(pts,pts2) True offsetIn: float Offset distance from the actual surface of the object Inwards if positive Useful to avoid numerical errors Out : str Flag indicating the coordinate system of returned points e.g. : '(X,Y,Z)' or '(R,Z,Phi)' Ind : None / iterable of ints Array of indices of the entities to be considered (only when multiple entities, i.e.: self.nLim>1) Returns ------- pts : np.ndarray / list of np.ndarrays Sample points coordinates, as a (3,N) array. 
A list is returned if the object has multiple entities dS : np.ndarray / list of np.ndarrays The surface (in m^2) associated to each point ind : np.ndarray / list of np.ndarrays The index of each point reseff : np.ndarray / list of np.ndarrays Effective resolution in both directions after sample computation
[ "Sample", "with", "resolution", "res", "the", "surface", "defined", "by", "DS", "or", "ind" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L783-L855
train
33,048
ToFuProject/tofu
tofu/geom/_core.py
Struct.get_sampleV
def get_sampleV(self, res, DV=None, resMode='abs', ind=None, Out='(X,Y,Z)'): """ Sample, with resolution res, the volume defined by DV or ind """ args = [self.Poly, self.dgeom['P1Min'][0], self.dgeom['P1Max'][0], self.dgeom['P2Min'][1], self.dgeom['P2Max'][1], res] kwdargs = dict(DV=DV, dVMode=resMode, ind=ind, VType=self.Id.Type, VLim=self.Lim, Out=Out, margin=1.e-9) pts, dV, ind, reseff = _comp._Ves_get_sampleV(*args, **kwdargs) return pts, dV, ind, reseff
python
def get_sampleV(self, res, DV=None, resMode='abs', ind=None, Out='(X,Y,Z)'): """ Sample, with resolution res, the volume defined by DV or ind """ args = [self.Poly, self.dgeom['P1Min'][0], self.dgeom['P1Max'][0], self.dgeom['P2Min'][1], self.dgeom['P2Max'][1], res] kwdargs = dict(DV=DV, dVMode=resMode, ind=ind, VType=self.Id.Type, VLim=self.Lim, Out=Out, margin=1.e-9) pts, dV, ind, reseff = _comp._Ves_get_sampleV(*args, **kwdargs) return pts, dV, ind, reseff
[ "def", "get_sampleV", "(", "self", ",", "res", ",", "DV", "=", "None", ",", "resMode", "=", "'abs'", ",", "ind", "=", "None", ",", "Out", "=", "'(X,Y,Z)'", ")", ":", "args", "=", "[", "self", ".", "Poly", ",", "self", ".", "dgeom", "[", "'P1Min'"...
Sample, with resolution res, the volume defined by DV or ind
[ "Sample", "with", "resolution", "res", "the", "volume", "defined", "by", "DV", "or", "ind" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L857-L865
train
33,049
ToFuProject/tofu
tofu/geom/_core.py
Struct.plot
def plot(self, lax=None, proj='all', element='PIBsBvV', dP=None, dI=_def.TorId, dBs=_def.TorBsd, dBv=_def.TorBvd, dVect=_def.TorVind, dIHor=_def.TorITord, dBsHor=_def.TorBsTord, dBvHor=_def.TorBvTord, Lim=None, Nstep=_def.TorNTheta, dLeg=_def.TorLegd, indices=False, draw=True, fs=None, wintit=None, Test=True): """ Plot the polygon defining the vessel, in chosen projection Generic method for plotting the Ves object The projections to be plotted, the elements to plot can be specified Dictionaries of properties for each elements can also be specified If an ax is not provided a default one is created. Parameters ---------- Lax : list or plt.Axes The axes to be used for plotting Provide a list of 2 axes if proj='All' If None a new figure with axes is created proj : str Flag specifying the kind of projection - 'Cross' : cross-section projection - 'Hor' : horizontal projection - 'All' : both - '3d' : a 3d matplotlib plot element : str Flag specifying which elements to plot Each capital letter corresponds to an element: * 'P': polygon * 'I': point used as a reference for impact parameters * 'Bs': (surfacic) center of mass * 'Bv': (volumic) center of mass for Tor type * 'V': vector pointing inward perpendicular to each segment dP : dict / None Dict of properties for plotting the polygon Fed to plt.Axes.plot() or plt.plot_surface() if proj='3d' dI : dict / None Dict of properties for plotting point 'I' in Cross-section projection dIHor : dict / None Dict of properties for plotting point 'I' in horizontal projection dBs : dict / None Dict of properties for plotting point 'Bs' in Cross-section projection dBsHor : dict / None Dict of properties for plotting point 'Bs' in horizontal projection dBv : dict / None Dict of properties for plotting point 'Bv' in Cross-section projection dBvHor : dict / None Dict of properties for plotting point 'Bv' in horizontal projection dVect : dict / None Dict of properties for plotting point 'V' in cross-section projection dLeg : dict / None 
Dict of properties for plotting the legend, fed to plt.legend() The legend is not plotted if None Lim : list or tuple Array of a lower and upper limit of angle (rad.) or length for plotting the '3d' proj Nstep : int Number of points for sampling in ignorable coordinate (toroidal angle or length) draw : bool Flag indicating whether the fig.canvas.draw() shall be called automatically a4 : bool Flag indicating whether the figure should be plotted in a4 dimensions for printing Test : bool Flag indicating whether the inputs should be tested for conformity Returns ------- La list / plt.Axes Handles of the axes used for plotting (list if several axes where used) """ kwdargs = locals() lout = ['self'] for k in lout: del kwdargs[k] return _plot.Struct_plot(self, **kwdargs)
python
def plot(self, lax=None, proj='all', element='PIBsBvV', dP=None, dI=_def.TorId, dBs=_def.TorBsd, dBv=_def.TorBvd, dVect=_def.TorVind, dIHor=_def.TorITord, dBsHor=_def.TorBsTord, dBvHor=_def.TorBvTord, Lim=None, Nstep=_def.TorNTheta, dLeg=_def.TorLegd, indices=False, draw=True, fs=None, wintit=None, Test=True): """ Plot the polygon defining the vessel, in chosen projection Generic method for plotting the Ves object The projections to be plotted, the elements to plot can be specified Dictionaries of properties for each elements can also be specified If an ax is not provided a default one is created. Parameters ---------- Lax : list or plt.Axes The axes to be used for plotting Provide a list of 2 axes if proj='All' If None a new figure with axes is created proj : str Flag specifying the kind of projection - 'Cross' : cross-section projection - 'Hor' : horizontal projection - 'All' : both - '3d' : a 3d matplotlib plot element : str Flag specifying which elements to plot Each capital letter corresponds to an element: * 'P': polygon * 'I': point used as a reference for impact parameters * 'Bs': (surfacic) center of mass * 'Bv': (volumic) center of mass for Tor type * 'V': vector pointing inward perpendicular to each segment dP : dict / None Dict of properties for plotting the polygon Fed to plt.Axes.plot() or plt.plot_surface() if proj='3d' dI : dict / None Dict of properties for plotting point 'I' in Cross-section projection dIHor : dict / None Dict of properties for plotting point 'I' in horizontal projection dBs : dict / None Dict of properties for plotting point 'Bs' in Cross-section projection dBsHor : dict / None Dict of properties for plotting point 'Bs' in horizontal projection dBv : dict / None Dict of properties for plotting point 'Bv' in Cross-section projection dBvHor : dict / None Dict of properties for plotting point 'Bv' in horizontal projection dVect : dict / None Dict of properties for plotting point 'V' in cross-section projection dLeg : dict / None 
Dict of properties for plotting the legend, fed to plt.legend() The legend is not plotted if None Lim : list or tuple Array of a lower and upper limit of angle (rad.) or length for plotting the '3d' proj Nstep : int Number of points for sampling in ignorable coordinate (toroidal angle or length) draw : bool Flag indicating whether the fig.canvas.draw() shall be called automatically a4 : bool Flag indicating whether the figure should be plotted in a4 dimensions for printing Test : bool Flag indicating whether the inputs should be tested for conformity Returns ------- La list / plt.Axes Handles of the axes used for plotting (list if several axes where used) """ kwdargs = locals() lout = ['self'] for k in lout: del kwdargs[k] return _plot.Struct_plot(self, **kwdargs)
[ "def", "plot", "(", "self", ",", "lax", "=", "None", ",", "proj", "=", "'all'", ",", "element", "=", "'PIBsBvV'", ",", "dP", "=", "None", ",", "dI", "=", "_def", ".", "TorId", ",", "dBs", "=", "_def", ".", "TorBsd", ",", "dBv", "=", "_def", "."...
Plot the polygon defining the vessel, in chosen projection Generic method for plotting the Ves object The projections to be plotted, the elements to plot can be specified Dictionaries of properties for each elements can also be specified If an ax is not provided a default one is created. Parameters ---------- Lax : list or plt.Axes The axes to be used for plotting Provide a list of 2 axes if proj='All' If None a new figure with axes is created proj : str Flag specifying the kind of projection - 'Cross' : cross-section projection - 'Hor' : horizontal projection - 'All' : both - '3d' : a 3d matplotlib plot element : str Flag specifying which elements to plot Each capital letter corresponds to an element: * 'P': polygon * 'I': point used as a reference for impact parameters * 'Bs': (surfacic) center of mass * 'Bv': (volumic) center of mass for Tor type * 'V': vector pointing inward perpendicular to each segment dP : dict / None Dict of properties for plotting the polygon Fed to plt.Axes.plot() or plt.plot_surface() if proj='3d' dI : dict / None Dict of properties for plotting point 'I' in Cross-section projection dIHor : dict / None Dict of properties for plotting point 'I' in horizontal projection dBs : dict / None Dict of properties for plotting point 'Bs' in Cross-section projection dBsHor : dict / None Dict of properties for plotting point 'Bs' in horizontal projection dBv : dict / None Dict of properties for plotting point 'Bv' in Cross-section projection dBvHor : dict / None Dict of properties for plotting point 'Bv' in horizontal projection dVect : dict / None Dict of properties for plotting point 'V' in cross-section projection dLeg : dict / None Dict of properties for plotting the legend, fed to plt.legend() The legend is not plotted if None Lim : list or tuple Array of a lower and upper limit of angle (rad.) 
or length for plotting the '3d' proj Nstep : int Number of points for sampling in ignorable coordinate (toroidal angle or length) draw : bool Flag indicating whether the fig.canvas.draw() shall be called automatically a4 : bool Flag indicating whether the figure should be plotted in a4 dimensions for printing Test : bool Flag indicating whether the inputs should be tested for conformity Returns ------- La list / plt.Axes Handles of the axes used for plotting (list if several axes where used)
[ "Plot", "the", "polygon", "defining", "the", "vessel", "in", "chosen", "projection" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L868-L943
train
33,050
ToFuProject/tofu
tofu/geom/_core.py
Struct.plot_sino
def plot_sino(self, ax=None, Ang=_def.LOSImpAng, AngUnit=_def.LOSImpAngUnit, Sketch=True, dP=None, dLeg=_def.TorLegd, draw=True, fs=None, wintit=None, Test=True): """ Plot the sinogram of the vessel polygon, by computing its envelopp in a cross-section, can also plot a 3D version of it The envelop of the polygon is computed using self.Sino_RefPt as a reference point in projection space, and plotted using the provided dictionary of properties. Optionaly a small sketch can be included illustrating how the angle and the impact parameters are defined (if the axes is not provided). Parameters ---------- proj : str Flag indicating whether to plot a classic sinogram ('Cross') from the vessel cross-section (assuming 2D) or an extended 3D version '3d' of it with additional angle ax : None or plt.Axes The axes on which the plot should be done, if None a new figure and axes is created Ang : str Flag indicating which angle to use for the impact parameter, the angle of the line itself (xi) or of its impact parameter (theta) AngUnit : str Flag for the angle units to be displayed, 'rad' for radians or 'deg' for degrees Sketch : bool Flag indicating whether a small skecth showing the definitions of angles 'theta' and 'xi' should be included or not Pdict : dict Dictionary of properties used for plotting the polygon envelopp, fed to plt.plot() if proj='Cross' and to plt.plot_surface() if proj='3d' LegDict : None or dict Dictionary of properties used for plotting the legend, fed to plt.legend(), the legend is not plotted if None draw : bool Flag indicating whether the fig.canvas.draw() shall be called automatically a4 : bool Flag indicating whether the figure should be plotted in a4 dimensions for printing Test : bool Flag indicating whether the inputs shall be tested for conformity Returns ------- ax : plt.Axes The axes used to plot """ if Test: msg = "The impact parameters must be set ! 
(self.set_dsino())" assert not self.dsino['RefPt'] is None, msg # Only plot cross sino, from version 1.4.0 dP = _def.TorPFilld if dP is None else dP ax = _plot.Plot_Impact_PolProjPoly(self, ax=ax, Ang=Ang, AngUnit=AngUnit, Sketch=Sketch, Leg=self.Id.NameLTX, dP=dP, dLeg=dLeg, draw=False, fs=fs, wintit=wintit, Test=Test) # else: # Pdict = _def.TorP3DFilld if Pdict is None else Pdict # ax = _plot.Plot_Impact_3DPoly(self, ax=ax, Ang=Ang, AngUnit=AngUnit, # Pdict=Pdict, dLeg=LegDict, draw=False, # fs=fs, wintit=wintit, Test=Test) if draw: ax.figure.canvas.draw() return ax
python
def plot_sino(self, ax=None, Ang=_def.LOSImpAng, AngUnit=_def.LOSImpAngUnit, Sketch=True, dP=None, dLeg=_def.TorLegd, draw=True, fs=None, wintit=None, Test=True): """ Plot the sinogram of the vessel polygon, by computing its envelopp in a cross-section, can also plot a 3D version of it The envelop of the polygon is computed using self.Sino_RefPt as a reference point in projection space, and plotted using the provided dictionary of properties. Optionaly a small sketch can be included illustrating how the angle and the impact parameters are defined (if the axes is not provided). Parameters ---------- proj : str Flag indicating whether to plot a classic sinogram ('Cross') from the vessel cross-section (assuming 2D) or an extended 3D version '3d' of it with additional angle ax : None or plt.Axes The axes on which the plot should be done, if None a new figure and axes is created Ang : str Flag indicating which angle to use for the impact parameter, the angle of the line itself (xi) or of its impact parameter (theta) AngUnit : str Flag for the angle units to be displayed, 'rad' for radians or 'deg' for degrees Sketch : bool Flag indicating whether a small skecth showing the definitions of angles 'theta' and 'xi' should be included or not Pdict : dict Dictionary of properties used for plotting the polygon envelopp, fed to plt.plot() if proj='Cross' and to plt.plot_surface() if proj='3d' LegDict : None or dict Dictionary of properties used for plotting the legend, fed to plt.legend(), the legend is not plotted if None draw : bool Flag indicating whether the fig.canvas.draw() shall be called automatically a4 : bool Flag indicating whether the figure should be plotted in a4 dimensions for printing Test : bool Flag indicating whether the inputs shall be tested for conformity Returns ------- ax : plt.Axes The axes used to plot """ if Test: msg = "The impact parameters must be set ! 
(self.set_dsino())" assert not self.dsino['RefPt'] is None, msg # Only plot cross sino, from version 1.4.0 dP = _def.TorPFilld if dP is None else dP ax = _plot.Plot_Impact_PolProjPoly(self, ax=ax, Ang=Ang, AngUnit=AngUnit, Sketch=Sketch, Leg=self.Id.NameLTX, dP=dP, dLeg=dLeg, draw=False, fs=fs, wintit=wintit, Test=Test) # else: # Pdict = _def.TorP3DFilld if Pdict is None else Pdict # ax = _plot.Plot_Impact_3DPoly(self, ax=ax, Ang=Ang, AngUnit=AngUnit, # Pdict=Pdict, dLeg=LegDict, draw=False, # fs=fs, wintit=wintit, Test=Test) if draw: ax.figure.canvas.draw() return ax
[ "def", "plot_sino", "(", "self", ",", "ax", "=", "None", ",", "Ang", "=", "_def", ".", "LOSImpAng", ",", "AngUnit", "=", "_def", ".", "LOSImpAngUnit", ",", "Sketch", "=", "True", ",", "dP", "=", "None", ",", "dLeg", "=", "_def", ".", "TorLegd", ","...
Plot the sinogram of the vessel polygon, by computing its envelopp in a cross-section, can also plot a 3D version of it The envelop of the polygon is computed using self.Sino_RefPt as a reference point in projection space, and plotted using the provided dictionary of properties. Optionaly a small sketch can be included illustrating how the angle and the impact parameters are defined (if the axes is not provided). Parameters ---------- proj : str Flag indicating whether to plot a classic sinogram ('Cross') from the vessel cross-section (assuming 2D) or an extended 3D version '3d' of it with additional angle ax : None or plt.Axes The axes on which the plot should be done, if None a new figure and axes is created Ang : str Flag indicating which angle to use for the impact parameter, the angle of the line itself (xi) or of its impact parameter (theta) AngUnit : str Flag for the angle units to be displayed, 'rad' for radians or 'deg' for degrees Sketch : bool Flag indicating whether a small skecth showing the definitions of angles 'theta' and 'xi' should be included or not Pdict : dict Dictionary of properties used for plotting the polygon envelopp, fed to plt.plot() if proj='Cross' and to plt.plot_surface() if proj='3d' LegDict : None or dict Dictionary of properties used for plotting the legend, fed to plt.legend(), the legend is not plotted if None draw : bool Flag indicating whether the fig.canvas.draw() shall be called automatically a4 : bool Flag indicating whether the figure should be plotted in a4 dimensions for printing Test : bool Flag indicating whether the inputs shall be tested for conformity Returns ------- ax : plt.Axes The axes used to plot
[ "Plot", "the", "sinogram", "of", "the", "vessel", "polygon", "by", "computing", "its", "envelopp", "in", "a", "cross", "-", "section", "can", "also", "plot", "a", "3D", "version", "of", "it" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L946-L1006
train
33,051
ToFuProject/tofu
tofu/geom/_core.py
Config.lStruct
def lStruct(self): """ Return the list of Struct that was used for creation As tofu objects or SavePath+SaveNames (according to strip status) """ lStruct = [] for k in self._dStruct['lorder']: k0, k1 = k.split('_') lStruct.append(self._dStruct['dObj'][k0][k1]) return lStruct
python
def lStruct(self): """ Return the list of Struct that was used for creation As tofu objects or SavePath+SaveNames (according to strip status) """ lStruct = [] for k in self._dStruct['lorder']: k0, k1 = k.split('_') lStruct.append(self._dStruct['dObj'][k0][k1]) return lStruct
[ "def", "lStruct", "(", "self", ")", ":", "lStruct", "=", "[", "]", "for", "k", "in", "self", ".", "_dStruct", "[", "'lorder'", "]", ":", "k0", ",", "k1", "=", "k", ".", "split", "(", "'_'", ")", "lStruct", ".", "append", "(", "self", ".", "_dSt...
Return the list of Struct that was used for creation As tofu objects or SavePath+SaveNames (according to strip status)
[ "Return", "the", "list", "of", "Struct", "that", "was", "used", "for", "creation" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L2049-L2058
train
33,052
ToFuProject/tofu
tofu/geom/_core.py
Config.lStructIn
def lStructIn(self): """ Return the list of StructIn contained in self.lStruct As tofu objects or SavePath+SaveNames (according to strip status) """ lStruct = [] for k in self._dStruct['lorder']: k0, k1 = k.split('_') if type(self._dStruct['dObj'][k0][k1]) is str: if any([ss in self._dStruct['dObj'][k0][k1] for ss in ['Ves','PlasmaDomain']]): lStruct.append(self._dStruct['dObj'][k0][k1]) elif issubclass(self._dStruct['dObj'][k0][k1].__class__, StructIn): lStruct.append(self._dStruct['dObj'][k0][k1]) return lStruct
python
def lStructIn(self): """ Return the list of StructIn contained in self.lStruct As tofu objects or SavePath+SaveNames (according to strip status) """ lStruct = [] for k in self._dStruct['lorder']: k0, k1 = k.split('_') if type(self._dStruct['dObj'][k0][k1]) is str: if any([ss in self._dStruct['dObj'][k0][k1] for ss in ['Ves','PlasmaDomain']]): lStruct.append(self._dStruct['dObj'][k0][k1]) elif issubclass(self._dStruct['dObj'][k0][k1].__class__, StructIn): lStruct.append(self._dStruct['dObj'][k0][k1]) return lStruct
[ "def", "lStructIn", "(", "self", ")", ":", "lStruct", "=", "[", "]", "for", "k", "in", "self", ".", "_dStruct", "[", "'lorder'", "]", ":", "k0", ",", "k1", "=", "k", ".", "split", "(", "'_'", ")", "if", "type", "(", "self", ".", "_dStruct", "["...
Return the list of StructIn contained in self.lStruct As tofu objects or SavePath+SaveNames (according to strip status)
[ "Return", "the", "list", "of", "StructIn", "contained", "in", "self", ".", "lStruct" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L2061-L2075
train
33,053
ToFuProject/tofu
tofu/geom/_core.py
Config.get_summary
def get_summary(self, verb=False, max_columns=100, width=1000): """ Summary description of the object content as a pandas DataFrame """ # Make sure the data is accessible msg = "The data is not accessible because self.strip(2) was used !" assert self._dstrip['strip']<2, msg # Build the list d = self._dStruct['dObj'] data = [] for k in self._ddef['dStruct']['order']: if k not in d.keys(): continue for kk in d[k].keys(): lu = [k, self._dStruct['dObj'][k][kk]._Id._dall['Name'], self._dStruct['dObj'][k][kk]._Id._dall['SaveName'], self._dStruct['dObj'][k][kk]._dgeom['nP'], self._dStruct['dObj'][k][kk]._dgeom['noccur'], self._dStruct['dObj'][k][kk]._dgeom['mobile'], self._dStruct['dObj'][k][kk]._dmisc['color']] for pp in self._dextraprop['lprop']: lu.append(self._dextraprop['d'+pp][k][kk]) data.append(lu) # Build the pandas DataFrame col = ['class', 'Name', 'SaveName', 'nP', 'noccur', 'mobile', 'color'] + self._dextraprop['lprop'] df = pd.DataFrame(data, columns=col) pd.set_option('display.max_columns',max_columns) pd.set_option('display.width',width) if verb: print(df) return df
python
def get_summary(self, verb=False, max_columns=100, width=1000): """ Summary description of the object content as a pandas DataFrame """ # Make sure the data is accessible msg = "The data is not accessible because self.strip(2) was used !" assert self._dstrip['strip']<2, msg # Build the list d = self._dStruct['dObj'] data = [] for k in self._ddef['dStruct']['order']: if k not in d.keys(): continue for kk in d[k].keys(): lu = [k, self._dStruct['dObj'][k][kk]._Id._dall['Name'], self._dStruct['dObj'][k][kk]._Id._dall['SaveName'], self._dStruct['dObj'][k][kk]._dgeom['nP'], self._dStruct['dObj'][k][kk]._dgeom['noccur'], self._dStruct['dObj'][k][kk]._dgeom['mobile'], self._dStruct['dObj'][k][kk]._dmisc['color']] for pp in self._dextraprop['lprop']: lu.append(self._dextraprop['d'+pp][k][kk]) data.append(lu) # Build the pandas DataFrame col = ['class', 'Name', 'SaveName', 'nP', 'noccur', 'mobile', 'color'] + self._dextraprop['lprop'] df = pd.DataFrame(data, columns=col) pd.set_option('display.max_columns',max_columns) pd.set_option('display.width',width) if verb: print(df) return df
[ "def", "get_summary", "(", "self", ",", "verb", "=", "False", ",", "max_columns", "=", "100", ",", "width", "=", "1000", ")", ":", "# Make sure the data is accessible", "msg", "=", "\"The data is not accessible because self.strip(2) was used !\"", "assert", "self", "....
Summary description of the object content as a pandas DataFrame
[ "Summary", "description", "of", "the", "object", "content", "as", "a", "pandas", "DataFrame" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L2228-L2261
train
33,054
ToFuProject/tofu
tofu/geom/_core.py
Config.isInside
def isInside(self, pts, In='(X,Y,Z)', log='any'): """ Return a 2D array of bool Equivalent to applying isInside to each Struct Check self.lStruct[0].isInside? for details Arg log determines how Struct with multiple Limits are treated - 'all' : True only if pts belong to all elements - 'any' : True if pts belong to any element """ msg = "Arg pts must be a 1D or 2D np.ndarray !" assert isinstance(pts,np.ndarray) and pts.ndim in [1,2], msg msg = "Arg log must be in ['any','all']" assert log in ['any','all'], msg if pts.ndim==1: msg = "Arg pts must contain the coordinates of a point !" assert pts.size in [2,3], msg pts = pts.reshape((pts.size,1)).astype(float) else: msg = "Arg pts must contain the coordinates of points !" assert pts.shape[0] in [2,3], pts nP = pts.shape[1] ind = np.zeros((self._dStruct['nObj'],nP), dtype=bool) lStruct = self.lStruct for ii in range(0,self._dStruct['nObj']): indi = _GG._Ves_isInside(pts, lStruct[ii].Poly, Lim=lStruct[ii].Lim, nLim=lStruct[ii].noccur, VType=lStruct[ii].Id.Type, In=In, Test=True) if lStruct[ii].noccur>1: if log=='any': indi = np.any(indi,axis=0) else: indi = np.all(indi,axis=0) ind[ii,:] = indi return ind
python
def isInside(self, pts, In='(X,Y,Z)', log='any'): """ Return a 2D array of bool Equivalent to applying isInside to each Struct Check self.lStruct[0].isInside? for details Arg log determines how Struct with multiple Limits are treated - 'all' : True only if pts belong to all elements - 'any' : True if pts belong to any element """ msg = "Arg pts must be a 1D or 2D np.ndarray !" assert isinstance(pts,np.ndarray) and pts.ndim in [1,2], msg msg = "Arg log must be in ['any','all']" assert log in ['any','all'], msg if pts.ndim==1: msg = "Arg pts must contain the coordinates of a point !" assert pts.size in [2,3], msg pts = pts.reshape((pts.size,1)).astype(float) else: msg = "Arg pts must contain the coordinates of points !" assert pts.shape[0] in [2,3], pts nP = pts.shape[1] ind = np.zeros((self._dStruct['nObj'],nP), dtype=bool) lStruct = self.lStruct for ii in range(0,self._dStruct['nObj']): indi = _GG._Ves_isInside(pts, lStruct[ii].Poly, Lim=lStruct[ii].Lim, nLim=lStruct[ii].noccur, VType=lStruct[ii].Id.Type, In=In, Test=True) if lStruct[ii].noccur>1: if log=='any': indi = np.any(indi,axis=0) else: indi = np.all(indi,axis=0) ind[ii,:] = indi return ind
[ "def", "isInside", "(", "self", ",", "pts", ",", "In", "=", "'(X,Y,Z)'", ",", "log", "=", "'any'", ")", ":", "msg", "=", "\"Arg pts must be a 1D or 2D np.ndarray !\"", "assert", "isinstance", "(", "pts", ",", "np", ".", "ndarray", ")", "and", "pts", ".", ...
Return a 2D array of bool Equivalent to applying isInside to each Struct Check self.lStruct[0].isInside? for details Arg log determines how Struct with multiple Limits are treated - 'all' : True only if pts belong to all elements - 'any' : True if pts belong to any element
[ "Return", "a", "2D", "array", "of", "bool" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L2263-L2301
train
33,055
ToFuProject/tofu
tofu/geom/_core.py
Rays.get_sample
def get_sample(self, res, resMode='abs', DL=None, method='sum', ind=None, compact=False): """ Return a linear sampling of the LOS The LOS is sampled into a series a points and segments lengths The resolution (segments length) is <= res The sampling can be done according to different methods It is possible to sample only a subset of the LOS Parameters ---------- res: float Desired resolution resMode: str Flag indicating res should be understood as: - 'abs': an absolute distance in meters - 'rel': a relative distance (fraction of the LOS length) DL: None / iterable The fraction [L1;L2] of the LOS that should be sampled, where L1 and L2 are distances from the starting point of the LOS (LOS.D) method: str Flag indicating which to use for sampling: - 'sum': the LOS is sampled into N segments of equal length, where N is the smallest int such that: * segment length <= resolution(res,resMode) The points returned are the center of each segment - 'simps': the LOS is sampled into N segments of equal length, where N is the smallest int such that: * segment length <= resolution(res,resMode) * N is even The points returned are the egdes of each segment - 'romb': the LOS is sampled into N segments of equal length, where N is the smallest int such that: * segment length <= resolution(res,resMode) * N = 2^k + 1 The points returned are the egdes of each segment Returns ------- pts: np.ndarray A (3,NP) array of NP points along the LOS in (X,Y,Z) coordinates k: np.ndarray A (NP,) array of the points distances from the LOS starting point reseff: float The effective resolution (<= res input), as an absolute distance """ ind = self._check_indch(ind) # preload k kIn = self.kIn kOut = self.kOut # Preformat DL if DL is None: DL = np.array([kIn[ind], kOut[ind]]) elif np.asarray(DL).size==2: DL = np.tile(np.asarray(DL).ravel(),(len(ind),1)).T DL = np.ascontiguousarray(DL).astype(float) assert type(DL) is np.ndarray and DL.ndim==2 assert DL.shape==(2,len(ind)), "Arg DL has wrong shape !" 
# Check consistency of limits ii = DL[0,:] < kIn[ind] DL[0,ii] = kIn[ind][ii] ii[:] = DL[0,:] >= kOut[ind] DL[0,ii] = kOut[ind][ii] ii[:] = DL[1,:] > kOut[ind] DL[1,ii] = kOut[ind][ii] ii[:] = DL[1,:] <= kIn[ind] DL[1,ii] = kIn[ind][ii] # Preformat Ds, us Ds, us = self.D[:,ind], self.u[:,ind] if len(ind)==1: Ds, us = Ds.reshape((3,1)), us.reshape((3,1)) Ds, us = np.ascontiguousarray(Ds), np.ascontiguousarray(us) # Launch # NB : find a way to exclude cases with DL[0,:]>=DL[1,:] !! # Todo : reverse in _GG : make compact default for faster computation ! lpts, k, reseff = _GG.LOS_get_sample(Ds, us, res, DL, dLMode=resMode, method=method) if compact: pts = np.concatenate(lpts, axis=1) ind = np.array([pt.shape[1] for pt in lpts], dtype=int) ind = np.cumsum(ind)[:-1] return pts, k, reseff, ind else: return lpts, k, reseff
python
def get_sample(self, res, resMode='abs', DL=None, method='sum', ind=None, compact=False): """ Return a linear sampling of the LOS The LOS is sampled into a series a points and segments lengths The resolution (segments length) is <= res The sampling can be done according to different methods It is possible to sample only a subset of the LOS Parameters ---------- res: float Desired resolution resMode: str Flag indicating res should be understood as: - 'abs': an absolute distance in meters - 'rel': a relative distance (fraction of the LOS length) DL: None / iterable The fraction [L1;L2] of the LOS that should be sampled, where L1 and L2 are distances from the starting point of the LOS (LOS.D) method: str Flag indicating which to use for sampling: - 'sum': the LOS is sampled into N segments of equal length, where N is the smallest int such that: * segment length <= resolution(res,resMode) The points returned are the center of each segment - 'simps': the LOS is sampled into N segments of equal length, where N is the smallest int such that: * segment length <= resolution(res,resMode) * N is even The points returned are the egdes of each segment - 'romb': the LOS is sampled into N segments of equal length, where N is the smallest int such that: * segment length <= resolution(res,resMode) * N = 2^k + 1 The points returned are the egdes of each segment Returns ------- pts: np.ndarray A (3,NP) array of NP points along the LOS in (X,Y,Z) coordinates k: np.ndarray A (NP,) array of the points distances from the LOS starting point reseff: float The effective resolution (<= res input), as an absolute distance """ ind = self._check_indch(ind) # preload k kIn = self.kIn kOut = self.kOut # Preformat DL if DL is None: DL = np.array([kIn[ind], kOut[ind]]) elif np.asarray(DL).size==2: DL = np.tile(np.asarray(DL).ravel(),(len(ind),1)).T DL = np.ascontiguousarray(DL).astype(float) assert type(DL) is np.ndarray and DL.ndim==2 assert DL.shape==(2,len(ind)), "Arg DL has wrong shape !" 
# Check consistency of limits ii = DL[0,:] < kIn[ind] DL[0,ii] = kIn[ind][ii] ii[:] = DL[0,:] >= kOut[ind] DL[0,ii] = kOut[ind][ii] ii[:] = DL[1,:] > kOut[ind] DL[1,ii] = kOut[ind][ii] ii[:] = DL[1,:] <= kIn[ind] DL[1,ii] = kIn[ind][ii] # Preformat Ds, us Ds, us = self.D[:,ind], self.u[:,ind] if len(ind)==1: Ds, us = Ds.reshape((3,1)), us.reshape((3,1)) Ds, us = np.ascontiguousarray(Ds), np.ascontiguousarray(us) # Launch # NB : find a way to exclude cases with DL[0,:]>=DL[1,:] !! # Todo : reverse in _GG : make compact default for faster computation ! lpts, k, reseff = _GG.LOS_get_sample(Ds, us, res, DL, dLMode=resMode, method=method) if compact: pts = np.concatenate(lpts, axis=1) ind = np.array([pt.shape[1] for pt in lpts], dtype=int) ind = np.cumsum(ind)[:-1] return pts, k, reseff, ind else: return lpts, k, reseff
[ "def", "get_sample", "(", "self", ",", "res", ",", "resMode", "=", "'abs'", ",", "DL", "=", "None", ",", "method", "=", "'sum'", ",", "ind", "=", "None", ",", "compact", "=", "False", ")", ":", "ind", "=", "self", ".", "_check_indch", "(", "ind", ...
Return a linear sampling of the LOS The LOS is sampled into a series a points and segments lengths The resolution (segments length) is <= res The sampling can be done according to different methods It is possible to sample only a subset of the LOS Parameters ---------- res: float Desired resolution resMode: str Flag indicating res should be understood as: - 'abs': an absolute distance in meters - 'rel': a relative distance (fraction of the LOS length) DL: None / iterable The fraction [L1;L2] of the LOS that should be sampled, where L1 and L2 are distances from the starting point of the LOS (LOS.D) method: str Flag indicating which to use for sampling: - 'sum': the LOS is sampled into N segments of equal length, where N is the smallest int such that: * segment length <= resolution(res,resMode) The points returned are the center of each segment - 'simps': the LOS is sampled into N segments of equal length, where N is the smallest int such that: * segment length <= resolution(res,resMode) * N is even The points returned are the egdes of each segment - 'romb': the LOS is sampled into N segments of equal length, where N is the smallest int such that: * segment length <= resolution(res,resMode) * N = 2^k + 1 The points returned are the egdes of each segment Returns ------- pts: np.ndarray A (3,NP) array of NP points along the LOS in (X,Y,Z) coordinates k: np.ndarray A (NP,) array of the points distances from the LOS starting point reseff: float The effective resolution (<= res input), as an absolute distance
[ "Return", "a", "linear", "sampling", "of", "the", "LOS" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L3859-L3946
train
33,056
ToFuProject/tofu
tofu/geom/_core.py
Rays.calc_kInkOut_IsoFlux
def calc_kInkOut_IsoFlux(self, lPoly, lVIn=None, Lim=None, kInOut=True): """ Calculate the intersection points of each ray with each isoflux The isofluxes are provided as a list of 2D closed polygons The intersections are the inward and outward intersections They are retruned as two np.ndarrays: kIn and kOut Each array contains the length parameter along the ray for each isoflux Parameters ---------- Returns ------- """ # Preformat input nPoly, lPoly, lVIn = self._kInOut_IsoFlux_inputs_usr(lPoly, lVIn=lVIn) # Prepare output kIn = np.full((self.nRays,nPoly), np.nan) kOut = np.full((self.nRays,nPoly), np.nan) # Compute intersections assert(self._method in ['ref', 'optimized']) if self._method=='ref': for ii in range(0,nPoly): largs, dkwd = self._kInOut_IsoFlux_inputs([lPoly[ii]], lVIn=[lVIn[ii]]) out = _GG.SLOW_LOS_Calc_PInOut_VesStruct(*largs, **dkwd) PIn, POut, kin, kout, VperpIn, vperp, IIn, indout = out kIn[:,ii], kOut[:,ii] = kin, kout elif self._method=="optimized": for ii in range(0,nPoly): largs, dkwd = self._kInOut_IsoFlux_inputs([lPoly[ii]], lVIn=[lVIn[ii]]) out = _GG.LOS_Calc_PInOut_VesStruct(*largs, **dkwd) kin, kout, _, _ = out kIn[:,ii], kOut[:,ii] = kin, kout if kInOut: indok = ~np.isnan(kIn) ind = np.zeros((self.nRays,nPoly), dtype=bool) kInref = np.tile(self.kIn[:,np.newaxis],nPoly) kOutref = np.tile(self.kOut[:,np.newaxis],nPoly) ind[indok] = (kIn[indok]<kInref[indok]) | (kIn[indok]>kOutref[indok]) kIn[ind] = np.nan ind[:] = False indok[:] = ~np.isnan(kOut) ind[indok] = (kOut[indok]<kInref[indok]) | (kOut[indok]>kOutref[indok]) kOut[ind] = np.nan return kIn, kOut
python
def calc_kInkOut_IsoFlux(self, lPoly, lVIn=None, Lim=None, kInOut=True): """ Calculate the intersection points of each ray with each isoflux The isofluxes are provided as a list of 2D closed polygons The intersections are the inward and outward intersections They are retruned as two np.ndarrays: kIn and kOut Each array contains the length parameter along the ray for each isoflux Parameters ---------- Returns ------- """ # Preformat input nPoly, lPoly, lVIn = self._kInOut_IsoFlux_inputs_usr(lPoly, lVIn=lVIn) # Prepare output kIn = np.full((self.nRays,nPoly), np.nan) kOut = np.full((self.nRays,nPoly), np.nan) # Compute intersections assert(self._method in ['ref', 'optimized']) if self._method=='ref': for ii in range(0,nPoly): largs, dkwd = self._kInOut_IsoFlux_inputs([lPoly[ii]], lVIn=[lVIn[ii]]) out = _GG.SLOW_LOS_Calc_PInOut_VesStruct(*largs, **dkwd) PIn, POut, kin, kout, VperpIn, vperp, IIn, indout = out kIn[:,ii], kOut[:,ii] = kin, kout elif self._method=="optimized": for ii in range(0,nPoly): largs, dkwd = self._kInOut_IsoFlux_inputs([lPoly[ii]], lVIn=[lVIn[ii]]) out = _GG.LOS_Calc_PInOut_VesStruct(*largs, **dkwd) kin, kout, _, _ = out kIn[:,ii], kOut[:,ii] = kin, kout if kInOut: indok = ~np.isnan(kIn) ind = np.zeros((self.nRays,nPoly), dtype=bool) kInref = np.tile(self.kIn[:,np.newaxis],nPoly) kOutref = np.tile(self.kOut[:,np.newaxis],nPoly) ind[indok] = (kIn[indok]<kInref[indok]) | (kIn[indok]>kOutref[indok]) kIn[ind] = np.nan ind[:] = False indok[:] = ~np.isnan(kOut) ind[indok] = (kOut[indok]<kInref[indok]) | (kOut[indok]>kOutref[indok]) kOut[ind] = np.nan return kIn, kOut
[ "def", "calc_kInkOut_IsoFlux", "(", "self", ",", "lPoly", ",", "lVIn", "=", "None", ",", "Lim", "=", "None", ",", "kInOut", "=", "True", ")", ":", "# Preformat input", "nPoly", ",", "lPoly", ",", "lVIn", "=", "self", ".", "_kInOut_IsoFlux_inputs_usr", "(",...
Calculate the intersection points of each ray with each isoflux The isofluxes are provided as a list of 2D closed polygons The intersections are the inward and outward intersections They are retruned as two np.ndarrays: kIn and kOut Each array contains the length parameter along the ray for each isoflux Parameters ---------- Returns -------
[ "Calculate", "the", "intersection", "points", "of", "each", "ray", "with", "each", "isoflux" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L4022-L4078
train
33,057
ToFuProject/tofu
tofu/geom/_core.py
Rays.get_touch_dict
def get_touch_dict(self, ind=None, out=bool): """ Get a dictionnary of Cls_Name struct with indices of Rays touching Only includes Struct object with compute = True (as returned by self.lStruct__computeInOut_computeInOut) Also return the associated colors If in is not None, the indices for each Struct are split between: - indok : rays touching Struct and in ind - indout: rays touching Struct but not in ind """ if self.config is None: msg = "Config must be set in order to get touch dict !" raise Exception(msg) dElt = {} ind = self._check_indch(ind, out=bool) for ss in self.lStruct_computeInOut: kn = "%s_%s"%(ss.__class__.__name__, ss.Id.Name) indtouch = self.select(touch=kn, out=bool) if np.any(indtouch): indok = indtouch & ind indout = indtouch & ~ind if np.any(indok) or np.any(indout): if out == int: indok = indok.nonzero()[0] indout = indout.nonzero()[0] dElt[kn] = {'indok':indok, 'indout':indout, 'col':ss.get_color()} return dElt
python
def get_touch_dict(self, ind=None, out=bool): """ Get a dictionnary of Cls_Name struct with indices of Rays touching Only includes Struct object with compute = True (as returned by self.lStruct__computeInOut_computeInOut) Also return the associated colors If in is not None, the indices for each Struct are split between: - indok : rays touching Struct and in ind - indout: rays touching Struct but not in ind """ if self.config is None: msg = "Config must be set in order to get touch dict !" raise Exception(msg) dElt = {} ind = self._check_indch(ind, out=bool) for ss in self.lStruct_computeInOut: kn = "%s_%s"%(ss.__class__.__name__, ss.Id.Name) indtouch = self.select(touch=kn, out=bool) if np.any(indtouch): indok = indtouch & ind indout = indtouch & ~ind if np.any(indok) or np.any(indout): if out == int: indok = indok.nonzero()[0] indout = indout.nonzero()[0] dElt[kn] = {'indok':indok, 'indout':indout, 'col':ss.get_color()} return dElt
[ "def", "get_touch_dict", "(", "self", ",", "ind", "=", "None", ",", "out", "=", "bool", ")", ":", "if", "self", ".", "config", "is", "None", ":", "msg", "=", "\"Config must be set in order to get touch dict !\"", "raise", "Exception", "(", "msg", ")", "dElt"...
Get a dictionnary of Cls_Name struct with indices of Rays touching Only includes Struct object with compute = True (as returned by self.lStruct__computeInOut_computeInOut) Also return the associated colors If in is not None, the indices for each Struct are split between: - indok : rays touching Struct and in ind - indout: rays touching Struct but not in ind
[ "Get", "a", "dictionnary", "of", "Cls_Name", "struct", "with", "indices", "of", "Rays", "touching" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L4373-L4402
train
33,058
piglei/uwsgi-sloth
uwsgi_sloth/models.py
merge_urls_data_to
def merge_urls_data_to(to, food={}): """Merge urls data""" if not to: to.update(food) for url, data in food.items(): if url not in to: to[url] = data else: to[url] = to[url].merge_with(data)
python
def merge_urls_data_to(to, food={}): """Merge urls data""" if not to: to.update(food) for url, data in food.items(): if url not in to: to[url] = data else: to[url] = to[url].merge_with(data)
[ "def", "merge_urls_data_to", "(", "to", ",", "food", "=", "{", "}", ")", ":", "if", "not", "to", ":", "to", ".", "update", "(", "food", ")", "for", "url", ",", "data", "in", "food", ".", "items", "(", ")", ":", "if", "url", "not", "in", "to", ...
Merge urls data
[ "Merge", "urls", "data" ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/models.py#L53-L62
train
33,059
piglei/uwsgi-sloth
uwsgi_sloth/models.py
merge_requests_data_to
def merge_requests_data_to(to, food={}): """Merge a small analyzed result to a big one, this function will modify the original ``to``""" if not to: to.update(food) to['requests_counter']['normal'] += food['requests_counter']['normal'] to['requests_counter']['slow'] += food['requests_counter']['slow'] to['total_slow_duration'] += food['total_slow_duration'] for group_name, urls in food['data_details'].items(): if group_name not in to['data_details']: to['data_details'][group_name] = urls else: to_urls = to['data_details'][group_name] to_urls['duration_agr_data'] = to_urls['duration_agr_data'].merge_with( urls['duration_agr_data']) # Merge urls data merge_urls_data_to(to_urls['urls'], urls['urls'])
python
def merge_requests_data_to(to, food={}): """Merge a small analyzed result to a big one, this function will modify the original ``to``""" if not to: to.update(food) to['requests_counter']['normal'] += food['requests_counter']['normal'] to['requests_counter']['slow'] += food['requests_counter']['slow'] to['total_slow_duration'] += food['total_slow_duration'] for group_name, urls in food['data_details'].items(): if group_name not in to['data_details']: to['data_details'][group_name] = urls else: to_urls = to['data_details'][group_name] to_urls['duration_agr_data'] = to_urls['duration_agr_data'].merge_with( urls['duration_agr_data']) # Merge urls data merge_urls_data_to(to_urls['urls'], urls['urls'])
[ "def", "merge_requests_data_to", "(", "to", ",", "food", "=", "{", "}", ")", ":", "if", "not", "to", ":", "to", ".", "update", "(", "food", ")", "to", "[", "'requests_counter'", "]", "[", "'normal'", "]", "+=", "food", "[", "'requests_counter'", "]", ...
Merge a small analyzed result to a big one, this function will modify the original ``to``
[ "Merge", "a", "small", "analyzed", "result", "to", "a", "big", "one", "this", "function", "will", "modify", "the", "original", "to" ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/models.py#L65-L84
train
33,060
piglei/uwsgi-sloth
uwsgi_sloth/analyzer.py
format_data
def format_data(raw_data, limit_per_url_group=LIMIT_PER_URL_GROUP, limit_url_groups=LIMIT_URL_GROUPS): """Fomat data from LogAnalyzer for render purpose""" data = copy.deepcopy(raw_data) for k, v in list(data['data_details'].items()): # Only reserve first ``limit_per_url_group`` items v['urls'] = sorted(list(v['urls'].items()), key=lambda k_v: k_v[1].total, reverse=True)[:limit_per_url_group] data_details = sorted(iter(data['data_details'].items()), key=lambda k_v1: k_v1[1]["duration_agr_data"].total, reverse=True)[:limit_url_groups] if data['requests_counter']['normal']: slow_rate = format(data['requests_counter']['slow'] / \ float(data['requests_counter']['normal']), '.2%') else: slow_rate = '-' data.update({ 'slow_rate': slow_rate, 'data_details': data_details, }) return data
python
def format_data(raw_data, limit_per_url_group=LIMIT_PER_URL_GROUP, limit_url_groups=LIMIT_URL_GROUPS): """Fomat data from LogAnalyzer for render purpose""" data = copy.deepcopy(raw_data) for k, v in list(data['data_details'].items()): # Only reserve first ``limit_per_url_group`` items v['urls'] = sorted(list(v['urls'].items()), key=lambda k_v: k_v[1].total, reverse=True)[:limit_per_url_group] data_details = sorted(iter(data['data_details'].items()), key=lambda k_v1: k_v1[1]["duration_agr_data"].total, reverse=True)[:limit_url_groups] if data['requests_counter']['normal']: slow_rate = format(data['requests_counter']['slow'] / \ float(data['requests_counter']['normal']), '.2%') else: slow_rate = '-' data.update({ 'slow_rate': slow_rate, 'data_details': data_details, }) return data
[ "def", "format_data", "(", "raw_data", ",", "limit_per_url_group", "=", "LIMIT_PER_URL_GROUP", ",", "limit_url_groups", "=", "LIMIT_URL_GROUPS", ")", ":", "data", "=", "copy", ".", "deepcopy", "(", "raw_data", ")", "for", "k", ",", "v", "in", "list", "(", "d...
Fomat data from LogAnalyzer for render purpose
[ "Fomat", "data", "from", "LogAnalyzer", "for", "render", "purpose" ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/analyzer.py#L221-L242
train
33,061
piglei/uwsgi-sloth
uwsgi_sloth/analyzer.py
URLClassifier.classify
def classify(self, url_path): """Classify an url""" for dict_api_url in self.user_defined_rules: api_url = dict_api_url['str'] re_api_url = dict_api_url['re'] if re_api_url.match(url_path[1:]): return api_url return self.RE_SIMPLIFY_URL.sub(r'(\\d+)/', url_path)
python
def classify(self, url_path): """Classify an url""" for dict_api_url in self.user_defined_rules: api_url = dict_api_url['str'] re_api_url = dict_api_url['re'] if re_api_url.match(url_path[1:]): return api_url return self.RE_SIMPLIFY_URL.sub(r'(\\d+)/', url_path)
[ "def", "classify", "(", "self", ",", "url_path", ")", ":", "for", "dict_api_url", "in", "self", ".", "user_defined_rules", ":", "api_url", "=", "dict_api_url", "[", "'str'", "]", "re_api_url", "=", "dict_api_url", "[", "'re'", "]", "if", "re_api_url", ".", ...
Classify an url
[ "Classify", "an", "url" ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/analyzer.py#L69-L77
train
33,062
daveoncode/pyvaru
pyvaru/__init__.py
ValidationResult.annotate_rule_violation
def annotate_rule_violation(self, rule: ValidationRule) -> None: """ Takes note of a rule validation failure by collecting its error message. :param rule: Rule that failed validation. :type rule: ValidationRule :return: None """ if self.errors.get(rule.label) is None: self.errors[rule.label] = [] self.errors[rule.label].append(rule.get_error_message())
python
def annotate_rule_violation(self, rule: ValidationRule) -> None: """ Takes note of a rule validation failure by collecting its error message. :param rule: Rule that failed validation. :type rule: ValidationRule :return: None """ if self.errors.get(rule.label) is None: self.errors[rule.label] = [] self.errors[rule.label].append(rule.get_error_message())
[ "def", "annotate_rule_violation", "(", "self", ",", "rule", ":", "ValidationRule", ")", "->", "None", ":", "if", "self", ".", "errors", ".", "get", "(", "rule", ".", "label", ")", "is", "None", ":", "self", ".", "errors", "[", "rule", ".", "label", "...
Takes note of a rule validation failure by collecting its error message. :param rule: Rule that failed validation. :type rule: ValidationRule :return: None
[ "Takes", "note", "of", "a", "rule", "validation", "failure", "by", "collecting", "its", "error", "message", "." ]
78ad9c55d44aef1f24028b2c83e7de12f2698abb
https://github.com/daveoncode/pyvaru/blob/78ad9c55d44aef1f24028b2c83e7de12f2698abb/pyvaru/__init__.py#L187-L197
train
33,063
ToFuProject/tofu
tofu/pathfile.py
get_PolyFromPolyFileObj
def get_PolyFromPolyFileObj(PolyFileObj, SavePathInp=None, units='m', comments='#', skiprows=0, shape0=2): """ Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units Useful for :meth:`tofu.plugins.AUG.Ves._create()` Parameters ---------- PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray The source where the polygon is to be found, either: - str: the name of a file containing the coorindates of a polygon to be loaded with :meth:`numpy.loadtxt()` - A :mod:`tofu.geom` object: with attribute 'Poly' - np.ndarray: an 2-dimensional array containing the 2D cartesian coordinates of a polygon SavePathInp : str / None The absolute path where the input file is stored units : str Flag indicating in which units the polygon coordinates is expressed in the input file / object / array (will be converted to meters) comments : str Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name skiprows : int Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name shape0 : int Specifies whether the loaded array is a (2,N) or (3,N) array (transposed it if necessary) Returns ------- Poly : np.ndarray (2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points addInfo : dict Dictionaryb containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted) """ assert type(PolyFileObj) in [list,str] or hasattr(PolyFileObj,"Poly") or np.asarray(PolyFileObj).ndim==2, "Arg PolyFileObj must be str (PathFileExt), a ToFu object with attribute Poly or an iterable convertible to 2d np.ndarray !" 
# Load PolyFileObj if file and check shape addInfo = {} if type(PolyFileObj) in [list,str]: PathFileExt = get_FileFromInfos(Path=SavePathInp, Name=PolyFileObj) # Include PathFileExt in ID for tracability addInfo = {'Input':PathFileExt} PolyFileObj = np.loadtxt(PathFileExt, dtype=float, comments=comments, delimiter=None, converters=None, skiprows=skiprows, usecols=None, unpack=False, ndmin=2) elif hasattr(PolyFileObj,"Poly"): addInfo = {'Input':PolyFileObj.Id.SaveName} PolyFileObj = PolyFileObj.Poly Poly = np.asarray(PolyFileObj) assert Poly.ndim==2 and shape0 in Poly.shape and max(Poly.shape)>=3 and not np.any(np.isnan(Poly)), "Arg np.asarray(PolyFileObj) must be a (2,N) or (N,2) np.ndarray with non NaNs !" Poly = Poly if Poly.shape[0]==shape0 else Poly.T Poly = convert_units(Poly, In=units, Out='m') return Poly, addInfo
python
def get_PolyFromPolyFileObj(PolyFileObj, SavePathInp=None, units='m', comments='#', skiprows=0, shape0=2): """ Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units Useful for :meth:`tofu.plugins.AUG.Ves._create()` Parameters ---------- PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray The source where the polygon is to be found, either: - str: the name of a file containing the coorindates of a polygon to be loaded with :meth:`numpy.loadtxt()` - A :mod:`tofu.geom` object: with attribute 'Poly' - np.ndarray: an 2-dimensional array containing the 2D cartesian coordinates of a polygon SavePathInp : str / None The absolute path where the input file is stored units : str Flag indicating in which units the polygon coordinates is expressed in the input file / object / array (will be converted to meters) comments : str Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name skiprows : int Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name shape0 : int Specifies whether the loaded array is a (2,N) or (3,N) array (transposed it if necessary) Returns ------- Poly : np.ndarray (2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points addInfo : dict Dictionaryb containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted) """ assert type(PolyFileObj) in [list,str] or hasattr(PolyFileObj,"Poly") or np.asarray(PolyFileObj).ndim==2, "Arg PolyFileObj must be str (PathFileExt), a ToFu object with attribute Poly or an iterable convertible to 2d np.ndarray !" 
# Load PolyFileObj if file and check shape addInfo = {} if type(PolyFileObj) in [list,str]: PathFileExt = get_FileFromInfos(Path=SavePathInp, Name=PolyFileObj) # Include PathFileExt in ID for tracability addInfo = {'Input':PathFileExt} PolyFileObj = np.loadtxt(PathFileExt, dtype=float, comments=comments, delimiter=None, converters=None, skiprows=skiprows, usecols=None, unpack=False, ndmin=2) elif hasattr(PolyFileObj,"Poly"): addInfo = {'Input':PolyFileObj.Id.SaveName} PolyFileObj = PolyFileObj.Poly Poly = np.asarray(PolyFileObj) assert Poly.ndim==2 and shape0 in Poly.shape and max(Poly.shape)>=3 and not np.any(np.isnan(Poly)), "Arg np.asarray(PolyFileObj) must be a (2,N) or (N,2) np.ndarray with non NaNs !" Poly = Poly if Poly.shape[0]==shape0 else Poly.T Poly = convert_units(Poly, In=units, Out='m') return Poly, addInfo
[ "def", "get_PolyFromPolyFileObj", "(", "PolyFileObj", ",", "SavePathInp", "=", "None", ",", "units", "=", "'m'", ",", "comments", "=", "'#'", ",", "skiprows", "=", "0", ",", "shape0", "=", "2", ")", ":", "assert", "type", "(", "PolyFileObj", ")", "in", ...
Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units Useful for :meth:`tofu.plugins.AUG.Ves._create()` Parameters ---------- PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray The source where the polygon is to be found, either: - str: the name of a file containing the coorindates of a polygon to be loaded with :meth:`numpy.loadtxt()` - A :mod:`tofu.geom` object: with attribute 'Poly' - np.ndarray: an 2-dimensional array containing the 2D cartesian coordinates of a polygon SavePathInp : str / None The absolute path where the input file is stored units : str Flag indicating in which units the polygon coordinates is expressed in the input file / object / array (will be converted to meters) comments : str Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name skiprows : int Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name shape0 : int Specifies whether the loaded array is a (2,N) or (3,N) array (transposed it if necessary) Returns ------- Poly : np.ndarray (2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points addInfo : dict Dictionaryb containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted)
[ "Return", "a", "polygon", "as", "a", "np", ".", "ndarray", "extracted", "from", "a", "txt", "file", "or", "from", "a", "ToFu", "object", "with", "appropriate", "units" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/pathfile.py#L73-L121
train
33,064
ToFuProject/tofu
tofu/pathfile.py
CheckSameObj
def CheckSameObj(obj0, obj1, LFields=None): """ Check if two variables are the same instance of a ToFu class Checks a list of attributes, provided by LField Parameters ---------- obj0 : tofu object A variable refering to a ToFu object of any class obj1 : tofu object A variable refering to a ToFu object of the same class as obj0 LFields : None / str / list The criteria against which the two objects are evaluated: - None: True is returned - str or list: tests whether all listed attributes have the same value Returns ------- A : bool True only is LField is None or a list of attributes that all match """ A = True if LField is not None and obj0.__class__==obj1.__class__: assert type(LFields) in [str,list] if type(LFields) is str: LFields = [LFields] assert all([type(s) is str for s in LFields]) ind = [False for ii in range(0,len(LFields))] Dir0 = dir(obj0.Id)+dir(obj0) Dir1 = dir(obj1.Id)+dir(obj1) for ii in range(0,len(LFields)): assert LFields[ii] in Dir0, LFields[ii]+" not in "+obj0.Id.Name assert LFields[ii] in Dir1, LFields[ii]+" not in "+obj1.Id.Name if hasattr(obj0,LFields[ii]): ind[ii] = np.all(getattr(obj0,LFields[ii])==getattr(obj1,LFields[ii])) else: ind[ii] = getattr(obj0.Id,LFields[ii])==getattr(obj1.Id,LFields[ii]) A = all(ind) return A
python
def CheckSameObj(obj0, obj1, LFields=None): """ Check if two variables are the same instance of a ToFu class Checks a list of attributes, provided by LField Parameters ---------- obj0 : tofu object A variable refering to a ToFu object of any class obj1 : tofu object A variable refering to a ToFu object of the same class as obj0 LFields : None / str / list The criteria against which the two objects are evaluated: - None: True is returned - str or list: tests whether all listed attributes have the same value Returns ------- A : bool True only is LField is None or a list of attributes that all match """ A = True if LField is not None and obj0.__class__==obj1.__class__: assert type(LFields) in [str,list] if type(LFields) is str: LFields = [LFields] assert all([type(s) is str for s in LFields]) ind = [False for ii in range(0,len(LFields))] Dir0 = dir(obj0.Id)+dir(obj0) Dir1 = dir(obj1.Id)+dir(obj1) for ii in range(0,len(LFields)): assert LFields[ii] in Dir0, LFields[ii]+" not in "+obj0.Id.Name assert LFields[ii] in Dir1, LFields[ii]+" not in "+obj1.Id.Name if hasattr(obj0,LFields[ii]): ind[ii] = np.all(getattr(obj0,LFields[ii])==getattr(obj1,LFields[ii])) else: ind[ii] = getattr(obj0.Id,LFields[ii])==getattr(obj1.Id,LFields[ii]) A = all(ind) return A
[ "def", "CheckSameObj", "(", "obj0", ",", "obj1", ",", "LFields", "=", "None", ")", ":", "A", "=", "True", "if", "LField", "is", "not", "None", "and", "obj0", ".", "__class__", "==", "obj1", ".", "__class__", ":", "assert", "type", "(", "LFields", ")"...
Check if two variables are the same instance of a ToFu class Checks a list of attributes, provided by LField Parameters ---------- obj0 : tofu object A variable refering to a ToFu object of any class obj1 : tofu object A variable refering to a ToFu object of the same class as obj0 LFields : None / str / list The criteria against which the two objects are evaluated: - None: True is returned - str or list: tests whether all listed attributes have the same value Returns ------- A : bool True only is LField is None or a list of attributes that all match
[ "Check", "if", "two", "variables", "are", "the", "same", "instance", "of", "a", "ToFu", "class" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/pathfile.py#L525-L564
train
33,065
ToFuProject/tofu
tofu/pathfile.py
Save_Generic
def Save_Generic(obj, SaveName=None, Path='./', Mode='npz', compressed=False, Print=True): """ Save a ToFu object under file name SaveName, in folder Path ToFu provides built-in saving and loading functions for ToFu objects. There is now only one saving mode: - 'npz': saves a dict of key attributes using :meth:`numpy.savez` Good practices are: - save :class:`~tofu.geom.Ves` and :class:`~tofu.geom.Struct` - intermediate optics (:class:`~tofu.geom.Apert` and :class:`~tofu.geom.Lens`) generally do not need to be saved Indeed, they will be autoamtically included in larger objects like Detect or Cam objects Parameters ---------- SaveName : str The file name, if None (recommended) uses obj.Id.SaveName Path : str Path where to save the file Mode : str Flag specifying the saving mode - 'npz': Only mode currently available ('pck' deprecated) compressed : bool Indicate whether to use np.savez_compressed (slower but smaller files) """ assert type(obj.__class__) is type if SaveName is not None: C = type(SaveName) is str and not (SaveName[-4]=='.') assert C, "SaveName should not include the extension !" assert Path is None or type(Path) is str assert Mode in ['npz'] assert type(compressed) is bool assert type(Print) is bool if Path is None: Path = obj.Id.SavePath else: obj._Id._SavePath = Path if Mode=='npz': Ext = '.npz' if SaveName is None: SaveName = obj.Id.SaveName else: obj._Id.set_SaveName(SaveName) pathfileext = os.path.join(Path,SaveName+Ext) if Ext=='.npz': _save_np(obj, pathfileext, compressed=compressed) if Print: print("Saved in : "+pathfileext)
python
def Save_Generic(obj, SaveName=None, Path='./', Mode='npz', compressed=False, Print=True): """ Save a ToFu object under file name SaveName, in folder Path ToFu provides built-in saving and loading functions for ToFu objects. There is now only one saving mode: - 'npz': saves a dict of key attributes using :meth:`numpy.savez` Good practices are: - save :class:`~tofu.geom.Ves` and :class:`~tofu.geom.Struct` - intermediate optics (:class:`~tofu.geom.Apert` and :class:`~tofu.geom.Lens`) generally do not need to be saved Indeed, they will be autoamtically included in larger objects like Detect or Cam objects Parameters ---------- SaveName : str The file name, if None (recommended) uses obj.Id.SaveName Path : str Path where to save the file Mode : str Flag specifying the saving mode - 'npz': Only mode currently available ('pck' deprecated) compressed : bool Indicate whether to use np.savez_compressed (slower but smaller files) """ assert type(obj.__class__) is type if SaveName is not None: C = type(SaveName) is str and not (SaveName[-4]=='.') assert C, "SaveName should not include the extension !" assert Path is None or type(Path) is str assert Mode in ['npz'] assert type(compressed) is bool assert type(Print) is bool if Path is None: Path = obj.Id.SavePath else: obj._Id._SavePath = Path if Mode=='npz': Ext = '.npz' if SaveName is None: SaveName = obj.Id.SaveName else: obj._Id.set_SaveName(SaveName) pathfileext = os.path.join(Path,SaveName+Ext) if Ext=='.npz': _save_np(obj, pathfileext, compressed=compressed) if Print: print("Saved in : "+pathfileext)
[ "def", "Save_Generic", "(", "obj", ",", "SaveName", "=", "None", ",", "Path", "=", "'./'", ",", "Mode", "=", "'npz'", ",", "compressed", "=", "False", ",", "Print", "=", "True", ")", ":", "assert", "type", "(", "obj", ".", "__class__", ")", "is", "...
Save a ToFu object under file name SaveName, in folder Path ToFu provides built-in saving and loading functions for ToFu objects. There is now only one saving mode: - 'npz': saves a dict of key attributes using :meth:`numpy.savez` Good practices are: - save :class:`~tofu.geom.Ves` and :class:`~tofu.geom.Struct` - intermediate optics (:class:`~tofu.geom.Apert` and :class:`~tofu.geom.Lens`) generally do not need to be saved Indeed, they will be autoamtically included in larger objects like Detect or Cam objects Parameters ---------- SaveName : str The file name, if None (recommended) uses obj.Id.SaveName Path : str Path where to save the file Mode : str Flag specifying the saving mode - 'npz': Only mode currently available ('pck' deprecated) compressed : bool Indicate whether to use np.savez_compressed (slower but smaller files)
[ "Save", "a", "ToFu", "object", "under", "file", "name", "SaveName", "in", "folder", "Path" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/pathfile.py#L904-L954
train
33,066
ToFuProject/tofu
tofu/pathfile.py
Open
def Open(pathfileext=None, shot=None, t=None, Dt=None, Mesh=None, Deg=None, Deriv=None, Sep=True, Pos=True, OutPath=None, ReplacePath=None, Ves=None, out='full', Verb=False, Print=True): """ Open a ToFu object saved file This generic open function identifies the required loading routine by detecting how the object was saved from the file name extension. Also, it uses :meth:`~tofu.pathfile.FindSolFile()` to identify the relevant file in case key criteria such as shot, Deg... are provided instead of the file name itself. Finally, once all the relevant data is loaded from the file, a ToFu object is re-created, if necessary by implicitly loading all other objects it may depend on (i.e.: vessel, apertures...) If pathfileext is not provided (None), then the following keyword arguments are fed to :meth:`~tofu.pathfile.FindSolFile()`: shot, t, Dt, Mesh, Deg, Deriv, Sep, Pos Parameters ---------- pathfileext : None / str If provided, the name of the file to load OutPath : None / str If provided, the absolute path where the file is to be found ReplacePath : str If provided, ? (to finish) Ves : None / If provided, the :class:`tofu.geom.Ves` object that shall be used to reconstruct the object (if not provided, the appropriate vessel will be loaded). out : str Flag indicating whether the object should be loaded completely ('full'), in a light dismissing the heaviest attributes ('light') or whether only the Id or a list of Id should be returned ('Id'), valid only for '.npz' Verb : bool Flag indicating whether to pring intermediate comments on the loading procedure Returns ------- obj ToFu object The loaded and re-created ToFu object """ assert None in [pathfileext,shot] and not (pathfileext is None and shot is None), "Arg pathfileext or shot must be None, but not both !" 
if pathfileext is None: File = FindSolFile(shot=shot, t=t, Dt=Dt, Mesh=Mesh, Deg=Deg, Deriv=Deriv, Sep=Sep, Pos=Pos, OutPath=OutPath) if File is None: return File pathfileext = os.path.join(OutPath,File) C = any([ss in pathfileext for ss in ['.npz']]) assert C, "Arg pathfileext must contain '.npz' !" if '.npz' in pathfileext: obj = _open_np(pathfileext, Ves=Ves, ReplacePath=ReplacePath, out=out, Verb=Verb, Print=Print) if Print: print("Loaded : "+pathfileext) return obj
python
def Open(pathfileext=None, shot=None, t=None, Dt=None, Mesh=None, Deg=None, Deriv=None, Sep=True, Pos=True, OutPath=None, ReplacePath=None, Ves=None, out='full', Verb=False, Print=True): """ Open a ToFu object saved file This generic open function identifies the required loading routine by detecting how the object was saved from the file name extension. Also, it uses :meth:`~tofu.pathfile.FindSolFile()` to identify the relevant file in case key criteria such as shot, Deg... are provided instead of the file name itself. Finally, once all the relevant data is loaded from the file, a ToFu object is re-created, if necessary by implicitly loading all other objects it may depend on (i.e.: vessel, apertures...) If pathfileext is not provided (None), then the following keyword arguments are fed to :meth:`~tofu.pathfile.FindSolFile()`: shot, t, Dt, Mesh, Deg, Deriv, Sep, Pos Parameters ---------- pathfileext : None / str If provided, the name of the file to load OutPath : None / str If provided, the absolute path where the file is to be found ReplacePath : str If provided, ? (to finish) Ves : None / If provided, the :class:`tofu.geom.Ves` object that shall be used to reconstruct the object (if not provided, the appropriate vessel will be loaded). out : str Flag indicating whether the object should be loaded completely ('full'), in a light dismissing the heaviest attributes ('light') or whether only the Id or a list of Id should be returned ('Id'), valid only for '.npz' Verb : bool Flag indicating whether to pring intermediate comments on the loading procedure Returns ------- obj ToFu object The loaded and re-created ToFu object """ assert None in [pathfileext,shot] and not (pathfileext is None and shot is None), "Arg pathfileext or shot must be None, but not both !" 
if pathfileext is None: File = FindSolFile(shot=shot, t=t, Dt=Dt, Mesh=Mesh, Deg=Deg, Deriv=Deriv, Sep=Sep, Pos=Pos, OutPath=OutPath) if File is None: return File pathfileext = os.path.join(OutPath,File) C = any([ss in pathfileext for ss in ['.npz']]) assert C, "Arg pathfileext must contain '.npz' !" if '.npz' in pathfileext: obj = _open_np(pathfileext, Ves=Ves, ReplacePath=ReplacePath, out=out, Verb=Verb, Print=Print) if Print: print("Loaded : "+pathfileext) return obj
[ "def", "Open", "(", "pathfileext", "=", "None", ",", "shot", "=", "None", ",", "t", "=", "None", ",", "Dt", "=", "None", ",", "Mesh", "=", "None", ",", "Deg", "=", "None", ",", "Deriv", "=", "None", ",", "Sep", "=", "True", ",", "Pos", "=", "...
Open a ToFu object saved file This generic open function identifies the required loading routine by detecting how the object was saved from the file name extension. Also, it uses :meth:`~tofu.pathfile.FindSolFile()` to identify the relevant file in case key criteria such as shot, Deg... are provided instead of the file name itself. Finally, once all the relevant data is loaded from the file, a ToFu object is re-created, if necessary by implicitly loading all other objects it may depend on (i.e.: vessel, apertures...) If pathfileext is not provided (None), then the following keyword arguments are fed to :meth:`~tofu.pathfile.FindSolFile()`: shot, t, Dt, Mesh, Deg, Deriv, Sep, Pos Parameters ---------- pathfileext : None / str If provided, the name of the file to load OutPath : None / str If provided, the absolute path where the file is to be found ReplacePath : str If provided, ? (to finish) Ves : None / If provided, the :class:`tofu.geom.Ves` object that shall be used to reconstruct the object (if not provided, the appropriate vessel will be loaded). out : str Flag indicating whether the object should be loaded completely ('full'), in a light dismissing the heaviest attributes ('light') or whether only the Id or a list of Id should be returned ('Id'), valid only for '.npz' Verb : bool Flag indicating whether to pring intermediate comments on the loading procedure Returns ------- obj ToFu object The loaded and re-created ToFu object
[ "Open", "a", "ToFu", "object", "saved", "file" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/pathfile.py#L1176-L1224
train
33,067
ToFuProject/tofu
tofu/pathfile.py
ID.set_LObj
def set_LObj(self,LObj=None): """ Set the LObj attribute, storing objects the instance depends on For example: A Detect object depends on a vessel and some apertures That link between should be stored somewhere (for saving/loading). LObj does this: it stores the ID (as dict) of all objects depended on. Parameters ---------- LObj : None / dict / :class:`~tofu.pathfile.ID` / list of such Provide either: - A dict (derived from :meth:`~tofu.pathfile.ID._todict`) - A :class:`~tofu.pathfile.ID` instance - A list of dict or :class:`~tofu.pathfile.ID` instances """ self._LObj = {} if LObj is not None: if type(LObj) is not list: LObj = [LObj] for ii in range(0,len(LObj)): if type(LObj[ii]) is ID: LObj[ii] = LObj[ii]._todict() ClsU = list(set([oo['Cls'] for oo in LObj])) for c in ClsU: self._LObj[c] = [oo for oo in LObj if oo['Cls']==c]
python
def set_LObj(self,LObj=None): """ Set the LObj attribute, storing objects the instance depends on For example: A Detect object depends on a vessel and some apertures That link between should be stored somewhere (for saving/loading). LObj does this: it stores the ID (as dict) of all objects depended on. Parameters ---------- LObj : None / dict / :class:`~tofu.pathfile.ID` / list of such Provide either: - A dict (derived from :meth:`~tofu.pathfile.ID._todict`) - A :class:`~tofu.pathfile.ID` instance - A list of dict or :class:`~tofu.pathfile.ID` instances """ self._LObj = {} if LObj is not None: if type(LObj) is not list: LObj = [LObj] for ii in range(0,len(LObj)): if type(LObj[ii]) is ID: LObj[ii] = LObj[ii]._todict() ClsU = list(set([oo['Cls'] for oo in LObj])) for c in ClsU: self._LObj[c] = [oo for oo in LObj if oo['Cls']==c]
[ "def", "set_LObj", "(", "self", ",", "LObj", "=", "None", ")", ":", "self", ".", "_LObj", "=", "{", "}", "if", "LObj", "is", "not", "None", ":", "if", "type", "(", "LObj", ")", "is", "not", "list", ":", "LObj", "=", "[", "LObj", "]", "for", "...
Set the LObj attribute, storing objects the instance depends on For example: A Detect object depends on a vessel and some apertures That link between should be stored somewhere (for saving/loading). LObj does this: it stores the ID (as dict) of all objects depended on. Parameters ---------- LObj : None / dict / :class:`~tofu.pathfile.ID` / list of such Provide either: - A dict (derived from :meth:`~tofu.pathfile.ID._todict`) - A :class:`~tofu.pathfile.ID` instance - A list of dict or :class:`~tofu.pathfile.ID` instances
[ "Set", "the", "LObj", "attribute", "storing", "objects", "the", "instance", "depends", "on" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/pathfile.py#L346-L372
train
33,068
ToFuProject/tofu
tofu/pathfile.py
ID.set_USRdict
def set_USRdict(self,USRdict={}): """ Set the USRdict, containing user-defined info about the instance Useful for arbitrary info (e.g.: manufacturing date, material...) Parameters ---------- USRdict : dict A user-defined dictionary containing info about the instance """ self._check_inputs(USRdict=USRdict) self._USRdict = USRdict
python
def set_USRdict(self,USRdict={}): """ Set the USRdict, containing user-defined info about the instance Useful for arbitrary info (e.g.: manufacturing date, material...) Parameters ---------- USRdict : dict A user-defined dictionary containing info about the instance """ self._check_inputs(USRdict=USRdict) self._USRdict = USRdict
[ "def", "set_USRdict", "(", "self", ",", "USRdict", "=", "{", "}", ")", ":", "self", ".", "_check_inputs", "(", "USRdict", "=", "USRdict", ")", "self", ".", "_USRdict", "=", "USRdict" ]
Set the USRdict, containing user-defined info about the instance Useful for arbitrary info (e.g.: manufacturing date, material...) Parameters ---------- USRdict : dict A user-defined dictionary containing info about the instance
[ "Set", "the", "USRdict", "containing", "user", "-", "defined", "info", "about", "the", "instance" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/pathfile.py#L374-L386
train
33,069
ToFuProject/tofu
tofu/geom/utils.py
create_config
def create_config(case=None, Exp='Dummy', Type='Tor', Lim=None, Bump_posextent=[np.pi/4., np.pi/4], R=2.4, r=1., elong=0., Dshape=0., divlow=True, divup=True, nP=200, out='object', SavePath='./'): """ Create easily a tofu.geom.Config object In tofu, a Config (short for geometrical configuration) refers to the 3D geometry of a fusion device. It includes, at least, a simple 2D polygon describing the first wall of the fusion chamber, and can also include other structural elements (tiles, limiters...) that can be non-axisymmetric. To create a simple Config, provide either the name of a reference test case, of a set of geometrical parameters (major radius, elongation...). This is just a tool for fast testing, if you want to create a custom config, use directly tofu.geom.Config and provide the parameters you want. Parameters ---------- case : str The name of a reference test case, if provided, this arguments is sufficient, the others are ignored Exp : str The name of the experiment Type : str The type of configuration (toroidal 'Tor' or linear 'Lin') Lim_Bump: list The angular (poloidal) limits, in the cross-section of the extension of the outer bumper R : float The major radius of the center of the cross-section r : float The minor radius of the cross-section elong: float An elongation parameter (in [-1;1]) Dshape: float A parameter specifying the D-shape of the cross-section (in [-1;1]) divlow: bool A flag specifying whether to include a lower divertor-like shape divup: bool A flag specifying whether to include an upper divertor-like shape nP: int Number of points used to describe the cross-section polygon out: str FLag indicating whether to return: - 'dict' : the polygons as a dictionary of np.ndarrays - 'object': the configuration as a tofu.geom.Config instance Return ------ conf: tofu.geom.Config / dict Depending on the value of parameter out, either: - the tofu.geom.Config object created - a dictionary of the polygons and their pos/extent (if any) """ if case is 
not None: conf = _create_config_testcase(config=case, out=out) else: poly, pbump, pbaffle = _compute_VesPoly(R=R, r=r, elong=elong, Dshape=Dshape, divlow=divlow, divup=divup, nP=nP) if out=='dict': conf = {'Ves':{'Poly':poly}, 'Baffle':{'Poly':pbaffle}, 'Bumper':{'Poly':pbump, 'pos':Bump_posextent[0], 'extent':Bump_posextent[1]}} else: ves = _core.Ves(Poly=poly, Type=Type, Lim=Lim, Exp=Exp, Name='Ves', SavePath=SavePath) baf = _core.PFC(Poly=pbaffle, Type=Type, Lim=Lim, Exp=Exp, Name='Baffle', color='b', SavePath=SavePath) bump = _core.PFC(Poly=pbump, Type=Type, pos=Bump_posextent[0], extent=Bump_posextent[1], Exp=Exp, Name='Bumper', color='g', SavePath=SavePath) conf = _core.Config(Name='Dummy', Exp=Exp, lStruct=[ves,baf,bump], SavePath=SavePath) return conf
python
def create_config(case=None, Exp='Dummy', Type='Tor', Lim=None, Bump_posextent=[np.pi/4., np.pi/4], R=2.4, r=1., elong=0., Dshape=0., divlow=True, divup=True, nP=200, out='object', SavePath='./'): """ Create easily a tofu.geom.Config object In tofu, a Config (short for geometrical configuration) refers to the 3D geometry of a fusion device. It includes, at least, a simple 2D polygon describing the first wall of the fusion chamber, and can also include other structural elements (tiles, limiters...) that can be non-axisymmetric. To create a simple Config, provide either the name of a reference test case, of a set of geometrical parameters (major radius, elongation...). This is just a tool for fast testing, if you want to create a custom config, use directly tofu.geom.Config and provide the parameters you want. Parameters ---------- case : str The name of a reference test case, if provided, this arguments is sufficient, the others are ignored Exp : str The name of the experiment Type : str The type of configuration (toroidal 'Tor' or linear 'Lin') Lim_Bump: list The angular (poloidal) limits, in the cross-section of the extension of the outer bumper R : float The major radius of the center of the cross-section r : float The minor radius of the cross-section elong: float An elongation parameter (in [-1;1]) Dshape: float A parameter specifying the D-shape of the cross-section (in [-1;1]) divlow: bool A flag specifying whether to include a lower divertor-like shape divup: bool A flag specifying whether to include an upper divertor-like shape nP: int Number of points used to describe the cross-section polygon out: str FLag indicating whether to return: - 'dict' : the polygons as a dictionary of np.ndarrays - 'object': the configuration as a tofu.geom.Config instance Return ------ conf: tofu.geom.Config / dict Depending on the value of parameter out, either: - the tofu.geom.Config object created - a dictionary of the polygons and their pos/extent (if any) """ if case is 
not None: conf = _create_config_testcase(config=case, out=out) else: poly, pbump, pbaffle = _compute_VesPoly(R=R, r=r, elong=elong, Dshape=Dshape, divlow=divlow, divup=divup, nP=nP) if out=='dict': conf = {'Ves':{'Poly':poly}, 'Baffle':{'Poly':pbaffle}, 'Bumper':{'Poly':pbump, 'pos':Bump_posextent[0], 'extent':Bump_posextent[1]}} else: ves = _core.Ves(Poly=poly, Type=Type, Lim=Lim, Exp=Exp, Name='Ves', SavePath=SavePath) baf = _core.PFC(Poly=pbaffle, Type=Type, Lim=Lim, Exp=Exp, Name='Baffle', color='b', SavePath=SavePath) bump = _core.PFC(Poly=pbump, Type=Type, pos=Bump_posextent[0], extent=Bump_posextent[1], Exp=Exp, Name='Bumper', color='g', SavePath=SavePath) conf = _core.Config(Name='Dummy', Exp=Exp, lStruct=[ves,baf,bump], SavePath=SavePath) return conf
[ "def", "create_config", "(", "case", "=", "None", ",", "Exp", "=", "'Dummy'", ",", "Type", "=", "'Tor'", ",", "Lim", "=", "None", ",", "Bump_posextent", "=", "[", "np", ".", "pi", "/", "4.", ",", "np", ".", "pi", "/", "4", "]", ",", "R", "=", ...
Create easily a tofu.geom.Config object In tofu, a Config (short for geometrical configuration) refers to the 3D geometry of a fusion device. It includes, at least, a simple 2D polygon describing the first wall of the fusion chamber, and can also include other structural elements (tiles, limiters...) that can be non-axisymmetric. To create a simple Config, provide either the name of a reference test case, of a set of geometrical parameters (major radius, elongation...). This is just a tool for fast testing, if you want to create a custom config, use directly tofu.geom.Config and provide the parameters you want. Parameters ---------- case : str The name of a reference test case, if provided, this arguments is sufficient, the others are ignored Exp : str The name of the experiment Type : str The type of configuration (toroidal 'Tor' or linear 'Lin') Lim_Bump: list The angular (poloidal) limits, in the cross-section of the extension of the outer bumper R : float The major radius of the center of the cross-section r : float The minor radius of the cross-section elong: float An elongation parameter (in [-1;1]) Dshape: float A parameter specifying the D-shape of the cross-section (in [-1;1]) divlow: bool A flag specifying whether to include a lower divertor-like shape divup: bool A flag specifying whether to include an upper divertor-like shape nP: int Number of points used to describe the cross-section polygon out: str FLag indicating whether to return: - 'dict' : the polygons as a dictionary of np.ndarrays - 'object': the configuration as a tofu.geom.Config instance Return ------ conf: tofu.geom.Config / dict Depending on the value of parameter out, either: - the tofu.geom.Config object created - a dictionary of the polygons and their pos/extent (if any)
[ "Create", "easily", "a", "tofu", ".", "geom", ".", "Config", "object" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/utils.py#L690-L771
train
33,070
piglei/uwsgi-sloth
uwsgi_sloth/commands/analyze.py
analyze_log
def analyze_log(fp, configs, url_rules): """Analyze log file""" url_classifier = URLClassifier(url_rules) analyzer = LogAnalyzer(url_classifier=url_classifier, min_msecs=configs.min_msecs) for line in fp: analyzer.analyze_line(line) return analyzer.get_data()
python
def analyze_log(fp, configs, url_rules): """Analyze log file""" url_classifier = URLClassifier(url_rules) analyzer = LogAnalyzer(url_classifier=url_classifier, min_msecs=configs.min_msecs) for line in fp: analyzer.analyze_line(line) return analyzer.get_data()
[ "def", "analyze_log", "(", "fp", ",", "configs", ",", "url_rules", ")", ":", "url_classifier", "=", "URLClassifier", "(", "url_rules", ")", "analyzer", "=", "LogAnalyzer", "(", "url_classifier", "=", "url_classifier", ",", "min_msecs", "=", "configs", ".", "mi...
Analyze log file
[ "Analyze", "log", "file" ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/commands/analyze.py#L15-L21
train
33,071
ToFuProject/tofu
tofu/data/_comp.py
_spectrogram_scipy_fourier
def _spectrogram_scipy_fourier(data, fs, nt, nch, fmin=None, window=('tukey', 0.25), deg=False, nperseg=None, noverlap=None, detrend='linear', stft=False, boundary='constant', padded=True, warn=True): """ Return a spectrogram for each channel, and a common frequency vector The min frequency of interest fmin fixes the nb. of pt. per seg. (if None) The number of overlapping points is set to nperseg-1 if None The choice of the window type is a trade-off between: Spectral resolution between similar frequencies/amplitudes: => Dynamic range (lots of != frequencies of != amplitudes): => Compromise: => 'hann' """ # Check inputs if nperseg is None and fmin is None: fmin = _fmin_coef*(fs/nt) if warn: msg = "nperseg and fmin were not provided\n" msg += " => fmin automatically set to 10.*fs/nt:\n" msg += " fmin = 10.*{0} / {1} = {2} Hz".format(fs,nt,fmin) warnings.warn(msg) # Format inputs if nperseg is None: assert fmin > fs/nt nperseg = int(np.ceil(fs/fmin)) if nperseg%2==1: nperseg = nperseg + 1 if noverlap is None: noverlap = nperseg - 1 n = int(np.ceil(np.log(nperseg)/np.log(2))) nfft = 2**n # Prepare output if stft: f, tf, ssx = scpsig.stft(data, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, detrend=detrend, return_onesided=True, boundary=boundary, padded=padded, axis=0) else: f, tf, ssx = scpsig.spectrogram(data, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, detrend=detrend, return_onesided=True, scaling='density', axis=0, mode='complex') # Split in list (per channel) lssx = np.split(ssx, np.arange(1,nch), axis=1) lssx = [ss.squeeze().T for ss in lssx] lpsd = [np.abs(ss)**2 for ss in lssx] lang = [np.angle(ss, deg=deg) for ss in lssx] return f, tf, lpsd, lang
python
def _spectrogram_scipy_fourier(data, fs, nt, nch, fmin=None, window=('tukey', 0.25), deg=False, nperseg=None, noverlap=None, detrend='linear', stft=False, boundary='constant', padded=True, warn=True): """ Return a spectrogram for each channel, and a common frequency vector The min frequency of interest fmin fixes the nb. of pt. per seg. (if None) The number of overlapping points is set to nperseg-1 if None The choice of the window type is a trade-off between: Spectral resolution between similar frequencies/amplitudes: => Dynamic range (lots of != frequencies of != amplitudes): => Compromise: => 'hann' """ # Check inputs if nperseg is None and fmin is None: fmin = _fmin_coef*(fs/nt) if warn: msg = "nperseg and fmin were not provided\n" msg += " => fmin automatically set to 10.*fs/nt:\n" msg += " fmin = 10.*{0} / {1} = {2} Hz".format(fs,nt,fmin) warnings.warn(msg) # Format inputs if nperseg is None: assert fmin > fs/nt nperseg = int(np.ceil(fs/fmin)) if nperseg%2==1: nperseg = nperseg + 1 if noverlap is None: noverlap = nperseg - 1 n = int(np.ceil(np.log(nperseg)/np.log(2))) nfft = 2**n # Prepare output if stft: f, tf, ssx = scpsig.stft(data, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, detrend=detrend, return_onesided=True, boundary=boundary, padded=padded, axis=0) else: f, tf, ssx = scpsig.spectrogram(data, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, detrend=detrend, return_onesided=True, scaling='density', axis=0, mode='complex') # Split in list (per channel) lssx = np.split(ssx, np.arange(1,nch), axis=1) lssx = [ss.squeeze().T for ss in lssx] lpsd = [np.abs(ss)**2 for ss in lssx] lang = [np.angle(ss, deg=deg) for ss in lssx] return f, tf, lpsd, lang
[ "def", "_spectrogram_scipy_fourier", "(", "data", ",", "fs", ",", "nt", ",", "nch", ",", "fmin", "=", "None", ",", "window", "=", "(", "'tukey'", ",", "0.25", ")", ",", "deg", "=", "False", ",", "nperseg", "=", "None", ",", "noverlap", "=", "None", ...
Return a spectrogram for each channel, and a common frequency vector The min frequency of interest fmin fixes the nb. of pt. per seg. (if None) The number of overlapping points is set to nperseg-1 if None The choice of the window type is a trade-off between: Spectral resolution between similar frequencies/amplitudes: => Dynamic range (lots of != frequencies of != amplitudes): => Compromise: => 'hann'
[ "Return", "a", "spectrogram", "for", "each", "channel", "and", "a", "common", "frequency", "vector" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_comp.py#L70-L130
train
33,072
ToFuProject/tofu
tofu/data/_comp.py
filter_svd
def filter_svd(data, lapack_driver='gesdd', modes=[]): """ Return the svd-filtered signal using only the selected mode Provide the indices of the modes desired """ # Check input modes = np.asarray(modes,dtype=int) assert modes.ndim==1 assert modes.size>=1, "No modes selected !" u, s, v = scplin.svd(data, full_matrices=False, compute_uv=True, overwrite_a=False, check_finite=True, lapack_driver=lapack_driver) indout = np.arange(0,s.size) indout = np.delete(indout, modes) data_in = np.dot(u[:,modes]*s[modes],v[modes,:]) data_out = np.dot(u[:,indout]*s[indout],v[indout,:]) return data_in, data_out
python
def filter_svd(data, lapack_driver='gesdd', modes=[]): """ Return the svd-filtered signal using only the selected mode Provide the indices of the modes desired """ # Check input modes = np.asarray(modes,dtype=int) assert modes.ndim==1 assert modes.size>=1, "No modes selected !" u, s, v = scplin.svd(data, full_matrices=False, compute_uv=True, overwrite_a=False, check_finite=True, lapack_driver=lapack_driver) indout = np.arange(0,s.size) indout = np.delete(indout, modes) data_in = np.dot(u[:,modes]*s[modes],v[modes,:]) data_out = np.dot(u[:,indout]*s[indout],v[indout,:]) return data_in, data_out
[ "def", "filter_svd", "(", "data", ",", "lapack_driver", "=", "'gesdd'", ",", "modes", "=", "[", "]", ")", ":", "# Check input", "modes", "=", "np", ".", "asarray", "(", "modes", ",", "dtype", "=", "int", ")", "assert", "modes", ".", "ndim", "==", "1"...
Return the svd-filtered signal using only the selected mode Provide the indices of the modes desired
[ "Return", "the", "svd", "-", "filtered", "signal", "using", "only", "the", "selected", "mode" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_comp.py#L351-L370
train
33,073
piglei/uwsgi-sloth
uwsgi_sloth/commands/start.py
HTMLRender.render_requests_data_to_html
def render_requests_data_to_html(self, data, file_name, context={}): """Render to HTML file""" file_path = os.path.join(self.html_dir, file_name) logger.info('Rendering HTML file %s...' % file_path) data = format_data(data) data.update(context) data.update(domain=self.domain) with open(file_path, 'w') as fp: fp.write(render_template('realtime.html', data))
python
def render_requests_data_to_html(self, data, file_name, context={}): """Render to HTML file""" file_path = os.path.join(self.html_dir, file_name) logger.info('Rendering HTML file %s...' % file_path) data = format_data(data) data.update(context) data.update(domain=self.domain) with open(file_path, 'w') as fp: fp.write(render_template('realtime.html', data))
[ "def", "render_requests_data_to_html", "(", "self", ",", "data", ",", "file_name", ",", "context", "=", "{", "}", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "html_dir", ",", "file_name", ")", "logger", ".", "info", "(...
Render to HTML file
[ "Render", "to", "HTML", "file" ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/commands/start.py#L26-L34
train
33,074
ToFuProject/tofu
tofu/utils.py
get_figuresize
def get_figuresize(fs, fsdef=(12,6), orient='landscape', method='xrandr'): """ Generic function to return figure size in inches Useful for str-based flags such as: - 'a4' : use orient='portrait' or 'landscape' - 'full': to get screen size use method='xrandr' (recommended), as 'xdpyinfo' tends to be wrong """ assert fs is None or type(fs) in [str,tuple] if fs is None: fs = fsdef elif type(fs) is str: if fs=='a4': fs = (8.27,11.69) if orient=='landscape': fs = (fs[1],fs[0]) elif fs=='full': assert method in ['xrandr','xdpyinfo'] if method=='xrandr': cmd0 = "xrandr" #cmd1 = "grep '*'" out = subprocess.check_output(cmd0.split()) s = [o for o in out.decode('utf-8').split('\n') if 'mm x ' in o] assert len(s)==1 s = [ss for ss in s[0].split(' ') if 'mm' in ss] assert len(s)==2 fsmm = [int(ss.replace('mm','')) for ss in s] else: cmd0 = 'xdpyinfo' out = subprocess.check_output(cmd0.split()) s = [o for o in out.decode('utf-8').split('\n') if 'dimensions' in o] assert len(s)==1 s = s[0][s[0].index('(')+1:s[0].index(' millimeters')] fsmm = [int(ss) for ss in s.split('x')] fs = (fsmm[0]/(10*2.54), fsmm[1]/(10*2.54)) assert type(fs) is tuple and len(fs)==2 return fs
python
def get_figuresize(fs, fsdef=(12,6), orient='landscape', method='xrandr'): """ Generic function to return figure size in inches Useful for str-based flags such as: - 'a4' : use orient='portrait' or 'landscape' - 'full': to get screen size use method='xrandr' (recommended), as 'xdpyinfo' tends to be wrong """ assert fs is None or type(fs) in [str,tuple] if fs is None: fs = fsdef elif type(fs) is str: if fs=='a4': fs = (8.27,11.69) if orient=='landscape': fs = (fs[1],fs[0]) elif fs=='full': assert method in ['xrandr','xdpyinfo'] if method=='xrandr': cmd0 = "xrandr" #cmd1 = "grep '*'" out = subprocess.check_output(cmd0.split()) s = [o for o in out.decode('utf-8').split('\n') if 'mm x ' in o] assert len(s)==1 s = [ss for ss in s[0].split(' ') if 'mm' in ss] assert len(s)==2 fsmm = [int(ss.replace('mm','')) for ss in s] else: cmd0 = 'xdpyinfo' out = subprocess.check_output(cmd0.split()) s = [o for o in out.decode('utf-8').split('\n') if 'dimensions' in o] assert len(s)==1 s = s[0][s[0].index('(')+1:s[0].index(' millimeters')] fsmm = [int(ss) for ss in s.split('x')] fs = (fsmm[0]/(10*2.54), fsmm[1]/(10*2.54)) assert type(fs) is tuple and len(fs)==2 return fs
[ "def", "get_figuresize", "(", "fs", ",", "fsdef", "=", "(", "12", ",", "6", ")", ",", "orient", "=", "'landscape'", ",", "method", "=", "'xrandr'", ")", ":", "assert", "fs", "is", "None", "or", "type", "(", "fs", ")", "in", "[", "str", ",", "tupl...
Generic function to return figure size in inches Useful for str-based flags such as: - 'a4' : use orient='portrait' or 'landscape' - 'full': to get screen size use method='xrandr' (recommended), as 'xdpyinfo' tends to be wrong
[ "Generic", "function", "to", "return", "figure", "size", "in", "inches" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/utils.py#L79-L119
train
33,075
ToFuProject/tofu
tofu/utils.py
_set_arrayorder
def _set_arrayorder(obj, arrayorder='C'): """ Set the memory order of all np.ndarrays in a tofu object """ msg = "Arg arrayorder must be in ['C','F']" assert arrayorder in ['C','F'], msg d = obj.to_dict(strip=-1) account = {'Success':[], 'Failed':[]} for k, v in d.items(): if type(v) is np.array and v.ndim>1: try: if arrayorder=='C': d[k] = np.ascontiguousarray(v) else: d[k] = np.asfortranarray(v) account['Success'].append(k) except Exception as err: warnings.warn(str(err)) account['Failed'].append(k) return d, account
python
def _set_arrayorder(obj, arrayorder='C'): """ Set the memory order of all np.ndarrays in a tofu object """ msg = "Arg arrayorder must be in ['C','F']" assert arrayorder in ['C','F'], msg d = obj.to_dict(strip=-1) account = {'Success':[], 'Failed':[]} for k, v in d.items(): if type(v) is np.array and v.ndim>1: try: if arrayorder=='C': d[k] = np.ascontiguousarray(v) else: d[k] = np.asfortranarray(v) account['Success'].append(k) except Exception as err: warnings.warn(str(err)) account['Failed'].append(k) return d, account
[ "def", "_set_arrayorder", "(", "obj", ",", "arrayorder", "=", "'C'", ")", ":", "msg", "=", "\"Arg arrayorder must be in ['C','F']\"", "assert", "arrayorder", "in", "[", "'C'", ",", "'F'", "]", ",", "msg", "d", "=", "obj", ".", "to_dict", "(", "strip", "=",...
Set the memory order of all np.ndarrays in a tofu object
[ "Set", "the", "memory", "order", "of", "all", "np", ".", "ndarrays", "in", "a", "tofu", "object" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/utils.py#L214-L233
train
33,076
ToFuProject/tofu
tofu/utils.py
save
def save(obj, path=None, name=None, sep=_sep, deep=False, mode='npz', strip=None, compressed=False, verb=True, return_pfe=False): """ Save the ToFu object ToFu provides built-in saving and loading functions for ToFu objects. Specifying saving path ad name is optional (defaults recommended) The mode refers to the file format Good practices are: - save all struct objects Parameters ---------- obj : ToFuObject subclass instance The object to be saved path : None / str The folder where to save, if None (recommended), uses obj.Id.SavePath name : None / str The file name, if None (recommended), uses obj.Id.SaveName mode : str Flag specifying the saving mode - 'npz': numpy file - 'mat': matlab file strip: int Flag indicating how stripped the saved object should be See docstring of self.strip() deep: bool Flag, used when the object has other tofu objects as attributes Indicated whether these attribute object should be: - True: converted to dict themselves in order to be saved inside the same file as attributes (-> uses self.to_dict(deep='dict')) - False: not converted, in that the strategy would be to save them separately and store only the reference to the saved files instead of the objects themselves. To do this, you must: 1/ Save all object attributes independently 2/ Store only the reference by doing self.strip(-1) The strip() method will check they have been saved before removing them, and throw an Exception otherwise 3/ self.save(deep=False) compressed : bool Flag indicating whether to compress the file (slower, not recommended) verb : bool Flag indicating whether to print a summary (recommended) """ msg = "Arg obj must be a tofu subclass instance !" assert issubclass(obj.__class__, ToFuObject), msg msg = "Arg path must be None or a str (folder) !" assert path is None or isinstance(path,str), msg msg = "Arg name must be None or a str (file name) !" assert name is None or isinstance(name,str), msg msg = "Arg mode must be in ['npz','mat'] !" 
assert mode in ['npz','mat'], msg msg = "Arg compressed must be a bool !" assert type(compressed) is bool, msg msg = "Arg verb must be a bool !" assert type(verb) is bool, msg # Check path, name, mode path, name, mode = get_pathfileext(path=path, name=name, path_def=obj.Id.SavePath, name_def=obj.Id.SaveName, mode=mode) # Update self._Id fields obj._Id._SavePath = path if name!=obj.Id.SaveName: obj._Id.set_SaveName(name) # Get stripped dictionnary deep = 'dict' if deep else 'ref' dd = obj.to_dict(strip=strip, sep=sep, deep=deep) pathfileext = os.path.join(path,name+'.'+mode) if mode=='npz': _save_npz(dd, pathfileext, compressed=compressed) elif mode=='mat': _save_mat(dd, pathfileext, compressed=compressed) # print if verb: msg = "Saved in :\n" msg += " "+pathfileext print(msg) if return_pfe: return pathfileext
python
def save(obj, path=None, name=None, sep=_sep, deep=False, mode='npz', strip=None, compressed=False, verb=True, return_pfe=False): """ Save the ToFu object ToFu provides built-in saving and loading functions for ToFu objects. Specifying saving path ad name is optional (defaults recommended) The mode refers to the file format Good practices are: - save all struct objects Parameters ---------- obj : ToFuObject subclass instance The object to be saved path : None / str The folder where to save, if None (recommended), uses obj.Id.SavePath name : None / str The file name, if None (recommended), uses obj.Id.SaveName mode : str Flag specifying the saving mode - 'npz': numpy file - 'mat': matlab file strip: int Flag indicating how stripped the saved object should be See docstring of self.strip() deep: bool Flag, used when the object has other tofu objects as attributes Indicated whether these attribute object should be: - True: converted to dict themselves in order to be saved inside the same file as attributes (-> uses self.to_dict(deep='dict')) - False: not converted, in that the strategy would be to save them separately and store only the reference to the saved files instead of the objects themselves. To do this, you must: 1/ Save all object attributes independently 2/ Store only the reference by doing self.strip(-1) The strip() method will check they have been saved before removing them, and throw an Exception otherwise 3/ self.save(deep=False) compressed : bool Flag indicating whether to compress the file (slower, not recommended) verb : bool Flag indicating whether to print a summary (recommended) """ msg = "Arg obj must be a tofu subclass instance !" assert issubclass(obj.__class__, ToFuObject), msg msg = "Arg path must be None or a str (folder) !" assert path is None or isinstance(path,str), msg msg = "Arg name must be None or a str (file name) !" assert name is None or isinstance(name,str), msg msg = "Arg mode must be in ['npz','mat'] !" 
assert mode in ['npz','mat'], msg msg = "Arg compressed must be a bool !" assert type(compressed) is bool, msg msg = "Arg verb must be a bool !" assert type(verb) is bool, msg # Check path, name, mode path, name, mode = get_pathfileext(path=path, name=name, path_def=obj.Id.SavePath, name_def=obj.Id.SaveName, mode=mode) # Update self._Id fields obj._Id._SavePath = path if name!=obj.Id.SaveName: obj._Id.set_SaveName(name) # Get stripped dictionnary deep = 'dict' if deep else 'ref' dd = obj.to_dict(strip=strip, sep=sep, deep=deep) pathfileext = os.path.join(path,name+'.'+mode) if mode=='npz': _save_npz(dd, pathfileext, compressed=compressed) elif mode=='mat': _save_mat(dd, pathfileext, compressed=compressed) # print if verb: msg = "Saved in :\n" msg += " "+pathfileext print(msg) if return_pfe: return pathfileext
[ "def", "save", "(", "obj", ",", "path", "=", "None", ",", "name", "=", "None", ",", "sep", "=", "_sep", ",", "deep", "=", "False", ",", "mode", "=", "'npz'", ",", "strip", "=", "None", ",", "compressed", "=", "False", ",", "verb", "=", "True", ...
Save the ToFu object ToFu provides built-in saving and loading functions for ToFu objects. Specifying saving path ad name is optional (defaults recommended) The mode refers to the file format Good practices are: - save all struct objects Parameters ---------- obj : ToFuObject subclass instance The object to be saved path : None / str The folder where to save, if None (recommended), uses obj.Id.SavePath name : None / str The file name, if None (recommended), uses obj.Id.SaveName mode : str Flag specifying the saving mode - 'npz': numpy file - 'mat': matlab file strip: int Flag indicating how stripped the saved object should be See docstring of self.strip() deep: bool Flag, used when the object has other tofu objects as attributes Indicated whether these attribute object should be: - True: converted to dict themselves in order to be saved inside the same file as attributes (-> uses self.to_dict(deep='dict')) - False: not converted, in that the strategy would be to save them separately and store only the reference to the saved files instead of the objects themselves. To do this, you must: 1/ Save all object attributes independently 2/ Store only the reference by doing self.strip(-1) The strip() method will check they have been saved before removing them, and throw an Exception otherwise 3/ self.save(deep=False) compressed : bool Flag indicating whether to compress the file (slower, not recommended) verb : bool Flag indicating whether to print a summary (recommended)
[ "Save", "the", "ToFu", "object" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/utils.py#L241-L328
train
33,077
ToFuProject/tofu
tofu/utils.py
load
def load(name, path=None, strip=None, verb=True): """ Load a tofu object file Can load from .npz or .txt files In future versions, will also load from .mat The file must have been saved with tofu (i.e.: must be tofu-formatted) The associated tofu object will be created and returned Parameters ---------- name: str Name of the file to load from, can include the path path: None / str Path where the file is located (if not provided in name), defaults './' strip: None / int FLag indicating whether to strip the object of some attributes => see the docstring of the class strip() method for details verb: bool Flag indocating whether to print a summary of the loaded file """ lmodes = ['.npz','.mat','.txt'] name, mode, pfe = _filefind(name=name, path=path, lmodes=lmodes) if mode == 'txt': obj = _load_from_txt(name, pfe) else: if mode == 'npz': dd = _load_npz(pfe) elif mode == 'mat': dd = _load_mat(pfe) # Recreate from dict exec("import tofu.{0} as mod".format(dd['dId_dall_Mod'])) obj = eval("mod.{0}(fromdict=dd)".format(dd['dId_dall_Cls'])) if strip is not None: obj.strip(strip=strip) # print if verb: msg = "Loaded from:\n" msg += " "+pfe print(msg) return obj
python
def load(name, path=None, strip=None, verb=True): """ Load a tofu object file Can load from .npz or .txt files In future versions, will also load from .mat The file must have been saved with tofu (i.e.: must be tofu-formatted) The associated tofu object will be created and returned Parameters ---------- name: str Name of the file to load from, can include the path path: None / str Path where the file is located (if not provided in name), defaults './' strip: None / int FLag indicating whether to strip the object of some attributes => see the docstring of the class strip() method for details verb: bool Flag indocating whether to print a summary of the loaded file """ lmodes = ['.npz','.mat','.txt'] name, mode, pfe = _filefind(name=name, path=path, lmodes=lmodes) if mode == 'txt': obj = _load_from_txt(name, pfe) else: if mode == 'npz': dd = _load_npz(pfe) elif mode == 'mat': dd = _load_mat(pfe) # Recreate from dict exec("import tofu.{0} as mod".format(dd['dId_dall_Mod'])) obj = eval("mod.{0}(fromdict=dd)".format(dd['dId_dall_Cls'])) if strip is not None: obj.strip(strip=strip) # print if verb: msg = "Loaded from:\n" msg += " "+pfe print(msg) return obj
[ "def", "load", "(", "name", ",", "path", "=", "None", ",", "strip", "=", "None", ",", "verb", "=", "True", ")", ":", "lmodes", "=", "[", "'.npz'", ",", "'.mat'", ",", "'.txt'", "]", "name", ",", "mode", ",", "pfe", "=", "_filefind", "(", "name", ...
Load a tofu object file Can load from .npz or .txt files In future versions, will also load from .mat The file must have been saved with tofu (i.e.: must be tofu-formatted) The associated tofu object will be created and returned Parameters ---------- name: str Name of the file to load from, can include the path path: None / str Path where the file is located (if not provided in name), defaults './' strip: None / int FLag indicating whether to strip the object of some attributes => see the docstring of the class strip() method for details verb: bool Flag indocating whether to print a summary of the loaded file
[ "Load", "a", "tofu", "object", "file" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/utils.py#L442-L487
train
33,078
ToFuProject/tofu
tofu/utils.py
ToFuObjectBase.to_dict
def to_dict(self, strip=None, sep=_sep, deep='ref'): """ Return a flat dict view of the object's attributes Useful for: * displaying all attributes * saving to file * exchaning data with non-tofu libraries Parameters ---------- strip : int Flag indicating how stripped the object should be Fed to self.strip() sep : str Separator char used for flattening the dict The output dict is flat (i.e.: no nested dict) Keys are created from the keys of nested dict, separated by sep deep: str Flag indicating how to behave when an attribute is itself a tofu object. The associated field in the exported dict can be: - 'ref' : a simple reference to the object - 'copy': a tofu object itself (i.e.: a copy of the original) - 'dict': the tofu object is itself exported as a dict (using also self.to_dict()) Return ------ dout : dict Flat dict containing all the objects attributes """ if deep not in ['ref','copy','dict']: msg = "Arg deep must be a flag in ['ref','copy','dict'] !" raise Exception(msg) if strip is None: strip = self._dstrip['strip'] if self._dstrip['strip'] != strip: self.strip(strip) # --------------------- # Call class-specific dd = self._to_dict() # --------------------- dd['dId'] = self._get_dId() dd['dstrip'] = {'dict':self._dstrip, 'lexcept':None} dout = {} for k, v in dd.items(): lexcept_key = v.get('lexcept_key', None) try: d = flatten_dict(v['dict'], parent_key='', sep=sep, deep=deep, lexcept_key=lexcept_key) except Exception as err: msg = str(err) msg += "\nIssue flattening dict %s"%k msg += "\n\n\n" + str(v['dict']) raise Exception(msg) dout[k] = d dout = flatten_dict(dout, parent_key='', sep=sep, deep=deep) return dout
python
def to_dict(self, strip=None, sep=_sep, deep='ref'): """ Return a flat dict view of the object's attributes Useful for: * displaying all attributes * saving to file * exchaning data with non-tofu libraries Parameters ---------- strip : int Flag indicating how stripped the object should be Fed to self.strip() sep : str Separator char used for flattening the dict The output dict is flat (i.e.: no nested dict) Keys are created from the keys of nested dict, separated by sep deep: str Flag indicating how to behave when an attribute is itself a tofu object. The associated field in the exported dict can be: - 'ref' : a simple reference to the object - 'copy': a tofu object itself (i.e.: a copy of the original) - 'dict': the tofu object is itself exported as a dict (using also self.to_dict()) Return ------ dout : dict Flat dict containing all the objects attributes """ if deep not in ['ref','copy','dict']: msg = "Arg deep must be a flag in ['ref','copy','dict'] !" raise Exception(msg) if strip is None: strip = self._dstrip['strip'] if self._dstrip['strip'] != strip: self.strip(strip) # --------------------- # Call class-specific dd = self._to_dict() # --------------------- dd['dId'] = self._get_dId() dd['dstrip'] = {'dict':self._dstrip, 'lexcept':None} dout = {} for k, v in dd.items(): lexcept_key = v.get('lexcept_key', None) try: d = flatten_dict(v['dict'], parent_key='', sep=sep, deep=deep, lexcept_key=lexcept_key) except Exception as err: msg = str(err) msg += "\nIssue flattening dict %s"%k msg += "\n\n\n" + str(v['dict']) raise Exception(msg) dout[k] = d dout = flatten_dict(dout, parent_key='', sep=sep, deep=deep) return dout
[ "def", "to_dict", "(", "self", ",", "strip", "=", "None", ",", "sep", "=", "_sep", ",", "deep", "=", "'ref'", ")", ":", "if", "deep", "not", "in", "[", "'ref'", ",", "'copy'", ",", "'dict'", "]", ":", "msg", "=", "\"Arg deep must be a flag in ['ref','c...
Return a flat dict view of the object's attributes Useful for: * displaying all attributes * saving to file * exchaning data with non-tofu libraries Parameters ---------- strip : int Flag indicating how stripped the object should be Fed to self.strip() sep : str Separator char used for flattening the dict The output dict is flat (i.e.: no nested dict) Keys are created from the keys of nested dict, separated by sep deep: str Flag indicating how to behave when an attribute is itself a tofu object. The associated field in the exported dict can be: - 'ref' : a simple reference to the object - 'copy': a tofu object itself (i.e.: a copy of the original) - 'dict': the tofu object is itself exported as a dict (using also self.to_dict()) Return ------ dout : dict Flat dict containing all the objects attributes
[ "Return", "a", "flat", "dict", "view", "of", "the", "object", "s", "attributes" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/utils.py#L826-L886
train
33,079
ToFuProject/tofu
tofu/utils.py
ToFuObjectBase.from_dict
def from_dict(self, fd, sep=_sep, strip=None): """ Populate the instances attributes using an input dict The input dict must be properly formatted In practice it should be the return output of a similar class to_dict() Parameters ---------- fd : dict The properly formatted ditionnary from which to read the attributes sep : str The separator that was used to format fd keys (cf. self.to_dict()) strip : int Flag indicating how stripped the resulting object shouyld be (cf. self.strip()) """ self._reset() dd = reshape_dict(fd) # --------------------- # Call class-specific self._from_dict(dd) # --------------------- self._dstrip.update(**dd['dstrip']) if 'dId' in dd.keys(): self._set_Id(Id=ID(fromdict=dd['dId'])) if strip is None: strip = self._dstrip['strip'] if self._dstrip['strip'] != strip: self.strip(strip, verb=verb)
python
def from_dict(self, fd, sep=_sep, strip=None): """ Populate the instances attributes using an input dict The input dict must be properly formatted In practice it should be the return output of a similar class to_dict() Parameters ---------- fd : dict The properly formatted ditionnary from which to read the attributes sep : str The separator that was used to format fd keys (cf. self.to_dict()) strip : int Flag indicating how stripped the resulting object shouyld be (cf. self.strip()) """ self._reset() dd = reshape_dict(fd) # --------------------- # Call class-specific self._from_dict(dd) # --------------------- self._dstrip.update(**dd['dstrip']) if 'dId' in dd.keys(): self._set_Id(Id=ID(fromdict=dd['dId'])) if strip is None: strip = self._dstrip['strip'] if self._dstrip['strip'] != strip: self.strip(strip, verb=verb)
[ "def", "from_dict", "(", "self", ",", "fd", ",", "sep", "=", "_sep", ",", "strip", "=", "None", ")", ":", "self", ".", "_reset", "(", ")", "dd", "=", "reshape_dict", "(", "fd", ")", "# ---------------------", "# Call class-specific", "self", ".", "_from_...
Populate the instances attributes using an input dict The input dict must be properly formatted In practice it should be the return output of a similar class to_dict() Parameters ---------- fd : dict The properly formatted ditionnary from which to read the attributes sep : str The separator that was used to format fd keys (cf. self.to_dict()) strip : int Flag indicating how stripped the resulting object shouyld be (cf. self.strip())
[ "Populate", "the", "instances", "attributes", "using", "an", "input", "dict" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/utils.py#L892-L923
train
33,080
ToFuProject/tofu
tofu/utils.py
ToFuObjectBase.copy
def copy(self, strip=None, deep='ref'): """ Return another instance of the object, with the same attributes If deep=True, all attributes themselves are also copies """ dd = self.to_dict(strip=strip, deep=deep) return self.__class__(fromdict=dd)
python
def copy(self, strip=None, deep='ref'): """ Return another instance of the object, with the same attributes If deep=True, all attributes themselves are also copies """ dd = self.to_dict(strip=strip, deep=deep) return self.__class__(fromdict=dd)
[ "def", "copy", "(", "self", ",", "strip", "=", "None", ",", "deep", "=", "'ref'", ")", ":", "dd", "=", "self", ".", "to_dict", "(", "strip", "=", "strip", ",", "deep", "=", "deep", ")", "return", "self", ".", "__class__", "(", "fromdict", "=", "d...
Return another instance of the object, with the same attributes If deep=True, all attributes themselves are also copies
[ "Return", "another", "instance", "of", "the", "object", "with", "the", "same", "attributes" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/utils.py#L925-L931
train
33,081
ToFuProject/tofu
tofu/utils.py
ID.set_lObj
def set_lObj(self, lObj=None): """ Set the lObj attribute, storing objects the instance depends on For example: A Detect object depends on a vessel and some apertures That link between should be stored somewhere (for saving/loading). lObj does this: it stores the ID (as dict) of all objects depended on. Parameters ---------- lObj : None / dict / :class:`~tofu.pathfile.ID` / list of such Provide either: - A dict (derived from :meth:`~tofu.pathfile.ID._todict`) - A :class:`~tofu.pathfile.ID` instance - A list of dict or :class:`~tofu.pathfile.ID` instances """ if self.lObj is None and lObj is not None: self._dall['lObj'] = {} if lObj is not None: if type(lObj) is not list: lObj = [lObj] for ii in range(0,len(lObj)): if type(lObj[ii]) is ID: lObj[ii] = lObj[ii].to_dict() ClsU = list(set([oo['Cls'] for oo in lObj])) for c in ClsU: self._dall['lObj'][c] = [oo for oo in lObj if oo['Cls']==c]
python
def set_lObj(self, lObj=None): """ Set the lObj attribute, storing objects the instance depends on For example: A Detect object depends on a vessel and some apertures That link between should be stored somewhere (for saving/loading). lObj does this: it stores the ID (as dict) of all objects depended on. Parameters ---------- lObj : None / dict / :class:`~tofu.pathfile.ID` / list of such Provide either: - A dict (derived from :meth:`~tofu.pathfile.ID._todict`) - A :class:`~tofu.pathfile.ID` instance - A list of dict or :class:`~tofu.pathfile.ID` instances """ if self.lObj is None and lObj is not None: self._dall['lObj'] = {} if lObj is not None: if type(lObj) is not list: lObj = [lObj] for ii in range(0,len(lObj)): if type(lObj[ii]) is ID: lObj[ii] = lObj[ii].to_dict() ClsU = list(set([oo['Cls'] for oo in lObj])) for c in ClsU: self._dall['lObj'][c] = [oo for oo in lObj if oo['Cls']==c]
[ "def", "set_lObj", "(", "self", ",", "lObj", "=", "None", ")", ":", "if", "self", ".", "lObj", "is", "None", "and", "lObj", "is", "not", "None", ":", "self", ".", "_dall", "[", "'lObj'", "]", "=", "{", "}", "if", "lObj", "is", "not", "None", ":...
Set the lObj attribute, storing objects the instance depends on For example: A Detect object depends on a vessel and some apertures That link between should be stored somewhere (for saving/loading). lObj does this: it stores the ID (as dict) of all objects depended on. Parameters ---------- lObj : None / dict / :class:`~tofu.pathfile.ID` / list of such Provide either: - A dict (derived from :meth:`~tofu.pathfile.ID._todict`) - A :class:`~tofu.pathfile.ID` instance - A list of dict or :class:`~tofu.pathfile.ID` instances
[ "Set", "the", "lObj", "attribute", "storing", "objects", "the", "instance", "depends", "on" ]
39d6b2e7ced9e13666572dfd37e19403f1d6ff8d
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/utils.py#L1587-L1614
train
33,082
piglei/uwsgi-sloth
uwsgi_sloth/tailer.py
Tailer.seek_line_forward
def seek_line_forward(self): """\ Searches forward from the current file position for a line terminator and seeks to the charachter after it. """ pos = start_pos = self.file.tell() bytes_read, read_str = self.read(self.read_size) start = 0 if bytes_read and read_str[0] in self.line_terminators: # The first charachter is a line terminator, don't count this one start += 1 while bytes_read > 0: # Scan forwards, counting the newlines in this bufferfull i = start while i < bytes_read: if read_str[i] in self.line_terminators: self.seek(pos + i + 1) return self.file.tell() i += 1 pos += self.read_size self.seek(pos) bytes_read, read_str = self.read(self.read_size) return None
python
def seek_line_forward(self): """\ Searches forward from the current file position for a line terminator and seeks to the charachter after it. """ pos = start_pos = self.file.tell() bytes_read, read_str = self.read(self.read_size) start = 0 if bytes_read and read_str[0] in self.line_terminators: # The first charachter is a line terminator, don't count this one start += 1 while bytes_read > 0: # Scan forwards, counting the newlines in this bufferfull i = start while i < bytes_read: if read_str[i] in self.line_terminators: self.seek(pos + i + 1) return self.file.tell() i += 1 pos += self.read_size self.seek(pos) bytes_read, read_str = self.read(self.read_size) return None
[ "def", "seek_line_forward", "(", "self", ")", ":", "pos", "=", "start_pos", "=", "self", ".", "file", ".", "tell", "(", ")", "bytes_read", ",", "read_str", "=", "self", ".", "read", "(", "self", ".", "read_size", ")", "start", "=", "0", "if", "bytes_...
\ Searches forward from the current file position for a line terminator and seeks to the charachter after it.
[ "\\", "Searches", "forward", "from", "the", "current", "file", "position", "for", "a", "line", "terminator", "and", "seeks", "to", "the", "charachter", "after", "it", "." ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/tailer.py#L53-L81
train
33,083
piglei/uwsgi-sloth
uwsgi_sloth/tailer.py
Tailer.seek_line
def seek_line(self): """\ Searches backwards from the current file position for a line terminator and seeks to the charachter after it. """ pos = end_pos = self.file.tell() read_size = self.read_size if pos > read_size: pos -= read_size else: pos = 0 read_size = end_pos self.seek(pos) bytes_read, read_str = self.read(read_size) if bytes_read and read_str[-1] in self.line_terminators: # The last charachter is a line terminator, don't count this one bytes_read -= 1 if read_str[-2:] == '\r\n' and '\r\n' in self.line_terminators: # found crlf bytes_read -= 1 while bytes_read > 0: # Scan backward, counting the newlines in this bufferfull i = bytes_read - 1 while i >= 0: if read_str[i] in self.line_terminators: self.seek(pos + i + 1) return self.file.tell() i -= 1 if pos == 0 or pos - self.read_size < 0: # Not enought lines in the buffer, send the whole file self.seek(0) return None pos -= self.read_size self.seek(pos) bytes_read, read_str = self.read(self.read_size) return None
python
def seek_line(self): """\ Searches backwards from the current file position for a line terminator and seeks to the charachter after it. """ pos = end_pos = self.file.tell() read_size = self.read_size if pos > read_size: pos -= read_size else: pos = 0 read_size = end_pos self.seek(pos) bytes_read, read_str = self.read(read_size) if bytes_read and read_str[-1] in self.line_terminators: # The last charachter is a line terminator, don't count this one bytes_read -= 1 if read_str[-2:] == '\r\n' and '\r\n' in self.line_terminators: # found crlf bytes_read -= 1 while bytes_read > 0: # Scan backward, counting the newlines in this bufferfull i = bytes_read - 1 while i >= 0: if read_str[i] in self.line_terminators: self.seek(pos + i + 1) return self.file.tell() i -= 1 if pos == 0 or pos - self.read_size < 0: # Not enought lines in the buffer, send the whole file self.seek(0) return None pos -= self.read_size self.seek(pos) bytes_read, read_str = self.read(self.read_size) return None
[ "def", "seek_line", "(", "self", ")", ":", "pos", "=", "end_pos", "=", "self", ".", "file", ".", "tell", "(", ")", "read_size", "=", "self", ".", "read_size", "if", "pos", ">", "read_size", ":", "pos", "-=", "read_size", "else", ":", "pos", "=", "0...
\ Searches backwards from the current file position for a line terminator and seeks to the charachter after it.
[ "\\", "Searches", "backwards", "from", "the", "current", "file", "position", "for", "a", "line", "terminator", "and", "seeks", "to", "the", "charachter", "after", "it", "." ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/tailer.py#L83-L128
train
33,084
piglei/uwsgi-sloth
uwsgi_sloth/tailer.py
Tailer.tail
def tail(self, lines=10): """\ Return the last lines of the file. """ self.seek_end() end_pos = self.file.tell() for i in range(lines): if not self.seek_line(): break data = self.file.read(end_pos - self.file.tell() - 1) if data: return self.splitlines(data) else: return []
python
def tail(self, lines=10): """\ Return the last lines of the file. """ self.seek_end() end_pos = self.file.tell() for i in range(lines): if not self.seek_line(): break data = self.file.read(end_pos - self.file.tell() - 1) if data: return self.splitlines(data) else: return []
[ "def", "tail", "(", "self", ",", "lines", "=", "10", ")", ":", "self", ".", "seek_end", "(", ")", "end_pos", "=", "self", ".", "file", ".", "tell", "(", ")", "for", "i", "in", "range", "(", "lines", ")", ":", "if", "not", "self", ".", "seek_lin...
\ Return the last lines of the file.
[ "\\", "Return", "the", "last", "lines", "of", "the", "file", "." ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/tailer.py#L130-L145
train
33,085
piglei/uwsgi-sloth
uwsgi_sloth/tailer.py
Tailer.head
def head(self, lines=10): """\ Return the top lines of the file. """ self.seek(0) for i in range(lines): if not self.seek_line_forward(): break end_pos = self.file.tell() self.seek(0) data = self.file.read(end_pos - 1) if data: return self.splitlines(data) else: return []
python
def head(self, lines=10): """\ Return the top lines of the file. """ self.seek(0) for i in range(lines): if not self.seek_line_forward(): break end_pos = self.file.tell() self.seek(0) data = self.file.read(end_pos - 1) if data: return self.splitlines(data) else: return []
[ "def", "head", "(", "self", ",", "lines", "=", "10", ")", ":", "self", ".", "seek", "(", "0", ")", "for", "i", "in", "range", "(", "lines", ")", ":", "if", "not", "self", ".", "seek_line_forward", "(", ")", ":", "break", "end_pos", "=", "self", ...
\ Return the top lines of the file.
[ "\\", "Return", "the", "top", "lines", "of", "the", "file", "." ]
2834ac5ed17d89ca5f19151c649ac610f6f37bd1
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/tailer.py#L147-L165
train
33,086
rodricios/eatiht
eatiht/v2.py
get_html_tree
def get_html_tree(filename_url_or_filelike): """From some file path, input stream, or URL, construct and return an HTML tree. """ try: handler = ( HTTPSHandler if filename_url_or_filelike.lower().startswith('https') else HTTPHandler ) cj = CookieJar() opener = build_opener(handler) opener.add_handler(HTTPCookieProcessor(cj)) resp = opener.open(filename_url_or_filelike) except(AttributeError): content = filename_url_or_filelike.read() encoding = chardet.detect(content)['encoding'] parsed_html = html.parse(BytesIO(content), html.HTMLParser(encoding=encoding, remove_blank_text=True)) return parsed_html except(ValueError): content = filename_url_or_filelike encoding = chardet.detect(content)['encoding'] parsed_html = html.parse(BytesIO(content), html.HTMLParser(encoding=encoding, remove_blank_text=True)) return parsed_html try: content = resp.read() finally: resp.close() encoding = chardet.detect(content)['encoding'] parsed_html = html.parse(BytesIO(content), html.HTMLParser(encoding=encoding, remove_blank_text=True)) return parsed_html
python
def get_html_tree(filename_url_or_filelike): """From some file path, input stream, or URL, construct and return an HTML tree. """ try: handler = ( HTTPSHandler if filename_url_or_filelike.lower().startswith('https') else HTTPHandler ) cj = CookieJar() opener = build_opener(handler) opener.add_handler(HTTPCookieProcessor(cj)) resp = opener.open(filename_url_or_filelike) except(AttributeError): content = filename_url_or_filelike.read() encoding = chardet.detect(content)['encoding'] parsed_html = html.parse(BytesIO(content), html.HTMLParser(encoding=encoding, remove_blank_text=True)) return parsed_html except(ValueError): content = filename_url_or_filelike encoding = chardet.detect(content)['encoding'] parsed_html = html.parse(BytesIO(content), html.HTMLParser(encoding=encoding, remove_blank_text=True)) return parsed_html try: content = resp.read() finally: resp.close() encoding = chardet.detect(content)['encoding'] parsed_html = html.parse(BytesIO(content), html.HTMLParser(encoding=encoding, remove_blank_text=True)) return parsed_html
[ "def", "get_html_tree", "(", "filename_url_or_filelike", ")", ":", "try", ":", "handler", "=", "(", "HTTPSHandler", "if", "filename_url_or_filelike", ".", "lower", "(", ")", ".", "startswith", "(", "'https'", ")", "else", "HTTPHandler", ")", "cj", "=", "Cookie...
From some file path, input stream, or URL, construct and return an HTML tree.
[ "From", "some", "file", "path", "input", "stream", "or", "URL", "construct", "and", "return", "an", "HTML", "tree", "." ]
7341c46327cfe7e0c9f226ef5e9808975c4d43da
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/v2.py#L113-L158
train
33,087
rodricios/eatiht
eatiht/v2.py
calc_across_paths_textnodes
def calc_across_paths_textnodes(paths_nodes, dbg=False): """Given a list of parent paths tupled with children textnodes, plus initialized feature values, we calculate the total and average string length of the parent's children textnodes. """ # for (path, [textnodes], # num. of tnodes, # ttl strlen across tnodes, # avg strlen across tnodes.]) for path_nodes in paths_nodes: cnt = len(path_nodes[1][0]) ttl = sum([len(s) for s in paths_nodes[1][0]]) # calculate total path_nodes[1][1] = cnt # cardinality path_nodes[1][2] = ttl # total path_nodes[1][3] = ttl/ cnt # average if dbg: print(path_nodes[1])
python
def calc_across_paths_textnodes(paths_nodes, dbg=False): """Given a list of parent paths tupled with children textnodes, plus initialized feature values, we calculate the total and average string length of the parent's children textnodes. """ # for (path, [textnodes], # num. of tnodes, # ttl strlen across tnodes, # avg strlen across tnodes.]) for path_nodes in paths_nodes: cnt = len(path_nodes[1][0]) ttl = sum([len(s) for s in paths_nodes[1][0]]) # calculate total path_nodes[1][1] = cnt # cardinality path_nodes[1][2] = ttl # total path_nodes[1][3] = ttl/ cnt # average if dbg: print(path_nodes[1])
[ "def", "calc_across_paths_textnodes", "(", "paths_nodes", ",", "dbg", "=", "False", ")", ":", "# for (path, [textnodes],\r", "# num. of tnodes,\r", "# ttl strlen across tnodes,\r", "# avg strlen across tnodes.])\r", "for", "path_nodes", "in", "paths_no...
Given a list of parent paths tupled with children textnodes, plus initialized feature values, we calculate the total and average string length of the parent's children textnodes.
[ "Given", "a", "list", "of", "parent", "paths", "tupled", "with", "children", "textnodes", "plus", "initialized", "feature", "values", "we", "calculate", "the", "total", "and", "average", "string", "length", "of", "the", "parent", "s", "children", "textnodes", ...
7341c46327cfe7e0c9f226ef5e9808975c4d43da
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/v2.py#L194-L211
train
33,088
rodricios/eatiht
eatiht/v2.py
extract
def extract(filename_url_or_filelike): """A more precise algorithm over the original eatiht algorithm """ pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike) #[iterable, cardinality, ttl across iterable, avg across iterable.]) calc_across_paths_textnodes(pars_tnodes) avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes) filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes if parpath_tnodes[1][2] > avg] paths = [path for path, tnode in filtered] hist = get_xpath_frequencydistribution(paths) try: target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par] target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes]) return target_text except IndexError: return ""
python
def extract(filename_url_or_filelike): """A more precise algorithm over the original eatiht algorithm """ pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike) #[iterable, cardinality, ttl across iterable, avg across iterable.]) calc_across_paths_textnodes(pars_tnodes) avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes) filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes if parpath_tnodes[1][2] > avg] paths = [path for path, tnode in filtered] hist = get_xpath_frequencydistribution(paths) try: target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par] target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes]) return target_text except IndexError: return ""
[ "def", "extract", "(", "filename_url_or_filelike", ")", ":", "pars_tnodes", "=", "get_parent_xpaths_and_textnodes", "(", "filename_url_or_filelike", ")", "#[iterable, cardinality, ttl across iterable, avg across iterable.])\r", "calc_across_paths_textnodes", "(", "pars_tnodes", ")", ...
A more precise algorithm over the original eatiht algorithm
[ "A", "more", "precise", "algorithm", "over", "the", "original", "eatiht", "algorithm" ]
7341c46327cfe7e0c9f226ef5e9808975c4d43da
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/v2.py#L253-L275
train
33,089
rodricios/eatiht
eatiht/etv2.py
extract
def extract(filename_url_filelike_or_htmlstring): """An "improved" algorithm over the original eatiht algorithm """ html_tree = get_html_tree(filename_url_filelike_or_htmlstring) subtrees = get_textnode_subtrees(html_tree) #[iterable, cardinality, ttl across iterable, avg across iterable.]) # calculate AABSL avg, _, _ = calcavg_avgstrlen_subtrees(subtrees) # "high-pass" filter filtered = [subtree for subtree in subtrees if subtree.ttl_strlen > avg] paths = [subtree.parent_path for subtree in filtered] hist = get_xpath_frequencydistribution(paths) target_subtrees = [stree for stree in subtrees if hist[0][0] in stree.parent_path] title = html_tree.find(".//title") return TextNodeTree(title.text_content(), target_subtrees, hist)
python
def extract(filename_url_filelike_or_htmlstring): """An "improved" algorithm over the original eatiht algorithm """ html_tree = get_html_tree(filename_url_filelike_or_htmlstring) subtrees = get_textnode_subtrees(html_tree) #[iterable, cardinality, ttl across iterable, avg across iterable.]) # calculate AABSL avg, _, _ = calcavg_avgstrlen_subtrees(subtrees) # "high-pass" filter filtered = [subtree for subtree in subtrees if subtree.ttl_strlen > avg] paths = [subtree.parent_path for subtree in filtered] hist = get_xpath_frequencydistribution(paths) target_subtrees = [stree for stree in subtrees if hist[0][0] in stree.parent_path] title = html_tree.find(".//title") return TextNodeTree(title.text_content(), target_subtrees, hist)
[ "def", "extract", "(", "filename_url_filelike_or_htmlstring", ")", ":", "html_tree", "=", "get_html_tree", "(", "filename_url_filelike_or_htmlstring", ")", "subtrees", "=", "get_textnode_subtrees", "(", "html_tree", ")", "#[iterable, cardinality, ttl across iterable, avg across i...
An "improved" algorithm over the original eatiht algorithm
[ "An", "improved", "algorithm", "over", "the", "original", "eatiht", "algorithm" ]
7341c46327cfe7e0c9f226ef5e9808975c4d43da
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/etv2.py#L238-L262
train
33,090
rodricios/eatiht
eatiht/eatiht_trees.py
TextNodeSubTree.__learn_oneself
def __learn_oneself(self): """calculate cardinality, total and average string length""" if not self.__parent_path or not self.__text_nodes: raise Exception("This error occurred because the step constructor\ had insufficient textnodes or it had empty string\ for its parent xpath") # Iterate through text nodes and sum up text length # TODO: consider naming this child_count or cardinality # or branch_cnt self.tnodes_cnt = len(self.__text_nodes) # consider naming this total self.ttl_strlen = sum([len(tnode) for tnode in self.__text_nodes]) # consider naming this average self.avg_strlen = self.ttl_strlen/self.tnodes_cnt
python
def __learn_oneself(self): """calculate cardinality, total and average string length""" if not self.__parent_path or not self.__text_nodes: raise Exception("This error occurred because the step constructor\ had insufficient textnodes or it had empty string\ for its parent xpath") # Iterate through text nodes and sum up text length # TODO: consider naming this child_count or cardinality # or branch_cnt self.tnodes_cnt = len(self.__text_nodes) # consider naming this total self.ttl_strlen = sum([len(tnode) for tnode in self.__text_nodes]) # consider naming this average self.avg_strlen = self.ttl_strlen/self.tnodes_cnt
[ "def", "__learn_oneself", "(", "self", ")", ":", "if", "not", "self", ".", "__parent_path", "or", "not", "self", ".", "__text_nodes", ":", "raise", "Exception", "(", "\"This error occurred because the step constructor\\\r\n had insufficient textnode...
calculate cardinality, total and average string length
[ "calculate", "cardinality", "total", "and", "average", "string", "length" ]
7341c46327cfe7e0c9f226ef5e9808975c4d43da
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L161-L174
train
33,091
rodricios/eatiht
eatiht/eatiht_trees.py
TextNodeTree.__make_tree
def __make_tree(self): """Build a tree using lxml.html.builder and our subtrees""" # create div with "container" class div = E.DIV(E.CLASS("container")) # append header with title div.append(E.H2(self.__title)) # next, iterate through subtrees appending each tree to div for subtree in self.__subtrees: div.append(subtree.get_html()) # Connect div to body body = E.BODY(div) # attach body to html self.__htmltree = E.HTML( E.HEAD( E.TITLE(self.__title) ), body )
python
def __make_tree(self): """Build a tree using lxml.html.builder and our subtrees""" # create div with "container" class div = E.DIV(E.CLASS("container")) # append header with title div.append(E.H2(self.__title)) # next, iterate through subtrees appending each tree to div for subtree in self.__subtrees: div.append(subtree.get_html()) # Connect div to body body = E.BODY(div) # attach body to html self.__htmltree = E.HTML( E.HEAD( E.TITLE(self.__title) ), body )
[ "def", "__make_tree", "(", "self", ")", ":", "# create div with \"container\" class\r", "div", "=", "E", ".", "DIV", "(", "E", ".", "CLASS", "(", "\"container\"", ")", ")", "# append header with title\r", "div", ".", "append", "(", "E", ".", "H2", "(", "self...
Build a tree using lxml.html.builder and our subtrees
[ "Build", "a", "tree", "using", "lxml", ".", "html", ".", "builder", "and", "our", "subtrees" ]
7341c46327cfe7e0c9f226ef5e9808975c4d43da
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L228-L250
train
33,092
rodricios/eatiht
eatiht/eatiht_trees.py
TextNodeTree.get_html
def get_html(self): """Generates if need be and returns a simpler html document with text""" if self.__htmltree is not None: return self.__htmltree else: self.__make_tree() return self.__htmltree
python
def get_html(self): """Generates if need be and returns a simpler html document with text""" if self.__htmltree is not None: return self.__htmltree else: self.__make_tree() return self.__htmltree
[ "def", "get_html", "(", "self", ")", ":", "if", "self", ".", "__htmltree", "is", "not", "None", ":", "return", "self", ".", "__htmltree", "else", ":", "self", ".", "__make_tree", "(", ")", "return", "self", ".", "__htmltree" ]
Generates if need be and returns a simpler html document with text
[ "Generates", "if", "need", "be", "and", "returns", "a", "simpler", "html", "document", "with", "text" ]
7341c46327cfe7e0c9f226ef5e9808975c4d43da
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L252-L258
train
33,093
rodricios/eatiht
eatiht/eatiht_trees.py
TextNodeTree.get_html_string
def get_html_string(self): """Generates if need be and returns a simpler html string with extracted text""" if self.__htmltree is not None: return htmltostring(self.__htmltree) else: self.__make_tree() return htmltostring(self.__htmltree)
python
def get_html_string(self): """Generates if need be and returns a simpler html string with extracted text""" if self.__htmltree is not None: return htmltostring(self.__htmltree) else: self.__make_tree() return htmltostring(self.__htmltree)
[ "def", "get_html_string", "(", "self", ")", ":", "if", "self", ".", "__htmltree", "is", "not", "None", ":", "return", "htmltostring", "(", "self", ".", "__htmltree", ")", "else", ":", "self", ".", "__make_tree", "(", ")", "return", "htmltostring", "(", "...
Generates if need be and returns a simpler html string with extracted text
[ "Generates", "if", "need", "be", "and", "returns", "a", "simpler", "html", "string", "with", "extracted", "text" ]
7341c46327cfe7e0c9f226ef5e9808975c4d43da
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L260-L267
train
33,094
rodricios/eatiht
eatiht/eatiht_trees.py
TextNodeTree.get_text
def get_text(self): """Return all joined text from each subtree""" if self.__fulltext: return self.__fulltext else: self.__fulltext = "\n\n".join(text.get_text() for text in self.__subtrees) return self.__fulltext
python
def get_text(self): """Return all joined text from each subtree""" if self.__fulltext: return self.__fulltext else: self.__fulltext = "\n\n".join(text.get_text() for text in self.__subtrees) return self.__fulltext
[ "def", "get_text", "(", "self", ")", ":", "if", "self", ".", "__fulltext", ":", "return", "self", ".", "__fulltext", "else", ":", "self", ".", "__fulltext", "=", "\"\\n\\n\"", ".", "join", "(", "text", ".", "get_text", "(", ")", "for", "text", "in", ...
Return all joined text from each subtree
[ "Return", "all", "joined", "text", "from", "each", "subtree" ]
7341c46327cfe7e0c9f226ef5e9808975c4d43da
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L269-L276
train
33,095
rodricios/eatiht
eatiht/eatiht_trees.py
TextNodeTree.bootstrapify
def bootstrapify(self): """Add bootstrap cdn to headers of html""" if self.__htmltree is None: #raise Exception("HtmlTree has not been made yet") self.__make_tree() # add bootstrap cdn to head self.__htmltree.find('head').append( E.LINK(rel="stylesheet", href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css", type="text/css")) # center images for img_parent in self.__htmltree.xpath("//img/.."): # the space before the class to insert is CRITICAL! img_parent.attrib["class"] += " text-center" # make images responsive for img in self.__htmltree.xpath("//img"): # the space before the class to insert is CRITICAL! img.attrib["class"] += " img-responsive"
python
def bootstrapify(self): """Add bootstrap cdn to headers of html""" if self.__htmltree is None: #raise Exception("HtmlTree has not been made yet") self.__make_tree() # add bootstrap cdn to head self.__htmltree.find('head').append( E.LINK(rel="stylesheet", href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css", type="text/css")) # center images for img_parent in self.__htmltree.xpath("//img/.."): # the space before the class to insert is CRITICAL! img_parent.attrib["class"] += " text-center" # make images responsive for img in self.__htmltree.xpath("//img"): # the space before the class to insert is CRITICAL! img.attrib["class"] += " img-responsive"
[ "def", "bootstrapify", "(", "self", ")", ":", "if", "self", ".", "__htmltree", "is", "None", ":", "#raise Exception(\"HtmlTree has not been made yet\")\r", "self", ".", "__make_tree", "(", ")", "# add bootstrap cdn to head\r", "self", ".", "__htmltree", ".", "find", ...
Add bootstrap cdn to headers of html
[ "Add", "bootstrap", "cdn", "to", "headers", "of", "html" ]
7341c46327cfe7e0c9f226ef5e9808975c4d43da
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L280-L300
train
33,096
markokr/rarfile
dumprar.py
rar3_type
def rar3_type(btype): """RAR3 type code as string.""" if btype < rf.RAR_BLOCK_MARK or btype > rf.RAR_BLOCK_ENDARC: return "*UNKNOWN*" return block_strs[btype - rf.RAR_BLOCK_MARK]
python
def rar3_type(btype): """RAR3 type code as string.""" if btype < rf.RAR_BLOCK_MARK or btype > rf.RAR_BLOCK_ENDARC: return "*UNKNOWN*" return block_strs[btype - rf.RAR_BLOCK_MARK]
[ "def", "rar3_type", "(", "btype", ")", ":", "if", "btype", "<", "rf", ".", "RAR_BLOCK_MARK", "or", "btype", ">", "rf", ".", "RAR_BLOCK_ENDARC", ":", "return", "\"*UNKNOWN*\"", "return", "block_strs", "[", "btype", "-", "rf", ".", "RAR_BLOCK_MARK", "]" ]
RAR3 type code as string.
[ "RAR3", "type", "code", "as", "string", "." ]
2704344e8d7a1658c96c8ed8f449d7ba01bedea3
https://github.com/markokr/rarfile/blob/2704344e8d7a1658c96c8ed8f449d7ba01bedea3/dumprar.py#L44-L48
train
33,097
markokr/rarfile
dumprar.py
xprint
def xprint(m, *args): """Print string to stdout. Format unicode safely. """ if sys.hexversion < 0x3000000: m = m.decode('utf8') if args: m = m % args if sys.hexversion < 0x3000000: m = m.encode('utf8') sys.stdout.write(m) sys.stdout.write('\n')
python
def xprint(m, *args): """Print string to stdout. Format unicode safely. """ if sys.hexversion < 0x3000000: m = m.decode('utf8') if args: m = m % args if sys.hexversion < 0x3000000: m = m.encode('utf8') sys.stdout.write(m) sys.stdout.write('\n')
[ "def", "xprint", "(", "m", ",", "*", "args", ")", ":", "if", "sys", ".", "hexversion", "<", "0x3000000", ":", "m", "=", "m", ".", "decode", "(", "'utf8'", ")", "if", "args", ":", "m", "=", "m", "%", "args", "if", "sys", ".", "hexversion", "<", ...
Print string to stdout. Format unicode safely.
[ "Print", "string", "to", "stdout", "." ]
2704344e8d7a1658c96c8ed8f449d7ba01bedea3
https://github.com/markokr/rarfile/blob/2704344e8d7a1658c96c8ed8f449d7ba01bedea3/dumprar.py#L154-L166
train
33,098
markokr/rarfile
dumprar.py
render_flags
def render_flags(flags, bit_list): """Show bit names. """ res = [] known = 0 for bit in bit_list: known = known | bit[0] if flags & bit[0]: res.append(bit[1]) unknown = flags & ~known n = 0 while unknown: if unknown & 1: res.append("UNK_%04x" % (1 << n)) unknown = unknown >> 1 n += 1 if not res: return '-' return ",".join(res)
python
def render_flags(flags, bit_list): """Show bit names. """ res = [] known = 0 for bit in bit_list: known = known | bit[0] if flags & bit[0]: res.append(bit[1]) unknown = flags & ~known n = 0 while unknown: if unknown & 1: res.append("UNK_%04x" % (1 << n)) unknown = unknown >> 1 n += 1 if not res: return '-' return ",".join(res)
[ "def", "render_flags", "(", "flags", ",", "bit_list", ")", ":", "res", "=", "[", "]", "known", "=", "0", "for", "bit", "in", "bit_list", ":", "known", "=", "known", "|", "bit", "[", "0", "]", "if", "flags", "&", "bit", "[", "0", "]", ":", "res"...
Show bit names.
[ "Show", "bit", "names", "." ]
2704344e8d7a1658c96c8ed8f449d7ba01bedea3
https://github.com/markokr/rarfile/blob/2704344e8d7a1658c96c8ed8f449d7ba01bedea3/dumprar.py#L169-L189
train
33,099