repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
cloudboss/friend
friend/strings.py
format_obj_keys
def format_obj_keys(obj, formatter):
    """
    Take a dictionary with string keys and recursively convert all keys from
    one form to another using the formatting function.

    The dictionary may contain lists as values, and any nested dictionaries
    within those lists will also be converted.

    :param object obj: The object to convert
    :param function formatter: The formatting function for keys, which takes
        and returns a string
    :returns: A new object with keys converted
    :rtype: object

    :Example:
        ::

            >>> obj = {
            ...     'dict-list': [
            ...         {'one-key': 123, 'two-key': 456},
            ...         {'threeKey': 789, 'four-key': 456},
            ...     ],
            ...     'some-other-key': 'some-unconverted-value'
            ... }
            >>> format_obj_keys(obj, lambda s: s.upper())
            {
                'DICT-LIST': [
                    {'ONE-KEY': 123, 'TWO-KEY': 456},
                    {'FOUR-KEY': 456, 'THREE-KEY': 789}
                ],
                'SOME-OTHER-KEY': 'some-unconverted-value'
            }
    """
    # isinstance() rather than ``type(obj) == ...`` so that subclasses of
    # list/dict (OrderedDict, defaultdict, ...) are converted too.
    if isinstance(obj, list):
        return [format_obj_keys(item, formatter) for item in obj]
    if isinstance(obj, dict):
        return {formatter(key): format_obj_keys(value, formatter)
                for key, value in obj.items()}
    # Any other value is a leaf: return it unchanged.
    return obj
python
def format_obj_keys(obj, formatter): """ Take a dictionary with string keys and recursively convert all keys from one form to another using the formatting function. The dictionary may contain lists as values, and any nested dictionaries within those lists will also be converted. :param object obj: The object to convert :param function formatter: The formatting function for keys, which takes and returns a string :returns: A new object with keys converted :rtype: object :Example: :: >>> obj = { ... 'dict-list': [ ... {'one-key': 123, 'two-key': 456}, ... {'threeKey': 789, 'four-key': 456}, ... ], ... 'some-other-key': 'some-unconverted-value' ... } >>> format_obj_keys(obj, lambda s: s.upper()) { 'DICT-LIST': [ {'ONE-KEY': 123, 'TWO-KEY': 456}, {'FOUR-KEY': 456, 'THREE-KEY': 789} ], 'SOME-OTHER-KEY': 'some-unconverted-value' } """ if type(obj) == list: return [format_obj_keys(o, formatter) for o in obj] elif type(obj) == dict: return {formatter(k): format_obj_keys(v, formatter) for k, v in obj.items()} else: return obj
[ "def", "format_obj_keys", "(", "obj", ",", "formatter", ")", ":", "if", "type", "(", "obj", ")", "==", "list", ":", "return", "[", "format_obj_keys", "(", "o", ",", "formatter", ")", "for", "o", "in", "obj", "]", "elif", "type", "(", "obj", ")", "=...
Take a dictionary with string keys and recursively convert all keys from one form to another using the formatting function. The dictionary may contain lists as values, and any nested dictionaries within those lists will also be converted. :param object obj: The object to convert :param function formatter: The formatting function for keys, which takes and returns a string :returns: A new object with keys converted :rtype: object :Example: :: >>> obj = { ... 'dict-list': [ ... {'one-key': 123, 'two-key': 456}, ... {'threeKey': 789, 'four-key': 456}, ... ], ... 'some-other-key': 'some-unconverted-value' ... } >>> format_obj_keys(obj, lambda s: s.upper()) { 'DICT-LIST': [ {'ONE-KEY': 123, 'TWO-KEY': 456}, {'FOUR-KEY': 456, 'THREE-KEY': 789} ], 'SOME-OTHER-KEY': 'some-unconverted-value' }
[ "Take", "a", "dictionary", "with", "string", "keys", "and", "recursively", "convert", "all", "keys", "from", "one", "form", "to", "another", "using", "the", "formatting", "function", "." ]
train
https://github.com/cloudboss/friend/blob/3357e6ec849552e3ae9ed28017ff0926e4006e4e/friend/strings.py#L394-L434
ajyoon/blur
blur/markov/graph.py
Graph.merge_nodes
def merge_nodes(self, keep_node, kill_node): """ Merge two nodes in the graph. Takes two nodes and merges them together, merging their links by combining the two link lists and summing the weights of links which point to the same node. All links in the graph pointing to ``kill_node`` will be merged into ``keep_node``. Links belonging to ``kill_node`` which point to targets not in ``self.node_list`` will not be merged into ``keep_node`` Args: keep_node (Node): node to be kept kill_node (Node): node to be deleted Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_3 = Node('Three') >>> node_1.add_link(node_3, 7) >>> node_2.add_link(node_1, 1) >>> node_2.add_link(node_2, 3) >>> node_3.add_link(node_2, 5) >>> graph = Graph([node_1, node_2, node_3]) >>> print([node.value for node in graph.node_list]) ['One', 'Two', 'Three'] >>> graph.merge_nodes(node_2, node_3) >>> print([node.value for node in graph.node_list]) ['One', 'Two'] >>> for link in graph.node_list[1].link_list: ... print('{} {}'.format(link.target.value, link.weight)) One 1 Two 8 """ # Merge links from kill_node to keep_node for kill_link in kill_node.link_list: if kill_link.target in self.node_list: keep_node.add_link(kill_link.target, kill_link.weight) # Merge any links in the graph pointing to kill_node into links # pointing to keep_node for node in self.node_list: for link in node.link_list: if link.target == kill_node: node.add_link(keep_node, link.weight) break # Remove kill_node from the graph self.remove_node(kill_node)
python
def merge_nodes(self, keep_node, kill_node): """ Merge two nodes in the graph. Takes two nodes and merges them together, merging their links by combining the two link lists and summing the weights of links which point to the same node. All links in the graph pointing to ``kill_node`` will be merged into ``keep_node``. Links belonging to ``kill_node`` which point to targets not in ``self.node_list`` will not be merged into ``keep_node`` Args: keep_node (Node): node to be kept kill_node (Node): node to be deleted Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_3 = Node('Three') >>> node_1.add_link(node_3, 7) >>> node_2.add_link(node_1, 1) >>> node_2.add_link(node_2, 3) >>> node_3.add_link(node_2, 5) >>> graph = Graph([node_1, node_2, node_3]) >>> print([node.value for node in graph.node_list]) ['One', 'Two', 'Three'] >>> graph.merge_nodes(node_2, node_3) >>> print([node.value for node in graph.node_list]) ['One', 'Two'] >>> for link in graph.node_list[1].link_list: ... print('{} {}'.format(link.target.value, link.weight)) One 1 Two 8 """ # Merge links from kill_node to keep_node for kill_link in kill_node.link_list: if kill_link.target in self.node_list: keep_node.add_link(kill_link.target, kill_link.weight) # Merge any links in the graph pointing to kill_node into links # pointing to keep_node for node in self.node_list: for link in node.link_list: if link.target == kill_node: node.add_link(keep_node, link.weight) break # Remove kill_node from the graph self.remove_node(kill_node)
[ "def", "merge_nodes", "(", "self", ",", "keep_node", ",", "kill_node", ")", ":", "# Merge links from kill_node to keep_node", "for", "kill_link", "in", "kill_node", ".", "link_list", ":", "if", "kill_link", ".", "target", "in", "self", ".", "node_list", ":", "ke...
Merge two nodes in the graph. Takes two nodes and merges them together, merging their links by combining the two link lists and summing the weights of links which point to the same node. All links in the graph pointing to ``kill_node`` will be merged into ``keep_node``. Links belonging to ``kill_node`` which point to targets not in ``self.node_list`` will not be merged into ``keep_node`` Args: keep_node (Node): node to be kept kill_node (Node): node to be deleted Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_3 = Node('Three') >>> node_1.add_link(node_3, 7) >>> node_2.add_link(node_1, 1) >>> node_2.add_link(node_2, 3) >>> node_3.add_link(node_2, 5) >>> graph = Graph([node_1, node_2, node_3]) >>> print([node.value for node in graph.node_list]) ['One', 'Two', 'Three'] >>> graph.merge_nodes(node_2, node_3) >>> print([node.value for node in graph.node_list]) ['One', 'Two'] >>> for link in graph.node_list[1].link_list: ... print('{} {}'.format(link.target.value, link.weight)) One 1 Two 8
[ "Merge", "two", "nodes", "in", "the", "graph", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L60-L112
ajyoon/blur
blur/markov/graph.py
Graph.add_nodes
def add_nodes(self, nodes):
    """
    Add a given node or list of nodes to self.node_list.

    Args:
        nodes (Node or list[Node]): the node or list of nodes to add
            to the graph

    Returns: None

    Examples:
        Adding one node: ::

            >>> from blur.markov.node import Node
            >>> graph = Graph()
            >>> node_1 = Node('One')
            >>> graph.add_nodes(node_1)
            >>> print([node.value for node in graph.node_list])
            ['One']

        Adding multiple nodes at a time in a list: ::

            >>> from blur.markov.node import Node
            >>> graph = Graph()
            >>> node_1 = Node('One')
            >>> node_2 = Node('Two')
            >>> graph.add_nodes([node_1, node_2])
            >>> print([node.value for node in graph.node_list])
            ['One', 'Two']
    """
    # A list is spliced in wholesale; a single node is appended.
    if isinstance(nodes, list):
        self.node_list.extend(nodes)
    else:
        self.node_list.append(nodes)
python
def add_nodes(self, nodes): """ Add a given node or list of nodes to self.node_list. Args: node (Node or list[Node]): the node or list of nodes to add to the graph Returns: None Examples: Adding one node: :: >>> from blur.markov.node import Node >>> graph = Graph() >>> node_1 = Node('One') >>> graph.add_nodes(node_1) >>> print([node.value for node in graph.node_list]) ['One'] Adding multiple nodes at a time in a list: :: >>> from blur.markov.node import Node >>> graph = Graph() >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> graph.add_nodes([node_1, node_2]) >>> print([node.value for node in graph.node_list]) ['One', 'Two'] """ # Generalize nodes to a list if not isinstance(nodes, list): add_list = [nodes] else: add_list = nodes self.node_list.extend(add_list)
[ "def", "add_nodes", "(", "self", ",", "nodes", ")", ":", "# Generalize nodes to a list", "if", "not", "isinstance", "(", "nodes", ",", "list", ")", ":", "add_list", "=", "[", "nodes", "]", "else", ":", "add_list", "=", "nodes", "self", ".", "node_list", ...
Add a given node or list of nodes to self.node_list. Args: node (Node or list[Node]): the node or list of nodes to add to the graph Returns: None Examples: Adding one node: :: >>> from blur.markov.node import Node >>> graph = Graph() >>> node_1 = Node('One') >>> graph.add_nodes(node_1) >>> print([node.value for node in graph.node_list]) ['One'] Adding multiple nodes at a time in a list: :: >>> from blur.markov.node import Node >>> graph = Graph() >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> graph.add_nodes([node_1, node_2]) >>> print([node.value for node in graph.node_list]) ['One', 'Two']
[ "Add", "a", "given", "node", "or", "list", "of", "nodes", "to", "self", ".", "node_list", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L114-L150
ajyoon/blur
blur/markov/graph.py
Graph.feather_links
def feather_links(self, factor=0.01, include_self=False):
    """
    Feather the links of connected nodes.

    Go through every node in the network and make it inherit the links of
    the other nodes it is connected to. Because the link weight sum for any
    given node can be very different within a graph, the weights of
    inherited links are made proportional to the sum weight of the parent
    nodes.

    Args:
        factor (float): multiplier of neighbor links
        include_self (bool): whether nodes can inherit links pointing
            to themselves

    Returns: None

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> node_2 = Node('Two')
        >>> node_1.add_link(node_2, 1)
        >>> node_2.add_link(node_1, 1)
        >>> graph = Graph([node_1, node_2])
        >>> for link in graph.node_list[0].link_list:
        ...     print('{} {}'.format(link.target.value, link.weight))
        Two 1
        >>> graph.feather_links(include_self=True)
        >>> for link in graph.node_list[0].link_list:
        ...     print('{} {}'.format(link.target.value, link.weight))
        Two 1
        One 0.01
    """
    def feather_node(node):
        # Total outgoing weight; used to normalize each neighbor's share.
        # NOTE(review): a node with an empty link_list would make this 0
        # and the division below would raise ZeroDivisionError -- presumably
        # isolated nodes don't occur here; confirm with callers.
        node_weight_sum = sum(l.weight for l in node.link_list)
        # Iterate over a copy of the original link list since we will
        # need to refer to this while modifying node.link_list
        for original_link in node.link_list[:]:
            neighbor_node = original_link.target
            neighbor_weight = original_link.weight
            # This neighbor's fraction of node's total outgoing weight.
            feather_weight = neighbor_weight / node_weight_sum
            neighbor_node_weight_sum = sum(l.weight
                                           for l in neighbor_node.link_list)
            # Iterate over the links belonging to the neighbor_node,
            # copying its links to ``node`` with proportional weights
            for neighbor_link in neighbor_node.link_list:
                # Optionally skip links that would point back at node itself.
                if (not include_self) and (neighbor_link.target == node):
                    continue
                relative_link_weight = (neighbor_link.weight /
                                        neighbor_node_weight_sum)
                # Inherited weight: neighbor's internal proportion, scaled by
                # how strongly node links to the neighbor, then by factor.
                # Rounded to 2 places (matches the docstring example).
                feathered_link_weight = round((relative_link_weight *
                                               feather_weight * factor), 2)
                node.add_link(neighbor_link.target, feathered_link_weight)
    for n in self.node_list:
        feather_node(n)
python
def feather_links(self, factor=0.01, include_self=False): """ Feather the links of connected nodes. Go through every node in the network and make it inherit the links of the other nodes it is connected to. Because the link weight sum for any given node can be very different within a graph, the weights of inherited links are made proportional to the sum weight of the parent nodes. Args: factor (float): multiplier of neighbor links include_self (bool): whether nodes can inherit links pointing to themselves Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_1.add_link(node_2, 1) >>> node_2.add_link(node_1, 1) >>> graph = Graph([node_1, node_2]) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 >>> graph.feather_links(include_self=True) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 One 0.01 """ def feather_node(node): node_weight_sum = sum(l.weight for l in node.link_list) # Iterate over a copy of the original link list since we will # need to refer to this while modifying node.link_list for original_link in node.link_list[:]: neighbor_node = original_link.target neighbor_weight = original_link.weight feather_weight = neighbor_weight / node_weight_sum neighbor_node_weight_sum = sum(l.weight for l in neighbor_node.link_list) # Iterate over the links belonging to the neighbor_node, # copying its links to ``node`` with proportional weights for neighbor_link in neighbor_node.link_list: if (not include_self) and (neighbor_link.target == node): continue relative_link_weight = (neighbor_link.weight / neighbor_node_weight_sum) feathered_link_weight = round((relative_link_weight * feather_weight * factor), 2) node.add_link(neighbor_link.target, feathered_link_weight) for n in self.node_list: feather_node(n)
[ "def", "feather_links", "(", "self", ",", "factor", "=", "0.01", ",", "include_self", "=", "False", ")", ":", "def", "feather_node", "(", "node", ")", ":", "node_weight_sum", "=", "sum", "(", "l", ".", "weight", "for", "l", "in", "node", ".", "link_lis...
Feather the links of connected nodes. Go through every node in the network and make it inherit the links of the other nodes it is connected to. Because the link weight sum for any given node can be very different within a graph, the weights of inherited links are made proportional to the sum weight of the parent nodes. Args: factor (float): multiplier of neighbor links include_self (bool): whether nodes can inherit links pointing to themselves Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_1.add_link(node_2, 1) >>> node_2.add_link(node_1, 1) >>> graph = Graph([node_1, node_2]) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 >>> graph.feather_links(include_self=True) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 One 0.01
[ "Feather", "the", "links", "of", "connected", "nodes", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L152-L206
ajyoon/blur
blur/markov/graph.py
Graph.apply_noise
def apply_noise(self, noise_weights=None, uniform_amount=0.1): """ Add noise to every link in the network. Can use either a ``uniform_amount`` or a ``noise_weight`` weight profile. If ``noise_weight`` is set, ``uniform_amount`` will be ignored. Args: noise_weights (list): a list of weight tuples of form ``(float, float)`` corresponding to ``(amount, weight)`` describing the noise to be added to each link in the graph uniform_amount (float): the maximum amount of uniform noise to be applied if ``noise_weights`` is not set Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_1.add_link(node_1, 3) >>> node_1.add_link(node_2, 5) >>> node_2.add_link(node_1, 1) >>> graph = Graph([node_1, node_2]) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) One 3 Two 5 >>> graph.apply_noise() >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format( ... link.target.value, link.weight)) # doctest: +SKIP One 3.154 Two 5.321 """ # Main node loop for node in self.node_list: for link in node.link_list: if noise_weights is not None: noise_amount = round(weighted_rand(noise_weights), 3) else: noise_amount = round(random.uniform( 0, link.weight * uniform_amount), 3) link.weight += noise_amount
python
def apply_noise(self, noise_weights=None, uniform_amount=0.1): """ Add noise to every link in the network. Can use either a ``uniform_amount`` or a ``noise_weight`` weight profile. If ``noise_weight`` is set, ``uniform_amount`` will be ignored. Args: noise_weights (list): a list of weight tuples of form ``(float, float)`` corresponding to ``(amount, weight)`` describing the noise to be added to each link in the graph uniform_amount (float): the maximum amount of uniform noise to be applied if ``noise_weights`` is not set Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_1.add_link(node_1, 3) >>> node_1.add_link(node_2, 5) >>> node_2.add_link(node_1, 1) >>> graph = Graph([node_1, node_2]) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) One 3 Two 5 >>> graph.apply_noise() >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format( ... link.target.value, link.weight)) # doctest: +SKIP One 3.154 Two 5.321 """ # Main node loop for node in self.node_list: for link in node.link_list: if noise_weights is not None: noise_amount = round(weighted_rand(noise_weights), 3) else: noise_amount = round(random.uniform( 0, link.weight * uniform_amount), 3) link.weight += noise_amount
[ "def", "apply_noise", "(", "self", ",", "noise_weights", "=", "None", ",", "uniform_amount", "=", "0.1", ")", ":", "# Main node loop", "for", "node", "in", "self", ".", "node_list", ":", "for", "link", "in", "node", ".", "link_list", ":", "if", "noise_weig...
Add noise to every link in the network. Can use either a ``uniform_amount`` or a ``noise_weight`` weight profile. If ``noise_weight`` is set, ``uniform_amount`` will be ignored. Args: noise_weights (list): a list of weight tuples of form ``(float, float)`` corresponding to ``(amount, weight)`` describing the noise to be added to each link in the graph uniform_amount (float): the maximum amount of uniform noise to be applied if ``noise_weights`` is not set Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_1.add_link(node_1, 3) >>> node_1.add_link(node_2, 5) >>> node_2.add_link(node_1, 1) >>> graph = Graph([node_1, node_2]) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) One 3 Two 5 >>> graph.apply_noise() >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format( ... link.target.value, link.weight)) # doctest: +SKIP One 3.154 Two 5.321
[ "Add", "noise", "to", "every", "link", "in", "the", "network", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L208-L253
ajyoon/blur
blur/markov/graph.py
Graph.find_node_by_value
def find_node_by_value(self, value):
    """
    Find and return a node in self.node_list with the value ``value``.

    If multiple nodes exist with the value ``value``, return the first one
    found. If no such node exists, this returns ``None``.

    Args:
        value (Any): The value of the node to find

    Returns:
        Node: A node with value ``value`` if it was found
        None: If no node exists with value ``value``

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> found_node = graph.find_node_by_value('One')
        >>> found_node == node_1
        True
    """
    # Linear scan in list order; the first match wins.
    for candidate in self.node_list:
        if candidate.value == value:
            return candidate
    return None
python
def find_node_by_value(self, value): """ Find and return a node in self.node_list with the value ``value``. If multiple nodes exist with the value ``value``, return the first one found. If no such node exists, this returns ``None``. Args: value (Any): The value of the node to find Returns: Node: A node with value ``value`` if it was found None: If no node exists with value ``value`` Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> graph = Graph([node_1]) >>> found_node = graph.find_node_by_value('One') >>> found_node == node_1 True """ try: return next(n for n in self.node_list if n.value == value) except StopIteration: return None
[ "def", "find_node_by_value", "(", "self", ",", "value", ")", ":", "try", ":", "return", "next", "(", "n", "for", "n", "in", "self", ".", "node_list", "if", "n", ".", "value", "==", "value", ")", "except", "StopIteration", ":", "return", "None" ]
Find and return a node in self.node_list with the value ``value``. If multiple nodes exist with the value ``value``, return the first one found. If no such node exists, this returns ``None``. Args: value (Any): The value of the node to find Returns: Node: A node with value ``value`` if it was found None: If no node exists with value ``value`` Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> graph = Graph([node_1]) >>> found_node = graph.find_node_by_value('One') >>> found_node == node_1 True
[ "Find", "and", "return", "a", "node", "in", "self", ".", "node_list", "with", "the", "value", "value", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L255-L283
ajyoon/blur
blur/markov/graph.py
Graph.remove_node
def remove_node(self, node):
    """
    Remove a node from ``self.node_list`` and links pointing to it.

    If ``node`` is not in the graph, do nothing.

    Args:
        node (Node): The node to be removed

    Returns: None

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> graph.remove_node(node_1)
        >>> len(graph.node_list)
        0
    """
    # EAFP: attempt the removal and treat "not present" as a no-op.
    try:
        self.node_list.remove(node)
    except ValueError:
        return
    # Prune links in the surviving nodes that still reference the
    # removed node.
    for survivor in self.node_list:
        survivor.link_list = [lnk for lnk in survivor.link_list
                              if lnk.target != node]
python
def remove_node(self, node): """ Remove a node from ``self.node_list`` and links pointing to it. If ``node`` is not in the graph, do nothing. Args: node (Node): The node to be removed Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> graph = Graph([node_1]) >>> graph.remove_node(node_1) >>> len(graph.node_list) 0 """ if node not in self.node_list: return self.node_list.remove(node) # Remove links pointing to the deleted node for n in self.node_list: n.link_list = [link for link in n.link_list if link.target != node]
[ "def", "remove_node", "(", "self", ",", "node", ")", ":", "if", "node", "not", "in", "self", ".", "node_list", ":", "return", "self", ".", "node_list", ".", "remove", "(", "node", ")", "# Remove links pointing to the deleted node", "for", "n", "in", "self", ...
Remove a node from ``self.node_list`` and links pointing to it. If ``node`` is not in the graph, do nothing. Args: node (Node): The node to be removed Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> graph = Graph([node_1]) >>> graph.remove_node(node_1) >>> len(graph.node_list) 0
[ "Remove", "a", "node", "from", "self", ".", "node_list", "and", "links", "pointing", "to", "it", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L285-L310
ajyoon/blur
blur/markov/graph.py
Graph.remove_node_by_value
def remove_node_by_value(self, value):
    """
    Delete all nodes in ``self.node_list`` with the value ``value``.

    Args:
        value (Any): The value to find and delete owners of.

    Returns: None

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> graph.remove_node_by_value('One')
        >>> len(graph.node_list)
        0
    """
    # Rebuild the node list without any node carrying ``value``.
    self.node_list = [kept for kept in self.node_list
                      if kept.value != value]
    # Then drop links that still target one of the deleted nodes.
    for kept in self.node_list:
        kept.link_list = [lnk for lnk in kept.link_list
                          if lnk.target.value != value]
python
def remove_node_by_value(self, value): """ Delete all nodes in ``self.node_list`` with the value ``value``. Args: value (Any): The value to find and delete owners of. Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> graph = Graph([node_1]) >>> graph.remove_node_by_value('One') >>> len(graph.node_list) 0 """ self.node_list = [node for node in self.node_list if node.value != value] # Remove links pointing to the deleted node for node in self.node_list: node.link_list = [link for link in node.link_list if link.target.value != value]
[ "def", "remove_node_by_value", "(", "self", ",", "value", ")", ":", "self", ".", "node_list", "=", "[", "node", "for", "node", "in", "self", ".", "node_list", "if", "node", ".", "value", "!=", "value", "]", "# Remove links pointing to the deleted node", "for",...
Delete all nodes in ``self.node_list`` with the value ``value``. Args: value (Any): The value to find and delete owners of. Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> graph = Graph([node_1]) >>> graph.remove_node_by_value('One') >>> len(graph.node_list) 0
[ "Delete", "all", "nodes", "in", "self", ".", "node_list", "with", "the", "value", "value", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L312-L334
ajyoon/blur
blur/markov/graph.py
Graph.has_node_with_value
def has_node_with_value(self, value):
    """
    Whether any node in ``self.node_list`` has the value ``value``.

    Args:
        value (Any): The value to find in ``self.node_list``

    Returns: bool

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> graph.has_node_with_value('One')
        True
        >>> graph.has_node_with_value('Foo')
        False
    """
    # any() scans every node; the original loop/if/else form was
    # ambiguous (an else bound to the if would have inspected only the
    # first node) and the for-else reading is equivalent to this.
    return any(node.value == value for node in self.node_list)
python
def has_node_with_value(self, value): """ Whether any node in ``self.node_list`` has the value ``value``. Args: value (Any): The value to find in ``self.node_list`` Returns: bool Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> graph = Graph([node_1]) >>> graph.has_node_with_value('One') True >>> graph.has_node_with_value('Foo') False """ for node in self.node_list: if node.value == value: return True else: return False
[ "def", "has_node_with_value", "(", "self", ",", "value", ")", ":", "for", "node", "in", "self", ".", "node_list", ":", "if", "node", ".", "value", "==", "value", ":", "return", "True", "else", ":", "return", "False" ]
Whether any node in ``self.node_list`` has the value ``value``. Args: value (Any): The value to find in ``self.node_list`` Returns: bool Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> graph = Graph([node_1]) >>> graph.has_node_with_value('One') True >>> graph.has_node_with_value('Foo') False
[ "Whether", "any", "node", "in", "self", ".", "node_list", "has", "the", "value", "value", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L336-L358
ajyoon/blur
blur/markov/graph.py
Graph.pick
def pick(self, starting_node=None):
    """
    Pick a node on the graph based on the links in a starting node.

    Additionally, set ``self.current_node`` to the newly picked node.

    * if ``starting_node`` is specified, start from there
    * if ``starting_node`` is ``None``, start from ``self.current_node``
    * if ``starting_node`` is ``None`` and ``self.current_node`` is
      ``None``, pick a uniformly random node in ``self.node_list``

    Args:
        starting_node (Node): ``Node`` to pick from.

    Returns: Node

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> node_2 = Node('Two')
        >>> node_1.add_link(node_1, 5)
        >>> node_1.add_link(node_2, 2)
        >>> node_2.add_link(node_1, 1)
        >>> graph = Graph([node_1, node_2])
        >>> [graph.pick().get_value() for i in range(5)]  # doctest: +SKIP
        ['One', 'One', 'Two', 'One', 'One']
    """
    if starting_node is None:
        if self.current_node is None:
            # Nothing to start from: fall back to a uniformly random
            # node and make it the new current node.
            random_node = random.choice(self.node_list)
            self.current_node = random_node
            return random_node
        else:
            # Resume the walk from wherever we last stopped.
            starting_node = self.current_node
    # Use weighted_choice (module-level helper defined elsewhere in this
    # package) on starting_node.link_list: sample a target with
    # probability proportional to its link weight.
    self.current_node = weighted_choice(
        [(link.target, link.weight) for link in starting_node.link_list])
    return self.current_node
python
def pick(self, starting_node=None): """ Pick a node on the graph based on the links in a starting node. Additionally, set ``self.current_node`` to the newly picked node. * if ``starting_node`` is specified, start from there * if ``starting_node`` is ``None``, start from ``self.current_node`` * if ``starting_node`` is ``None`` and ``self.current_node`` is ``None``, pick a uniformally random node in ``self.node_list`` Args: starting_node (Node): ``Node`` to pick from. Returns: Node Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_1.add_link(node_1, 5) >>> node_1.add_link(node_2, 2) >>> node_2.add_link(node_1, 1) >>> graph = Graph([node_1, node_2]) >>> [graph.pick().get_value() for i in range(5)] # doctest: +SKIP ['One', 'One', 'Two', 'One', 'One'] """ if starting_node is None: if self.current_node is None: random_node = random.choice(self.node_list) self.current_node = random_node return random_node else: starting_node = self.current_node # Use weighted_choice on start_node.link_list self.current_node = weighted_choice( [(link.target, link.weight) for link in starting_node.link_list]) return self.current_node
[ "def", "pick", "(", "self", ",", "starting_node", "=", "None", ")", ":", "if", "starting_node", "is", "None", ":", "if", "self", ".", "current_node", "is", "None", ":", "random_node", "=", "random", ".", "choice", "(", "self", ".", "node_list", ")", "s...
Pick a node on the graph based on the links in a starting node. Additionally, set ``self.current_node`` to the newly picked node. * if ``starting_node`` is specified, start from there * if ``starting_node`` is ``None``, start from ``self.current_node`` * if ``starting_node`` is ``None`` and ``self.current_node`` is ``None``, pick a uniformally random node in ``self.node_list`` Args: starting_node (Node): ``Node`` to pick from. Returns: Node Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_1.add_link(node_1, 5) >>> node_1.add_link(node_2, 2) >>> node_2.add_link(node_1, 1) >>> graph = Graph([node_1, node_2]) >>> [graph.pick().get_value() for i in range(5)] # doctest: +SKIP ['One', 'One', 'Two', 'One', 'One']
[ "Pick", "a", "node", "on", "the", "graph", "based", "on", "the", "links", "in", "a", "starting", "node", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L360-L397
ajyoon/blur
blur/markov/graph.py
Graph.from_string
def from_string(cls, source, distance_weights=None, merge_same_words=False, group_marker_opening='<<', group_marker_closing='>>'): """ Read a string and derive of ``Graph`` from it. Words and punctuation marks are made into nodes. Punctuation marks are split into separate nodes unless they fall between other non-punctuation marks. ``'hello, world'`` is split into ``'hello'``, ``','``, and ``'world'``, while ``'who's there?'`` is split into ``"who's"``, ``'there'``, and ``'?'``. To group arbitrary characters together into a single node (e.g. to make ``'hello, world!'``), surround the text in question with ``group_marker_opening`` and ``group_marker_closing``. With the default value, this would look like ``'<<hello, world!>>'``. It is recommended that the group markers not appear anywhere in the source text where they aren't meant to act as such to prevent unexpected behavior. The exact regex for extracting nodes is defined by: :: expression = r'{0}(.+){1}|([^\w\s]+)\B|([\S]+\b)'.format( ''.join('\\' + c for c in group_marker_opening), ''.join('\\' + c for c in group_marker_closing) ) Args: source (str): the string to derive the graph from distance_weights (dict): dict of relative indices corresponding with word weights. For example, if a dict entry is ``1: 1000`` this means that every word is linked to the word which follows it with a weight of 1000. ``-4: 350`` would mean that every word is linked to the 4th word behind it with a weight of 350. A key of ``0`` refers to the weight words get pointing to themselves. Keys pointing beyond the edge of the word list will wrap around the list. The default value for ``distance_weights`` is ``{1: 1}``. This means that each word gets equal weight to whatever word follows it. Consequently, if this default value is used and ``merge_same_words`` is ``False``, the resulting graph behavior will simply move linearly through the source, wrapping at the end to the beginning. 
merge_same_words (bool): if nodes which have the same value should be merged or not. group_marker_opening (str): The string used to mark the beginning of word groups. group_marker_closing (str): The string used to mark the end of word groups. It is strongly recommended that this be different than ``group_marker_opening`` to prevent unexpected behavior with the regex pattern. Returns: Graph Example: >>> graph = Graph.from_string('i have nothing to say and ' ... 'i am saying it and that is poetry.') >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP 'using chance algorithmic in algorithmic art easier blur' """ if distance_weights is None: distance_weights = {1: 1} # Convert distance_weights to a sorted list of tuples # To make output node list order more predictable sorted_weights_list = sorted(distance_weights.items(), key=lambda i: i[0]) # regex that matches: # * Anything surrounded by # group_marker_opening and group_marker_closing, # * Groups of punctuation marks followed by whitespace # * Any continuous group of non-whitespace characters # followed by whitespace expression = r'{0}(.+){1}|([^\w\s]+)\B|([\S]+\b)'.format( ''.join('\\' + c for c in group_marker_opening), ''.join('\\' + c for c in group_marker_closing) ) matches = re.findall(expression, source) # Un-tuple matches since we are only using groups to strip brackets # Is there a better way to do this? words = [next(t for t in match if t) for match in matches] if merge_same_words: # Ensure a 1:1 correspondence between words and nodes, # and that all links point to these nodes as well # Create nodes for every unique word temp_node_list = [] for word in words: if word not in (n.value for n in temp_node_list): temp_node_list.append(Node(word)) # Loop through words, attaching links to nodes which correspond # to the current word. Ensure links also point to valid # corresponding nodes in the node list. 
for i, word in enumerate(words): matching_node = next( (n for n in temp_node_list if n.value == word)) for key, weight in sorted_weights_list: # Wrap the index of edge items wrapped_index = (key + i) % len(words) target_word = words[wrapped_index] matching_target_node = next( (n for n in temp_node_list if n.value == target_word)) matching_node.add_link(matching_target_node, weight) else: # Create one node for every (not necessarily unique) word. temp_node_list = [Node(word) for word in words] for i, node in enumerate(temp_node_list): for key, weight in sorted_weights_list: # Wrap the index of edge items wrapped_index = (key + i) % len(temp_node_list) node.add_link(temp_node_list[wrapped_index], weight) graph = cls() graph.add_nodes(temp_node_list) return graph
python
def from_string(cls, source, distance_weights=None, merge_same_words=False, group_marker_opening='<<', group_marker_closing='>>'): """ Read a string and derive of ``Graph`` from it. Words and punctuation marks are made into nodes. Punctuation marks are split into separate nodes unless they fall between other non-punctuation marks. ``'hello, world'`` is split into ``'hello'``, ``','``, and ``'world'``, while ``'who's there?'`` is split into ``"who's"``, ``'there'``, and ``'?'``. To group arbitrary characters together into a single node (e.g. to make ``'hello, world!'``), surround the text in question with ``group_marker_opening`` and ``group_marker_closing``. With the default value, this would look like ``'<<hello, world!>>'``. It is recommended that the group markers not appear anywhere in the source text where they aren't meant to act as such to prevent unexpected behavior. The exact regex for extracting nodes is defined by: :: expression = r'{0}(.+){1}|([^\w\s]+)\B|([\S]+\b)'.format( ''.join('\\' + c for c in group_marker_opening), ''.join('\\' + c for c in group_marker_closing) ) Args: source (str): the string to derive the graph from distance_weights (dict): dict of relative indices corresponding with word weights. For example, if a dict entry is ``1: 1000`` this means that every word is linked to the word which follows it with a weight of 1000. ``-4: 350`` would mean that every word is linked to the 4th word behind it with a weight of 350. A key of ``0`` refers to the weight words get pointing to themselves. Keys pointing beyond the edge of the word list will wrap around the list. The default value for ``distance_weights`` is ``{1: 1}``. This means that each word gets equal weight to whatever word follows it. Consequently, if this default value is used and ``merge_same_words`` is ``False``, the resulting graph behavior will simply move linearly through the source, wrapping at the end to the beginning. 
merge_same_words (bool): if nodes which have the same value should be merged or not. group_marker_opening (str): The string used to mark the beginning of word groups. group_marker_closing (str): The string used to mark the end of word groups. It is strongly recommended that this be different than ``group_marker_opening`` to prevent unexpected behavior with the regex pattern. Returns: Graph Example: >>> graph = Graph.from_string('i have nothing to say and ' ... 'i am saying it and that is poetry.') >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP 'using chance algorithmic in algorithmic art easier blur' """ if distance_weights is None: distance_weights = {1: 1} # Convert distance_weights to a sorted list of tuples # To make output node list order more predictable sorted_weights_list = sorted(distance_weights.items(), key=lambda i: i[0]) # regex that matches: # * Anything surrounded by # group_marker_opening and group_marker_closing, # * Groups of punctuation marks followed by whitespace # * Any continuous group of non-whitespace characters # followed by whitespace expression = r'{0}(.+){1}|([^\w\s]+)\B|([\S]+\b)'.format( ''.join('\\' + c for c in group_marker_opening), ''.join('\\' + c for c in group_marker_closing) ) matches = re.findall(expression, source) # Un-tuple matches since we are only using groups to strip brackets # Is there a better way to do this? words = [next(t for t in match if t) for match in matches] if merge_same_words: # Ensure a 1:1 correspondence between words and nodes, # and that all links point to these nodes as well # Create nodes for every unique word temp_node_list = [] for word in words: if word not in (n.value for n in temp_node_list): temp_node_list.append(Node(word)) # Loop through words, attaching links to nodes which correspond # to the current word. Ensure links also point to valid # corresponding nodes in the node list. 
for i, word in enumerate(words): matching_node = next( (n for n in temp_node_list if n.value == word)) for key, weight in sorted_weights_list: # Wrap the index of edge items wrapped_index = (key + i) % len(words) target_word = words[wrapped_index] matching_target_node = next( (n for n in temp_node_list if n.value == target_word)) matching_node.add_link(matching_target_node, weight) else: # Create one node for every (not necessarily unique) word. temp_node_list = [Node(word) for word in words] for i, node in enumerate(temp_node_list): for key, weight in sorted_weights_list: # Wrap the index of edge items wrapped_index = (key + i) % len(temp_node_list) node.add_link(temp_node_list[wrapped_index], weight) graph = cls() graph.add_nodes(temp_node_list) return graph
[ "def", "from_string", "(", "cls", ",", "source", ",", "distance_weights", "=", "None", ",", "merge_same_words", "=", "False", ",", "group_marker_opening", "=", "'<<'", ",", "group_marker_closing", "=", "'>>'", ")", ":", "if", "distance_weights", "is", "None", ...
Read a string and derive of ``Graph`` from it. Words and punctuation marks are made into nodes. Punctuation marks are split into separate nodes unless they fall between other non-punctuation marks. ``'hello, world'`` is split into ``'hello'``, ``','``, and ``'world'``, while ``'who's there?'`` is split into ``"who's"``, ``'there'``, and ``'?'``. To group arbitrary characters together into a single node (e.g. to make ``'hello, world!'``), surround the text in question with ``group_marker_opening`` and ``group_marker_closing``. With the default value, this would look like ``'<<hello, world!>>'``. It is recommended that the group markers not appear anywhere in the source text where they aren't meant to act as such to prevent unexpected behavior. The exact regex for extracting nodes is defined by: :: expression = r'{0}(.+){1}|([^\w\s]+)\B|([\S]+\b)'.format( ''.join('\\' + c for c in group_marker_opening), ''.join('\\' + c for c in group_marker_closing) ) Args: source (str): the string to derive the graph from distance_weights (dict): dict of relative indices corresponding with word weights. For example, if a dict entry is ``1: 1000`` this means that every word is linked to the word which follows it with a weight of 1000. ``-4: 350`` would mean that every word is linked to the 4th word behind it with a weight of 350. A key of ``0`` refers to the weight words get pointing to themselves. Keys pointing beyond the edge of the word list will wrap around the list. The default value for ``distance_weights`` is ``{1: 1}``. This means that each word gets equal weight to whatever word follows it. Consequently, if this default value is used and ``merge_same_words`` is ``False``, the resulting graph behavior will simply move linearly through the source, wrapping at the end to the beginning. merge_same_words (bool): if nodes which have the same value should be merged or not. group_marker_opening (str): The string used to mark the beginning of word groups. 
group_marker_closing (str): The string used to mark the end of word groups. It is strongly recommended that this be different than ``group_marker_opening`` to prevent unexpected behavior with the regex pattern. Returns: Graph Example: >>> graph = Graph.from_string('i have nothing to say and ' ... 'i am saying it and that is poetry.') >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP 'using chance algorithmic in algorithmic art easier blur'
[ "Read", "a", "string", "and", "derive", "of", "Graph", "from", "it", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L400-L521
ajyoon/blur
blur/markov/graph.py
Graph.from_file
def from_file(cls, source, distance_weights=None, merge_same_words=False, group_marker_opening='<<', group_marker_closing='>>'): """ Read a string from a file and derive a ``Graph`` from it. This is a convenience function for opening a file and passing its contents to ``Graph.from_string()`` (see that for more detail) Args: source (str): the file to read and derive the graph from distance_weights (dict): dict of relative indices corresponding with word weights. See ``Graph.from_string`` for more detail. merge_same_words (bool): whether nodes which have the same value should be merged or not. group_marker_opening (str): The string used to mark the beginning of word groups. group_marker_closing (str): The string used to mark the end of word groups. Returns: Graph Example: >>> graph = Graph.from_file('cage.txt') # doctest: +SKIP >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP 'poetry i have nothing to say and i' """ source_string = open(source, 'r').read() return cls.from_string(source_string, distance_weights, merge_same_words, group_marker_opening=group_marker_opening, group_marker_closing=group_marker_closing)
python
def from_file(cls, source, distance_weights=None, merge_same_words=False, group_marker_opening='<<', group_marker_closing='>>'): """ Read a string from a file and derive a ``Graph`` from it. This is a convenience function for opening a file and passing its contents to ``Graph.from_string()`` (see that for more detail) Args: source (str): the file to read and derive the graph from distance_weights (dict): dict of relative indices corresponding with word weights. See ``Graph.from_string`` for more detail. merge_same_words (bool): whether nodes which have the same value should be merged or not. group_marker_opening (str): The string used to mark the beginning of word groups. group_marker_closing (str): The string used to mark the end of word groups. Returns: Graph Example: >>> graph = Graph.from_file('cage.txt') # doctest: +SKIP >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP 'poetry i have nothing to say and i' """ source_string = open(source, 'r').read() return cls.from_string(source_string, distance_weights, merge_same_words, group_marker_opening=group_marker_opening, group_marker_closing=group_marker_closing)
[ "def", "from_file", "(", "cls", ",", "source", ",", "distance_weights", "=", "None", ",", "merge_same_words", "=", "False", ",", "group_marker_opening", "=", "'<<'", ",", "group_marker_closing", "=", "'>>'", ")", ":", "source_string", "=", "open", "(", "source...
Read a string from a file and derive a ``Graph`` from it. This is a convenience function for opening a file and passing its contents to ``Graph.from_string()`` (see that for more detail) Args: source (str): the file to read and derive the graph from distance_weights (dict): dict of relative indices corresponding with word weights. See ``Graph.from_string`` for more detail. merge_same_words (bool): whether nodes which have the same value should be merged or not. group_marker_opening (str): The string used to mark the beginning of word groups. group_marker_closing (str): The string used to mark the end of word groups. Returns: Graph Example: >>> graph = Graph.from_file('cage.txt') # doctest: +SKIP >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP 'poetry i have nothing to say and i'
[ "Read", "a", "string", "from", "a", "file", "and", "derive", "a", "Graph", "from", "it", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L524-L559
clinicedc/edc-notification
edc_notification/notification/model_notification.py
ModelNotification.notify
def notify(self, force_notify=None, use_email=None, use_sms=None, **kwargs): """Overridden to only call `notify` if model matches. """ notified = False instance = kwargs.get("instance") if instance._meta.label_lower == self.model: notified = super().notify( force_notify=force_notify, use_email=use_email, use_sms=use_sms, **kwargs, ) return notified
python
def notify(self, force_notify=None, use_email=None, use_sms=None, **kwargs): """Overridden to only call `notify` if model matches. """ notified = False instance = kwargs.get("instance") if instance._meta.label_lower == self.model: notified = super().notify( force_notify=force_notify, use_email=use_email, use_sms=use_sms, **kwargs, ) return notified
[ "def", "notify", "(", "self", ",", "force_notify", "=", "None", ",", "use_email", "=", "None", ",", "use_sms", "=", "None", ",", "*", "*", "kwargs", ")", ":", "notified", "=", "False", "instance", "=", "kwargs", ".", "get", "(", "\"instance\"", ")", ...
Overridden to only call `notify` if model matches.
[ "Overridden", "to", "only", "call", "notify", "if", "model", "matches", "." ]
train
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/notification/model_notification.py#L52-L64
GustavePate/distarkcli
distarkcli/utils/MyConfiguration.py
Configuration.getclient
def getclient(): ''' return settings dictionnary ''' if not Configuration.client_initialized: Configuration._initconf() Configuration.client_settings = Configuration.settings['client'] Configuration.client_initialized = True return Configuration.client_settings
python
def getclient(): ''' return settings dictionnary ''' if not Configuration.client_initialized: Configuration._initconf() Configuration.client_settings = Configuration.settings['client'] Configuration.client_initialized = True return Configuration.client_settings
[ "def", "getclient", "(", ")", ":", "if", "not", "Configuration", ".", "client_initialized", ":", "Configuration", ".", "_initconf", "(", ")", "Configuration", ".", "client_settings", "=", "Configuration", ".", "settings", "[", "'client'", "]", "Configuration", "...
return settings dictionnary
[ "return", "settings", "dictionnary" ]
train
https://github.com/GustavePate/distarkcli/blob/44b0e637e94ebb2687a1b7e2f6c5d0658d775238/distarkcli/utils/MyConfiguration.py#L62-L70
GustavePate/distarkcli
distarkcli/utils/MyConfiguration.py
Configuration.getworker
def getworker(): ''' return settings dictionnary ''' if not Configuration.worker_initialized: Configuration._initconf() Configuration.worker_settings = Configuration.settings['worker'] Configuration.worker_initialized = True return Configuration.worker_settings
python
def getworker(): ''' return settings dictionnary ''' if not Configuration.worker_initialized: Configuration._initconf() Configuration.worker_settings = Configuration.settings['worker'] Configuration.worker_initialized = True return Configuration.worker_settings
[ "def", "getworker", "(", ")", ":", "if", "not", "Configuration", ".", "worker_initialized", ":", "Configuration", ".", "_initconf", "(", ")", "Configuration", ".", "worker_settings", "=", "Configuration", ".", "settings", "[", "'worker'", "]", "Configuration", "...
return settings dictionnary
[ "return", "settings", "dictionnary" ]
train
https://github.com/GustavePate/distarkcli/blob/44b0e637e94ebb2687a1b7e2f6c5d0658d775238/distarkcli/utils/MyConfiguration.py#L77-L85
GustavePate/distarkcli
distarkcli/utils/MyConfiguration.py
Configuration.getbroker
def getbroker(): ''' return settings dictionnary ''' if not Configuration.broker_initialized: Configuration._initconf() Configuration.broker_settings = Configuration.settings['broker'] Configuration.broker_initialized = True return Configuration.broker_settings
python
def getbroker(): ''' return settings dictionnary ''' if not Configuration.broker_initialized: Configuration._initconf() Configuration.broker_settings = Configuration.settings['broker'] Configuration.broker_initialized = True return Configuration.broker_settings
[ "def", "getbroker", "(", ")", ":", "if", "not", "Configuration", ".", "broker_initialized", ":", "Configuration", ".", "_initconf", "(", ")", "Configuration", ".", "broker_settings", "=", "Configuration", ".", "settings", "[", "'broker'", "]", "Configuration", "...
return settings dictionnary
[ "return", "settings", "dictionnary" ]
train
https://github.com/GustavePate/distarkcli/blob/44b0e637e94ebb2687a1b7e2f6c5d0658d775238/distarkcli/utils/MyConfiguration.py#L92-L100
oblalex/verboselib
verboselib/management/commands/compile.py
execute
def execute(prog_name, args=None): """ Adapted `compilemessages <http://bit.ly/1r3glSu>`_ command from Django. """ args = _get_parser().parse_args(args or []) locale, locale_dir = args.locale, args.locale_dir program = 'msgfmt' ensure_programs(program) def has_bom(fn): with open(fn, 'rb') as f: sample = f.read(4) return (sample[:3] == b'\xef\xbb\xbf' or sample.startswith(codecs.BOM_UTF16_LE) or sample.startswith(codecs.BOM_UTF16_BE)) if locale: dirs = [os.path.join(locale_dir, l, 'LC_MESSAGES') for l in locale] else: dirs = [locale_dir, ] for ldir in dirs: for dir_path, dir_names, file_names in os.walk(ldir): for file_name in file_names: if not file_name.endswith('.po'): continue print_out("Processing file '{:}' in {:}".format(file_name, dir_path)) file_path = os.path.join(dir_path, file_name) if has_bom(file_path): raise RuntimeError( "The '{:}' file has a BOM (Byte Order Mark). " "Verboselib supports only .po files encoded in UTF-8 " "and without any BOM.".format(file_path)) prefix = os.path.splitext(file_path)[0] args = [ program, '--check-format', '-o', native_path(prefix + '.mo'), native_path(prefix + '.po'), ] output, errors, status = popen_wrapper(args) if status: if errors: msg = "Execution of %s failed: %s" % (program, errors) else: msg = "Execution of %s failed" % program raise RuntimeError(msg)
python
def execute(prog_name, args=None): """ Adapted `compilemessages <http://bit.ly/1r3glSu>`_ command from Django. """ args = _get_parser().parse_args(args or []) locale, locale_dir = args.locale, args.locale_dir program = 'msgfmt' ensure_programs(program) def has_bom(fn): with open(fn, 'rb') as f: sample = f.read(4) return (sample[:3] == b'\xef\xbb\xbf' or sample.startswith(codecs.BOM_UTF16_LE) or sample.startswith(codecs.BOM_UTF16_BE)) if locale: dirs = [os.path.join(locale_dir, l, 'LC_MESSAGES') for l in locale] else: dirs = [locale_dir, ] for ldir in dirs: for dir_path, dir_names, file_names in os.walk(ldir): for file_name in file_names: if not file_name.endswith('.po'): continue print_out("Processing file '{:}' in {:}".format(file_name, dir_path)) file_path = os.path.join(dir_path, file_name) if has_bom(file_path): raise RuntimeError( "The '{:}' file has a BOM (Byte Order Mark). " "Verboselib supports only .po files encoded in UTF-8 " "and without any BOM.".format(file_path)) prefix = os.path.splitext(file_path)[0] args = [ program, '--check-format', '-o', native_path(prefix + '.mo'), native_path(prefix + '.po'), ] output, errors, status = popen_wrapper(args) if status: if errors: msg = "Execution of %s failed: %s" % (program, errors) else: msg = "Execution of %s failed" % program raise RuntimeError(msg)
[ "def", "execute", "(", "prog_name", ",", "args", "=", "None", ")", ":", "args", "=", "_get_parser", "(", ")", ".", "parse_args", "(", "args", "or", "[", "]", ")", "locale", ",", "locale_dir", "=", "args", ".", "locale", ",", "args", ".", "locale_dir"...
Adapted `compilemessages <http://bit.ly/1r3glSu>`_ command from Django.
[ "Adapted", "compilemessages", "<http", ":", "//", "bit", ".", "ly", "/", "1r3glSu", ">", "_", "command", "from", "Django", "." ]
train
https://github.com/oblalex/verboselib/blob/3c108bef060b091e1f7c08861ab07672c87ddcff/verboselib/management/commands/compile.py#L20-L68
GustavePate/distarkcli
distarkcli/utils/zoo.py
ZooMock.getConf
def getConf(self, conftype): ''' conftype must be a Zooborg constant ''' if conftype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]: raise Exception('Zooborg.getConf: invalid type') zooconf={} #TODO: specialconf entries for the mock if conftype == ZooConst.CLIENT: zooconf['broker'] = {} zooconf['broker']['connectionstr'] = b"tcp://localhost:5555" elif conftype == ZooConst.BROKER: zooconf['bindstr']=b"tcp://*:5555" elif conftype == ZooConst.WORKER: zooconf['broker'] = {} zooconf['broker']['connectionstr'] = b"tcp://localhost:5555" else: raise Exception("ZooBorgconftype unknown") return zooconf
python
def getConf(self, conftype): ''' conftype must be a Zooborg constant ''' if conftype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]: raise Exception('Zooborg.getConf: invalid type') zooconf={} #TODO: specialconf entries for the mock if conftype == ZooConst.CLIENT: zooconf['broker'] = {} zooconf['broker']['connectionstr'] = b"tcp://localhost:5555" elif conftype == ZooConst.BROKER: zooconf['bindstr']=b"tcp://*:5555" elif conftype == ZooConst.WORKER: zooconf['broker'] = {} zooconf['broker']['connectionstr'] = b"tcp://localhost:5555" else: raise Exception("ZooBorgconftype unknown") return zooconf
[ "def", "getConf", "(", "self", ",", "conftype", ")", ":", "if", "conftype", "not", "in", "[", "ZooConst", ".", "CLIENT", ",", "ZooConst", ".", "WORKER", ",", "ZooConst", ".", "BROKER", "]", ":", "raise", "Exception", "(", "'Zooborg.getConf: invalid type'", ...
conftype must be a Zooborg constant
[ "conftype", "must", "be", "a", "Zooborg", "constant" ]
train
https://github.com/GustavePate/distarkcli/blob/44b0e637e94ebb2687a1b7e2f6c5d0658d775238/distarkcli/utils/zoo.py#L35-L61
GustavePate/distarkcli
distarkcli/utils/zoo.py
ZooBorg.register
def register(self, itemtype, item_id, handler): ''' register the item in zookeeper /list/ itemtype must be a Zooborg constant item_id must be a string handler: method to call on conf change ''' # Create a node with data #TODO: add system properties in data (ip, os) #TODO: add uniq client id if itemtype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]: raise Exception('Zooborg.register: invalid type') self.initconn() self.zk.ensure_path("/distark/" + itemtype + "/list") path=''.join(['/distark/' + itemtype + '/list/', item_id]) self.registred.append(path) data=b'ip̂,os' if not(self.zk.exists(path)): self.zk.create(path, data, None, True) else: self.zk.delete(path, recursive=True) self.zk.create(path, data, None, True) #reload conf if change in zoo self.zk.DataWatch('/distark/' + itemtype + '/conf/conf_reload_trigger', handler)
python
def register(self, itemtype, item_id, handler): ''' register the item in zookeeper /list/ itemtype must be a Zooborg constant item_id must be a string handler: method to call on conf change ''' # Create a node with data #TODO: add system properties in data (ip, os) #TODO: add uniq client id if itemtype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]: raise Exception('Zooborg.register: invalid type') self.initconn() self.zk.ensure_path("/distark/" + itemtype + "/list") path=''.join(['/distark/' + itemtype + '/list/', item_id]) self.registred.append(path) data=b'ip̂,os' if not(self.zk.exists(path)): self.zk.create(path, data, None, True) else: self.zk.delete(path, recursive=True) self.zk.create(path, data, None, True) #reload conf if change in zoo self.zk.DataWatch('/distark/' + itemtype + '/conf/conf_reload_trigger', handler)
[ "def", "register", "(", "self", ",", "itemtype", ",", "item_id", ",", "handler", ")", ":", "# Create a node with data", "#TODO: add system properties in data (ip, os)", "#TODO: add uniq client id", "if", "itemtype", "not", "in", "[", "ZooConst", ".", "CLIENT", ",", "Z...
register the item in zookeeper /list/ itemtype must be a Zooborg constant item_id must be a string handler: method to call on conf change
[ "register", "the", "item", "in", "zookeeper", "/", "list", "/", "itemtype", "must", "be", "a", "Zooborg", "constant", "item_id", "must", "be", "a", "string", "handler", ":", "method", "to", "call", "on", "conf", "change" ]
train
https://github.com/GustavePate/distarkcli/blob/44b0e637e94ebb2687a1b7e2f6c5d0658d775238/distarkcli/utils/zoo.py#L135-L159
GustavePate/distarkcli
distarkcli/utils/zoo.py
ZooBorg.unregister
def unregister(self, itemtype, item_id): ''' deregister the item in zookeeper /list/ itemtype must be a Zooborg constant item_id must be a string ''' if itemtype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]: raise Exception('Zooborg.unregister: invalid type') self.initconn() self.zk.ensure_path("/distark/" + itemtype + "/list") path=''.join(['/distark/' + itemtype + '/list/', item_id]) if self.zk.exists(path): self.zk.delete(path, recursive=True)
python
def unregister(self, itemtype, item_id): ''' deregister the item in zookeeper /list/ itemtype must be a Zooborg constant item_id must be a string ''' if itemtype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]: raise Exception('Zooborg.unregister: invalid type') self.initconn() self.zk.ensure_path("/distark/" + itemtype + "/list") path=''.join(['/distark/' + itemtype + '/list/', item_id]) if self.zk.exists(path): self.zk.delete(path, recursive=True)
[ "def", "unregister", "(", "self", ",", "itemtype", ",", "item_id", ")", ":", "if", "itemtype", "not", "in", "[", "ZooConst", ".", "CLIENT", ",", "ZooConst", ".", "WORKER", ",", "ZooConst", ".", "BROKER", "]", ":", "raise", "Exception", "(", "'Zooborg.unr...
deregister the item in zookeeper /list/ itemtype must be a Zooborg constant item_id must be a string
[ "deregister", "the", "item", "in", "zookeeper", "/", "list", "/", "itemtype", "must", "be", "a", "Zooborg", "constant", "item_id", "must", "be", "a", "string" ]
train
https://github.com/GustavePate/distarkcli/blob/44b0e637e94ebb2687a1b7e2f6c5d0658d775238/distarkcli/utils/zoo.py#L161-L173
GustavePate/distarkcli
distarkcli/utils/zoo.py
ZooBorg.getList
def getList(self, listtype): ''' listtype must be a Zooborg constant ''' if listtype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]: raise Exception('Zooborg.getList: invalid type') self.initconn() return self.zk.get_children('/distark/' + listtype + '/list')
python
def getList(self, listtype): ''' listtype must be a Zooborg constant ''' if listtype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]: raise Exception('Zooborg.getList: invalid type') self.initconn() return self.zk.get_children('/distark/' + listtype + '/list')
[ "def", "getList", "(", "self", ",", "listtype", ")", ":", "if", "listtype", "not", "in", "[", "ZooConst", ".", "CLIENT", ",", "ZooConst", ".", "WORKER", ",", "ZooConst", ".", "BROKER", "]", ":", "raise", "Exception", "(", "'Zooborg.getList: invalid type'", ...
listtype must be a Zooborg constant
[ "listtype", "must", "be", "a", "Zooborg", "constant" ]
train
https://github.com/GustavePate/distarkcli/blob/44b0e637e94ebb2687a1b7e2f6c5d0658d775238/distarkcli/utils/zoo.py#L175-L182
GustavePate/distarkcli
distarkcli/utils/zoo.py
ZooBorg.getConf
def getConf(self, conftype): ''' conftype must be a Zooborg constant ''' zooconf={} if conftype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]: raise Exception('Zooborg.getConf: invalid type') self.initconn() if conftype in [ZooConst.CLIENT, ZooConst.WORKER]: zooconf={'broker': {'connectionstr': None}} zoopath='/distark/' + conftype + '/conf/broker/connectionstr' zooconf['broker']['connectionstr'], stat = self.zk.get(zoopath) if conftype in [ZooConst.BROKER]: zooconf={'bindstr': None} zoopath='/distark/' + conftype + '/conf/bindstr' zooconf['bindstr'], stat = self.zk.get(zoopath) return zooconf
python
def getConf(self, conftype): ''' conftype must be a Zooborg constant ''' zooconf={} if conftype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]: raise Exception('Zooborg.getConf: invalid type') self.initconn() if conftype in [ZooConst.CLIENT, ZooConst.WORKER]: zooconf={'broker': {'connectionstr': None}} zoopath='/distark/' + conftype + '/conf/broker/connectionstr' zooconf['broker']['connectionstr'], stat = self.zk.get(zoopath) if conftype in [ZooConst.BROKER]: zooconf={'bindstr': None} zoopath='/distark/' + conftype + '/conf/bindstr' zooconf['bindstr'], stat = self.zk.get(zoopath) return zooconf
[ "def", "getConf", "(", "self", ",", "conftype", ")", ":", "zooconf", "=", "{", "}", "if", "conftype", "not", "in", "[", "ZooConst", ".", "CLIENT", ",", "ZooConst", ".", "WORKER", ",", "ZooConst", ".", "BROKER", "]", ":", "raise", "Exception", "(", "'...
conftype must be a Zooborg constant
[ "conftype", "must", "be", "a", "Zooborg", "constant" ]
train
https://github.com/GustavePate/distarkcli/blob/44b0e637e94ebb2687a1b7e2f6c5d0658d775238/distarkcli/utils/zoo.py#L193-L212
yprez/django-useful
useful/helpers/get_object_or_none.py
get_object_or_none
def get_object_or_none(model, *args, **kwargs): """ Like get_object_or_404, but doesn't throw an exception. Allows querying for an object that might not exist without triggering an exception. """ try: return model._default_manager.get(*args, **kwargs) except model.DoesNotExist: return None
python
def get_object_or_none(model, *args, **kwargs): """ Like get_object_or_404, but doesn't throw an exception. Allows querying for an object that might not exist without triggering an exception. """ try: return model._default_manager.get(*args, **kwargs) except model.DoesNotExist: return None
[ "def", "get_object_or_none", "(", "model", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "model", ".", "_default_manager", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "model", ".", "DoesNotExist", ...
Like get_object_or_404, but doesn't throw an exception. Allows querying for an object that might not exist without triggering an exception.
[ "Like", "get_object_or_404", "but", "doesn", "t", "throw", "an", "exception", "." ]
train
https://github.com/yprez/django-useful/blob/288aa46df6f40fb0323c3d0c0efcded887472538/useful/helpers/get_object_or_none.py#L1-L11
duniter/duniter-python-api
duniterpy/api/client.py
parse_text
def parse_text(text: str, schema: dict) -> Any: """ Validate and parse the BMA answer from websocket :param text: the bma answer :param schema: dict for jsonschema :return: the json data """ try: data = json.loads(text) jsonschema.validate(data, schema) except (TypeError, json.decoder.JSONDecodeError): raise jsonschema.ValidationError("Could not parse json") return data
python
def parse_text(text: str, schema: dict) -> Any: """ Validate and parse the BMA answer from websocket :param text: the bma answer :param schema: dict for jsonschema :return: the json data """ try: data = json.loads(text) jsonschema.validate(data, schema) except (TypeError, json.decoder.JSONDecodeError): raise jsonschema.ValidationError("Could not parse json") return data
[ "def", "parse_text", "(", "text", ":", "str", ",", "schema", ":", "dict", ")", "->", "Any", ":", "try", ":", "data", "=", "json", ".", "loads", "(", "text", ")", "jsonschema", ".", "validate", "(", "data", ",", "schema", ")", "except", "(", "TypeEr...
Validate and parse the BMA answer from websocket :param text: the bma answer :param schema: dict for jsonschema :return: the json data
[ "Validate", "and", "parse", "the", "BMA", "answer", "from", "websocket" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/client.py#L37-L51
duniter/duniter-python-api
duniterpy/api/client.py
parse_error
def parse_error(text: str) -> Any: """ Validate and parse the BMA answer from websocket :param text: the bma error :return: the json data """ try: data = json.loads(text) jsonschema.validate(data, ERROR_SCHEMA) except (TypeError, json.decoder.JSONDecodeError) as e: raise jsonschema.ValidationError("Could not parse json : {0}".format(str(e))) return data
python
def parse_error(text: str) -> Any: """ Validate and parse the BMA answer from websocket :param text: the bma error :return: the json data """ try: data = json.loads(text) jsonschema.validate(data, ERROR_SCHEMA) except (TypeError, json.decoder.JSONDecodeError) as e: raise jsonschema.ValidationError("Could not parse json : {0}".format(str(e))) return data
[ "def", "parse_error", "(", "text", ":", "str", ")", "->", "Any", ":", "try", ":", "data", "=", "json", ".", "loads", "(", "text", ")", "jsonschema", ".", "validate", "(", "data", ",", "ERROR_SCHEMA", ")", "except", "(", "TypeError", ",", "json", ".",...
Validate and parse the BMA answer from websocket :param text: the bma error :return: the json data
[ "Validate", "and", "parse", "the", "BMA", "answer", "from", "websocket" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/client.py#L54-L67
duniter/duniter-python-api
duniterpy/api/client.py
parse_response
async def parse_response(response: ClientResponse, schema: dict) -> Any: """ Validate and parse the BMA answer :param response: Response of aiohttp request :param schema: The expected response structure :return: the json data """ try: data = await response.json() response.close() if schema is not None: jsonschema.validate(data, schema) return data except (TypeError, json.decoder.JSONDecodeError) as e: raise jsonschema.ValidationError("Could not parse json : {0}".format(str(e)))
python
async def parse_response(response: ClientResponse, schema: dict) -> Any: """ Validate and parse the BMA answer :param response: Response of aiohttp request :param schema: The expected response structure :return: the json data """ try: data = await response.json() response.close() if schema is not None: jsonschema.validate(data, schema) return data except (TypeError, json.decoder.JSONDecodeError) as e: raise jsonschema.ValidationError("Could not parse json : {0}".format(str(e)))
[ "async", "def", "parse_response", "(", "response", ":", "ClientResponse", ",", "schema", ":", "dict", ")", "->", "Any", ":", "try", ":", "data", "=", "await", "response", ".", "json", "(", ")", "response", ".", "close", "(", ")", "if", "schema", "is", ...
Validate and parse the BMA answer :param response: Response of aiohttp request :param schema: The expected response structure :return: the json data
[ "Validate", "and", "parse", "the", "BMA", "answer" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/client.py#L70-L85
duniter/duniter-python-api
duniterpy/api/client.py
API.reverse_url
def reverse_url(self, scheme: str, path: str) -> str: """ Reverses the url using scheme and path given in parameter. :param scheme: Scheme of the url :param path: Path of the url :return: """ # remove starting slash in path if present path = path.lstrip('/') server, port = self.connection_handler.server, self.connection_handler.port if self.connection_handler.path: url = '{scheme}://{server}:{port}/{path}'.format(scheme=scheme, server=server, port=port, path=path) else: url = '{scheme}://{server}:{port}/'.format(scheme=scheme, server=server, port=port) return url + path
python
def reverse_url(self, scheme: str, path: str) -> str: """ Reverses the url using scheme and path given in parameter. :param scheme: Scheme of the url :param path: Path of the url :return: """ # remove starting slash in path if present path = path.lstrip('/') server, port = self.connection_handler.server, self.connection_handler.port if self.connection_handler.path: url = '{scheme}://{server}:{port}/{path}'.format(scheme=scheme, server=server, port=port, path=path) else: url = '{scheme}://{server}:{port}/'.format(scheme=scheme, server=server, port=port) return url + path
[ "def", "reverse_url", "(", "self", ",", "scheme", ":", "str", ",", "path", ":", "str", ")", "->", "str", ":", "# remove starting slash in path if present", "path", "=", "path", ".", "lstrip", "(", "'/'", ")", "server", ",", "port", "=", "self", ".", "con...
Reverses the url using scheme and path given in parameter. :param scheme: Scheme of the url :param path: Path of the url :return:
[ "Reverses", "the", "url", "using", "scheme", "and", "path", "given", "in", "parameter", "." ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/client.py#L103-L125
duniter/duniter-python-api
duniterpy/api/client.py
API.requests_get
async def requests_get(self, path: str, **kwargs) -> ClientResponse: """ Requests GET wrapper in order to use API parameters. :param path: the request path :return: """ logging.debug("Request : {0}".format(self.reverse_url(self.connection_handler.http_scheme, path))) url = self.reverse_url(self.connection_handler.http_scheme, path) response = await self.connection_handler.session.get(url, params=kwargs, headers=self.headers, proxy=self.connection_handler.proxy, timeout=15) if response.status != 200: try: error_data = parse_error(await response.text()) raise DuniterError(error_data) except (TypeError, jsonschema.ValidationError): raise ValueError('status code != 200 => %d (%s)' % (response.status, (await response.text()))) return response
python
async def requests_get(self, path: str, **kwargs) -> ClientResponse: """ Requests GET wrapper in order to use API parameters. :param path: the request path :return: """ logging.debug("Request : {0}".format(self.reverse_url(self.connection_handler.http_scheme, path))) url = self.reverse_url(self.connection_handler.http_scheme, path) response = await self.connection_handler.session.get(url, params=kwargs, headers=self.headers, proxy=self.connection_handler.proxy, timeout=15) if response.status != 200: try: error_data = parse_error(await response.text()) raise DuniterError(error_data) except (TypeError, jsonschema.ValidationError): raise ValueError('status code != 200 => %d (%s)' % (response.status, (await response.text()))) return response
[ "async", "def", "requests_get", "(", "self", ",", "path", ":", "str", ",", "*", "*", "kwargs", ")", "->", "ClientResponse", ":", "logging", ".", "debug", "(", "\"Request : {0}\"", ".", "format", "(", "self", ".", "reverse_url", "(", "self", ".", "connect...
Requests GET wrapper in order to use API parameters. :param path: the request path :return:
[ "Requests", "GET", "wrapper", "in", "order", "to", "use", "API", "parameters", "." ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/client.py#L127-L146
duniter/duniter-python-api
duniterpy/api/client.py
API.requests_post
async def requests_post(self, path: str, **kwargs) -> ClientResponse: """ Requests POST wrapper in order to use API parameters. :param path: the request path :return: """ if 'self_' in kwargs: kwargs['self'] = kwargs.pop('self_') logging.debug("POST : {0}".format(kwargs)) response = await self.connection_handler.session.post( self.reverse_url(self.connection_handler.http_scheme, path), data=kwargs, headers=self.headers, proxy=self.connection_handler.proxy, timeout=15 ) return response
python
async def requests_post(self, path: str, **kwargs) -> ClientResponse: """ Requests POST wrapper in order to use API parameters. :param path: the request path :return: """ if 'self_' in kwargs: kwargs['self'] = kwargs.pop('self_') logging.debug("POST : {0}".format(kwargs)) response = await self.connection_handler.session.post( self.reverse_url(self.connection_handler.http_scheme, path), data=kwargs, headers=self.headers, proxy=self.connection_handler.proxy, timeout=15 ) return response
[ "async", "def", "requests_post", "(", "self", ",", "path", ":", "str", ",", "*", "*", "kwargs", ")", "->", "ClientResponse", ":", "if", "'self_'", "in", "kwargs", ":", "kwargs", "[", "'self'", "]", "=", "kwargs", ".", "pop", "(", "'self_'", ")", "log...
Requests POST wrapper in order to use API parameters. :param path: the request path :return:
[ "Requests", "POST", "wrapper", "in", "order", "to", "use", "API", "parameters", "." ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/client.py#L148-L166
duniter/duniter-python-api
duniterpy/api/client.py
API.connect_ws
def connect_ws(self, path: str) -> _WSRequestContextManager: """ Connect to a websocket in order to use API parameters In reality, aiohttp.session.ws_connect returns a aiohttp.client._WSRequestContextManager instance. It must be used in a with statement to get the ClientWebSocketResponse instance from it (__aenter__). At the end of the with statement, aiohttp.client._WSRequestContextManager.__aexit__ is called and close the ClientWebSocketResponse in it. :param path: the url path :return: """ url = self.reverse_url(self.connection_handler.ws_scheme, path) return self.connection_handler.session.ws_connect(url, proxy=self.connection_handler.proxy)
python
def connect_ws(self, path: str) -> _WSRequestContextManager: """ Connect to a websocket in order to use API parameters In reality, aiohttp.session.ws_connect returns a aiohttp.client._WSRequestContextManager instance. It must be used in a with statement to get the ClientWebSocketResponse instance from it (__aenter__). At the end of the with statement, aiohttp.client._WSRequestContextManager.__aexit__ is called and close the ClientWebSocketResponse in it. :param path: the url path :return: """ url = self.reverse_url(self.connection_handler.ws_scheme, path) return self.connection_handler.session.ws_connect(url, proxy=self.connection_handler.proxy)
[ "def", "connect_ws", "(", "self", ",", "path", ":", "str", ")", "->", "_WSRequestContextManager", ":", "url", "=", "self", ".", "reverse_url", "(", "self", ".", "connection_handler", ".", "ws_scheme", ",", "path", ")", "return", "self", ".", "connection_hand...
Connect to a websocket in order to use API parameters In reality, aiohttp.session.ws_connect returns a aiohttp.client._WSRequestContextManager instance. It must be used in a with statement to get the ClientWebSocketResponse instance from it (__aenter__). At the end of the with statement, aiohttp.client._WSRequestContextManager.__aexit__ is called and close the ClientWebSocketResponse in it. :param path: the url path :return:
[ "Connect", "to", "a", "websocket", "in", "order", "to", "use", "API", "parameters" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/client.py#L168-L181
duniter/duniter-python-api
duniterpy/api/client.py
Client.post
async def post(self, url_path: str, params: dict = None, rtype: str = RESPONSE_JSON, schema: dict = None) -> Any: """ POST request on self.endpoint + url_path :param url_path: Url encoded path following the endpoint :param params: Url query string parameters dictionary :param rtype: Response type :param schema: Json Schema to validate response (optional, default None) :return: """ if params is None: params = dict() client = API(self.endpoint.conn_handler(self.session, self.proxy)) # get aiohttp response response = await client.requests_post(url_path, **params) # if schema supplied... if schema is not None: # validate response await parse_response(response, schema) # return the chosen type if rtype == RESPONSE_AIOHTTP: return response elif rtype == RESPONSE_TEXT: return await response.text() elif rtype == RESPONSE_JSON: return await response.json()
python
async def post(self, url_path: str, params: dict = None, rtype: str = RESPONSE_JSON, schema: dict = None) -> Any: """ POST request on self.endpoint + url_path :param url_path: Url encoded path following the endpoint :param params: Url query string parameters dictionary :param rtype: Response type :param schema: Json Schema to validate response (optional, default None) :return: """ if params is None: params = dict() client = API(self.endpoint.conn_handler(self.session, self.proxy)) # get aiohttp response response = await client.requests_post(url_path, **params) # if schema supplied... if schema is not None: # validate response await parse_response(response, schema) # return the chosen type if rtype == RESPONSE_AIOHTTP: return response elif rtype == RESPONSE_TEXT: return await response.text() elif rtype == RESPONSE_JSON: return await response.json()
[ "async", "def", "post", "(", "self", ",", "url_path", ":", "str", ",", "params", ":", "dict", "=", "None", ",", "rtype", ":", "str", "=", "RESPONSE_JSON", ",", "schema", ":", "dict", "=", "None", ")", "->", "Any", ":", "if", "params", "is", "None",...
POST request on self.endpoint + url_path :param url_path: Url encoded path following the endpoint :param params: Url query string parameters dictionary :param rtype: Response type :param schema: Json Schema to validate response (optional, default None) :return:
[ "POST", "request", "on", "self", ".", "endpoint", "+", "url_path" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/client.py#L246-L275
duniter/duniter-python-api
duniterpy/api/client.py
Client.connect_ws
def connect_ws(self, path: str) -> _WSRequestContextManager: """ Connect to a websocket in order to use API parameters :param path: the url path :return: """ client = API(self.endpoint.conn_handler(self.session, self.proxy)) return client.connect_ws(path)
python
def connect_ws(self, path: str) -> _WSRequestContextManager: """ Connect to a websocket in order to use API parameters :param path: the url path :return: """ client = API(self.endpoint.conn_handler(self.session, self.proxy)) return client.connect_ws(path)
[ "def", "connect_ws", "(", "self", ",", "path", ":", "str", ")", "->", "_WSRequestContextManager", ":", "client", "=", "API", "(", "self", ".", "endpoint", ".", "conn_handler", "(", "self", ".", "session", ",", "self", ".", "proxy", ")", ")", "return", ...
Connect to a websocket in order to use API parameters :param path: the url path :return:
[ "Connect", "to", "a", "websocket", "in", "order", "to", "use", "API", "parameters" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/client.py#L277-L285
rduplain/jeni-python
setup.py
extract_version
def extract_version(filepath='jeni.py', name='__version__'): """Parse __version__ out of given Python file. Given jeni.py has dependencies, `from jeni import __version__` will fail. """ context = {} for line in open(filepath): if name in line: exec(line, context) break else: raise RuntimeError('{} not found in {}'.format(name, filepath)) return context[name]
python
def extract_version(filepath='jeni.py', name='__version__'): """Parse __version__ out of given Python file. Given jeni.py has dependencies, `from jeni import __version__` will fail. """ context = {} for line in open(filepath): if name in line: exec(line, context) break else: raise RuntimeError('{} not found in {}'.format(name, filepath)) return context[name]
[ "def", "extract_version", "(", "filepath", "=", "'jeni.py'", ",", "name", "=", "'__version__'", ")", ":", "context", "=", "{", "}", "for", "line", "in", "open", "(", "filepath", ")", ":", "if", "name", "in", "line", ":", "exec", "(", "line", ",", "co...
Parse __version__ out of given Python file. Given jeni.py has dependencies, `from jeni import __version__` will fail.
[ "Parse", "__version__", "out", "of", "given", "Python", "file", "." ]
train
https://github.com/rduplain/jeni-python/blob/feca12ce5e4f0438ae5d7bec59d61826063594f1/setup.py#L25-L37
mozilla/socorrolib
socorrolib/lib/sqlutils.py
quote_value
def quote_value(value): """return the value ready to be used as a value in a SQL string. For example you can safely do this: cursor.execute('select * from table where key = %s' % quote_value(val)) and you don't have to worry about possible SQL injections. """ adapted = adapt(value) if hasattr(adapted, 'getquoted'): adapted = adapted.getquoted() return adapted
python
def quote_value(value): """return the value ready to be used as a value in a SQL string. For example you can safely do this: cursor.execute('select * from table where key = %s' % quote_value(val)) and you don't have to worry about possible SQL injections. """ adapted = adapt(value) if hasattr(adapted, 'getquoted'): adapted = adapted.getquoted() return adapted
[ "def", "quote_value", "(", "value", ")", ":", "adapted", "=", "adapt", "(", "value", ")", "if", "hasattr", "(", "adapted", ",", "'getquoted'", ")", ":", "adapted", "=", "adapted", ".", "getquoted", "(", ")", "return", "adapted" ]
return the value ready to be used as a value in a SQL string. For example you can safely do this: cursor.execute('select * from table where key = %s' % quote_value(val)) and you don't have to worry about possible SQL injections.
[ "return", "the", "value", "ready", "to", "be", "used", "as", "a", "value", "in", "a", "SQL", "string", "." ]
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/sqlutils.py#L8-L20
azraq27/neural
neural/general.py
Analyze.load
def load(self,dset): '''load a dataset from given filename into the object''' self.dset_filename = dset self.dset = nib.load(dset) self.data = self.dset.get_data() self.header = self.dset.get_header()
python
def load(self,dset): '''load a dataset from given filename into the object''' self.dset_filename = dset self.dset = nib.load(dset) self.data = self.dset.get_data() self.header = self.dset.get_header()
[ "def", "load", "(", "self", ",", "dset", ")", ":", "self", ".", "dset_filename", "=", "dset", "self", ".", "dset", "=", "nib", ".", "load", "(", "dset", ")", "self", ".", "data", "=", "self", ".", "dset", ".", "get_data", "(", ")", "self", ".", ...
load a dataset from given filename into the object
[ "load", "a", "dataset", "from", "given", "filename", "into", "the", "object" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/general.py#L17-L22
azraq27/neural
neural/general.py
Analyze.voxel_loop
def voxel_loop(self): '''iterator that loops through each voxel and yields the coords and time series as a tuple''' # Prob not the most efficient, but the best I can do for now: for x in xrange(len(self.data)): for y in xrange(len(self.data[x])): for z in xrange(len(self.data[x][y])): yield ((x,y,z),self.data[x][y][z])
python
def voxel_loop(self): '''iterator that loops through each voxel and yields the coords and time series as a tuple''' # Prob not the most efficient, but the best I can do for now: for x in xrange(len(self.data)): for y in xrange(len(self.data[x])): for z in xrange(len(self.data[x][y])): yield ((x,y,z),self.data[x][y][z])
[ "def", "voxel_loop", "(", "self", ")", ":", "# Prob not the most efficient, but the best I can do for now:", "for", "x", "in", "xrange", "(", "len", "(", "self", ".", "data", ")", ")", ":", "for", "y", "in", "xrange", "(", "len", "(", "self", ".", "data", ...
iterator that loops through each voxel and yields the coords and time series as a tuple
[ "iterator", "that", "loops", "through", "each", "voxel", "and", "yields", "the", "coords", "and", "time", "series", "as", "a", "tuple" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/general.py#L24-L30
20c/tmpl
tmpl/__init__.py
get_engine
def get_engine(name): """ get an engine from string (engine class without Engine) """ name = name.capitalize() + 'Engine' if name in globals(): return globals()[name] raise KeyError("engine '%s' does not exist" % name)
python
def get_engine(name): """ get an engine from string (engine class without Engine) """ name = name.capitalize() + 'Engine' if name in globals(): return globals()[name] raise KeyError("engine '%s' does not exist" % name)
[ "def", "get_engine", "(", "name", ")", ":", "name", "=", "name", ".", "capitalize", "(", ")", "+", "'Engine'", "if", "name", "in", "globals", "(", ")", ":", "return", "globals", "(", ")", "[", "name", "]", "raise", "KeyError", "(", "\"engine '%s' does ...
get an engine from string (engine class without Engine)
[ "get", "an", "engine", "from", "string", "(", "engine", "class", "without", "Engine", ")" ]
train
https://github.com/20c/tmpl/blob/ed24d3b744353c93735f370a2b989ed322960ed9/tmpl/__init__.py#L10-L19
bitesofcode/xqt
xqt/wrappers/pyside.py
init
def init(scope): """ Initialize the xqt system with the PySide wrapper for the Qt system. :param scope | <dict> """ # define wrapper compatibility symbols QtCore.THREADSAFE_NONE = XThreadNone() QtGui.QDialog = QDialog # define the importable symbols scope['QtCore'] = QtCore scope['QtGui'] = QtGui scope['QtWebKit'] = lazy_import('PySide.QtWebKit') scope['QtNetwork'] = lazy_import('PySide.QtNetwork') scope['QtXml'] = lazy_import('PySide.QtXml') scope['uic'] = Uic() scope['rcc_exe'] = 'pyside-rcc' # map overrides #QtCore.SIGNAL = SIGNAL # map shared core properties QtCore.QDate.toPyDate = lambda x: x.toPython() QtCore.QDateTime.toPyDateTime = lambda x: x.toPython() QtCore.QTime.toPyTime = lambda x: x.toPython() QtCore.QStringList = list QtCore.QString = unicode
python
def init(scope): """ Initialize the xqt system with the PySide wrapper for the Qt system. :param scope | <dict> """ # define wrapper compatibility symbols QtCore.THREADSAFE_NONE = XThreadNone() QtGui.QDialog = QDialog # define the importable symbols scope['QtCore'] = QtCore scope['QtGui'] = QtGui scope['QtWebKit'] = lazy_import('PySide.QtWebKit') scope['QtNetwork'] = lazy_import('PySide.QtNetwork') scope['QtXml'] = lazy_import('PySide.QtXml') scope['uic'] = Uic() scope['rcc_exe'] = 'pyside-rcc' # map overrides #QtCore.SIGNAL = SIGNAL # map shared core properties QtCore.QDate.toPyDate = lambda x: x.toPython() QtCore.QDateTime.toPyDateTime = lambda x: x.toPython() QtCore.QTime.toPyTime = lambda x: x.toPython() QtCore.QStringList = list QtCore.QString = unicode
[ "def", "init", "(", "scope", ")", ":", "# define wrapper compatibility symbols\r", "QtCore", ".", "THREADSAFE_NONE", "=", "XThreadNone", "(", ")", "QtGui", ".", "QDialog", "=", "QDialog", "# define the importable symbols\r", "scope", "[", "'QtCore'", "]", "=", "QtCo...
Initialize the xqt system with the PySide wrapper for the Qt system. :param scope | <dict>
[ "Initialize", "the", "xqt", "system", "with", "the", "PySide", "wrapper", "for", "the", "Qt", "system", ".", ":", "param", "scope", "|", "<dict", ">" ]
train
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/wrappers/pyside.py#L257-L285
bitesofcode/xqt
xqt/wrappers/pyside.py
UiLoader.createAction
def createAction(self, parent=None, name=''): """ Overloads teh create action method to handle the proper base instance information, similar to the PyQt4 loading system. :param parent | <QWidget> || None name | <str> """ action = super(UiLoader, self).createAction(parent, name) if not action.parent(): action.setParent(self._baseinstance) setattr(self._baseinstance, name, action) return action
python
def createAction(self, parent=None, name=''): """ Overloads teh create action method to handle the proper base instance information, similar to the PyQt4 loading system. :param parent | <QWidget> || None name | <str> """ action = super(UiLoader, self).createAction(parent, name) if not action.parent(): action.setParent(self._baseinstance) setattr(self._baseinstance, name, action) return action
[ "def", "createAction", "(", "self", ",", "parent", "=", "None", ",", "name", "=", "''", ")", ":", "action", "=", "super", "(", "UiLoader", ",", "self", ")", ".", "createAction", "(", "parent", ",", "name", ")", "if", "not", "action", ".", "parent", ...
Overloads teh create action method to handle the proper base instance information, similar to the PyQt4 loading system. :param parent | <QWidget> || None name | <str>
[ "Overloads", "teh", "create", "action", "method", "to", "handle", "the", "proper", "base", "instance", "information", "similar", "to", "the", "PyQt4", "loading", "system", ".", ":", "param", "parent", "|", "<QWidget", ">", "||", "None", "name", "|", "<str", ...
train
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/wrappers/pyside.py#L95-L107
bitesofcode/xqt
xqt/wrappers/pyside.py
UiLoader.createActionGroup
def createActionGroup(self, parent=None, name=''): """ Overloads teh create action method to handle the proper base instance information, similar to the PyQt4 loading system. :param parent | <QWidget> || None name | <str> """ actionGroup = super(UiLoader, self).createActionGroup(parent, name) if not actionGroup.parent(): actionGroup.setParent(self._baseinstance) setattr(self._baseinstance, name, actionGroup) return actionGroup
python
def createActionGroup(self, parent=None, name=''): """ Overloads teh create action method to handle the proper base instance information, similar to the PyQt4 loading system. :param parent | <QWidget> || None name | <str> """ actionGroup = super(UiLoader, self).createActionGroup(parent, name) if not actionGroup.parent(): actionGroup.setParent(self._baseinstance) setattr(self._baseinstance, name, actionGroup) return actionGroup
[ "def", "createActionGroup", "(", "self", ",", "parent", "=", "None", ",", "name", "=", "''", ")", ":", "actionGroup", "=", "super", "(", "UiLoader", ",", "self", ")", ".", "createActionGroup", "(", "parent", ",", "name", ")", "if", "not", "actionGroup", ...
Overloads teh create action method to handle the proper base instance information, similar to the PyQt4 loading system. :param parent | <QWidget> || None name | <str>
[ "Overloads", "teh", "create", "action", "method", "to", "handle", "the", "proper", "base", "instance", "information", "similar", "to", "the", "PyQt4", "loading", "system", ".", ":", "param", "parent", "|", "<QWidget", ">", "||", "None", "name", "|", "<str", ...
train
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/wrappers/pyside.py#L109-L121
bitesofcode/xqt
xqt/wrappers/pyside.py
UiLoader.createLayout
def createLayout(self, className, parent=None, name=''): """ Overloads teh create action method to handle the proper base instance information, similar to the PyQt4 loading system. :param className | <str> parent | <QWidget> || None name | <str> """ layout = super(UiLoader, self).createLayout(className, parent, name) setattr(self._baseinstance, name, layout) return layout
python
def createLayout(self, className, parent=None, name=''): """ Overloads teh create action method to handle the proper base instance information, similar to the PyQt4 loading system. :param className | <str> parent | <QWidget> || None name | <str> """ layout = super(UiLoader, self).createLayout(className, parent, name) setattr(self._baseinstance, name, layout) return layout
[ "def", "createLayout", "(", "self", ",", "className", ",", "parent", "=", "None", ",", "name", "=", "''", ")", ":", "layout", "=", "super", "(", "UiLoader", ",", "self", ")", ".", "createLayout", "(", "className", ",", "parent", ",", "name", ")", "se...
Overloads teh create action method to handle the proper base instance information, similar to the PyQt4 loading system. :param className | <str> parent | <QWidget> || None name | <str>
[ "Overloads", "teh", "create", "action", "method", "to", "handle", "the", "proper", "base", "instance", "information", "similar", "to", "the", "PyQt4", "loading", "system", ".", ":", "param", "className", "|", "<str", ">", "parent", "|", "<QWidget", ">", "||"...
train
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/wrappers/pyside.py#L123-L134
bitesofcode/xqt
xqt/wrappers/pyside.py
UiLoader.createWidget
def createWidget(self, className, parent=None, name=''): """ Overloads the createWidget method to handle the proper base instance information similar to the PyQt4 loading system. :param className | <str> parent | <QWidget> || None name | <str> :return <QWidget> """ className = str(className) # create a widget off one of our dynamic classes if className in self.dynamicWidgets: widget = self.dynamicWidgets[className](parent) if parent: widget.setPalette(parent.palette()) widget.setObjectName(name) # hack fix on a QWebView (will crash app otherwise) # forces a URL to the QWebView before it finishes if className == 'QWebView': widget.setUrl(QtCore.QUrl('http://www.google.com')) # create a widget from the default system else: widget = super(UiLoader, self).createWidget(className, parent, name) if parent: widget.setPalette(parent.palette()) if parent is None: return self._baseinstance else: setattr(self._baseinstance, name, widget) return widget
python
def createWidget(self, className, parent=None, name=''): """ Overloads the createWidget method to handle the proper base instance information similar to the PyQt4 loading system. :param className | <str> parent | <QWidget> || None name | <str> :return <QWidget> """ className = str(className) # create a widget off one of our dynamic classes if className in self.dynamicWidgets: widget = self.dynamicWidgets[className](parent) if parent: widget.setPalette(parent.palette()) widget.setObjectName(name) # hack fix on a QWebView (will crash app otherwise) # forces a URL to the QWebView before it finishes if className == 'QWebView': widget.setUrl(QtCore.QUrl('http://www.google.com')) # create a widget from the default system else: widget = super(UiLoader, self).createWidget(className, parent, name) if parent: widget.setPalette(parent.palette()) if parent is None: return self._baseinstance else: setattr(self._baseinstance, name, widget) return widget
[ "def", "createWidget", "(", "self", ",", "className", ",", "parent", "=", "None", ",", "name", "=", "''", ")", ":", "className", "=", "str", "(", "className", ")", "# create a widget off one of our dynamic classes\r", "if", "className", "in", "self", ".", "dyn...
Overloads the createWidget method to handle the proper base instance information similar to the PyQt4 loading system. :param className | <str> parent | <QWidget> || None name | <str> :return <QWidget>
[ "Overloads", "the", "createWidget", "method", "to", "handle", "the", "proper", "base", "instance", "information", "similar", "to", "the", "PyQt4", "loading", "system", ".", ":", "param", "className", "|", "<str", ">", "parent", "|", "<QWidget", ">", "||", "N...
train
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/wrappers/pyside.py#L136-L171
bitesofcode/xqt
xqt/wrappers/pyside.py
Uic.loadUi
def loadUi(self, filename, baseinstance=None): """ Generate a loader to load the filename. :param filename | <str> baseinstance | <QWidget> :return <QWidget> || None """ try: xui = ElementTree.parse(filename) except xml.parsers.expat.ExpatError: log.exception('Could not load file: %s' % filename) return None loader = UiLoader(baseinstance) # pre-load custom widgets xcustomwidgets = xui.find('customwidgets') if xcustomwidgets is not None: for xcustom in xcustomwidgets: header = xcustom.find('header').text clsname = xcustom.find('class').text if not header: continue if clsname in loader.dynamicWidgets: continue # modify the C++ headers to use the Python wrapping if '/' in header: header = 'xqt.' + '.'.join(header.split('/')[:-1]) # try to use the custom widgets try: __import__(header) module = sys.modules[header] cls = getattr(module, clsname) except (ImportError, KeyError, AttributeError): log.error('Could not load %s.%s' % (header, clsname)) continue loader.dynamicWidgets[clsname] = cls loader.registerCustomWidget(cls) # load the options ui = loader.load(filename) QtCore.QMetaObject.connectSlotsByName(ui) return ui
python
def loadUi(self, filename, baseinstance=None): """ Generate a loader to load the filename. :param filename | <str> baseinstance | <QWidget> :return <QWidget> || None """ try: xui = ElementTree.parse(filename) except xml.parsers.expat.ExpatError: log.exception('Could not load file: %s' % filename) return None loader = UiLoader(baseinstance) # pre-load custom widgets xcustomwidgets = xui.find('customwidgets') if xcustomwidgets is not None: for xcustom in xcustomwidgets: header = xcustom.find('header').text clsname = xcustom.find('class').text if not header: continue if clsname in loader.dynamicWidgets: continue # modify the C++ headers to use the Python wrapping if '/' in header: header = 'xqt.' + '.'.join(header.split('/')[:-1]) # try to use the custom widgets try: __import__(header) module = sys.modules[header] cls = getattr(module, clsname) except (ImportError, KeyError, AttributeError): log.error('Could not load %s.%s' % (header, clsname)) continue loader.dynamicWidgets[clsname] = cls loader.registerCustomWidget(cls) # load the options ui = loader.load(filename) QtCore.QMetaObject.connectSlotsByName(ui) return ui
[ "def", "loadUi", "(", "self", ",", "filename", ",", "baseinstance", "=", "None", ")", ":", "try", ":", "xui", "=", "ElementTree", ".", "parse", "(", "filename", ")", "except", "xml", ".", "parsers", ".", "expat", ".", "ExpatError", ":", "log", ".", "...
Generate a loader to load the filename. :param filename | <str> baseinstance | <QWidget> :return <QWidget> || None
[ "Generate", "a", "loader", "to", "load", "the", "filename", ".", ":", "param", "filename", "|", "<str", ">", "baseinstance", "|", "<QWidget", ">", ":", "return", "<QWidget", ">", "||", "None" ]
train
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/wrappers/pyside.py#L180-L229
bitesofcode/xqt
xqt/wrappers/pyside.py
QDialog.showEvent
def showEvent(self, event): """ Displays this dialog, centering on its parent. :param event | <QtCore.QShowEvent> """ super(QDialog, self).showEvent(event) if not self._centered: self._centered = True try: window = self.parent().window() center = window.geometry().center() except AttributeError: return else: self.move(center.x() - self.width() / 2, center.y() - self.height() / 2)
python
def showEvent(self, event): """ Displays this dialog, centering on its parent. :param event | <QtCore.QShowEvent> """ super(QDialog, self).showEvent(event) if not self._centered: self._centered = True try: window = self.parent().window() center = window.geometry().center() except AttributeError: return else: self.move(center.x() - self.width() / 2, center.y() - self.height() / 2)
[ "def", "showEvent", "(", "self", ",", "event", ")", ":", "super", "(", "QDialog", ",", "self", ")", ".", "showEvent", "(", "event", ")", "if", "not", "self", ".", "_centered", ":", "self", ".", "_centered", "=", "True", "try", ":", "window", "=", "...
Displays this dialog, centering on its parent. :param event | <QtCore.QShowEvent>
[ "Displays", "this", "dialog", "centering", "on", "its", "parent", ".", ":", "param", "event", "|", "<QtCore", ".", "QShowEvent", ">" ]
train
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/wrappers/pyside.py#L237-L253
skylander86/uriutils
uriutils/uriutils.py
get_uri_obj
def get_uri_obj(uri, storage_args={}): """ Retrieve the underlying storage object based on the URI (i.e., scheme). :param str uri: URI to get storage object for :param dict storage_args: Keyword arguments to pass to the underlying storage object """ if isinstance(uri, BaseURI): return uri uri_obj = None o = urlparse(uri) for storage in STORAGES: uri_obj = storage.parse_uri(o, storage_args=storage_args) if uri_obj is not None: break #end for if uri_obj is None: raise TypeError('<{}> is an unsupported URI.'.format(uri)) return uri_obj
python
def get_uri_obj(uri, storage_args={}): """ Retrieve the underlying storage object based on the URI (i.e., scheme). :param str uri: URI to get storage object for :param dict storage_args: Keyword arguments to pass to the underlying storage object """ if isinstance(uri, BaseURI): return uri uri_obj = None o = urlparse(uri) for storage in STORAGES: uri_obj = storage.parse_uri(o, storage_args=storage_args) if uri_obj is not None: break #end for if uri_obj is None: raise TypeError('<{}> is an unsupported URI.'.format(uri)) return uri_obj
[ "def", "get_uri_obj", "(", "uri", ",", "storage_args", "=", "{", "}", ")", ":", "if", "isinstance", "(", "uri", ",", "BaseURI", ")", ":", "return", "uri", "uri_obj", "=", "None", "o", "=", "urlparse", "(", "uri", ")", "for", "storage", "in", "STORAGE...
Retrieve the underlying storage object based on the URI (i.e., scheme). :param str uri: URI to get storage object for :param dict storage_args: Keyword arguments to pass to the underlying storage object
[ "Retrieve", "the", "underlying", "storage", "object", "based", "on", "the", "URI", "(", "i", ".", "e", ".", "scheme", ")", "." ]
train
https://github.com/skylander86/uriutils/blob/e756d9483ee884973bf3a0c9ad27ae362fbe7fc6/uriutils/uriutils.py#L28-L48
skylander86/uriutils
uriutils/uriutils.py
uri_open
def uri_open(uri, mode='rb', auto_compress=True, in_memory=True, delete_tempfile=True, textio_args={}, storage_args={}): """ Opens a URI for reading / writing. Analogous to the :func:`open` function. This method supports ``with`` context handling:: with uri_open('http://www.example.com', mode='r') as f: print(f.read()) :param str uri: URI of file to open :param str mode: Either ``rb``, ``r``, ``w``, or ``wb`` for read/write modes in binary/text respectiely :param bool auto_compress: Whether to automatically use the :mod:`gzip` module with ``.gz`` URIsF :param bool in_memory: Whether to store entire file in memory or in a local temporary file :param bool delete_tempfile: When :attr:`in_memory` is ``False``, whether to delete the temporary file on close :param dict textio_args: Keyword arguments to pass to :class:`io.TextIOWrapper` for text read/write mode :param dict storage_args: Keyword arguments to pass to the underlying storage object :returns: file-like object to URI """ if isinstance(uri, BaseURI): uri = str(uri) uri_obj = get_uri_obj(uri, storage_args) if mode == 'rb': read_mode, binary_mode = True, True elif mode == 'r': read_mode, binary_mode = True, False elif mode == 'w': read_mode, binary_mode = False, False elif mode == 'wb': read_mode, binary_mode = False, True else: raise TypeError('`mode` cannot be "{}".'.format(mode)) if read_mode: if in_memory: file_obj = BytesIO(uri_obj.get_content()) setattr(file_obj, 'name', str(uri_obj)) else: file_obj = _TemporaryURIFileIO(uri_obj=uri_obj, input_mode=True, delete_tempfile=delete_tempfile) #end if else: if in_memory: file_obj = URIBytesOutput(uri_obj) else: file_obj = _TemporaryURIFileIO(uri_obj=uri_obj, input_mode=False, pre_close_action=uri_obj.upload_file, delete_tempfile=delete_tempfile) setattr(file_obj, 'name', str(uri_obj)) #end if #end if temp_name = getattr(file_obj, 'temp_name', None) if auto_compress: _, ext = os.path.splitext(uri) ext = ext.lower() if ext == '.gz': file_obj = 
gzip.GzipFile(fileobj=file_obj, mode='rb' if read_mode else 'wb') #end if if not binary_mode: textio_args.setdefault('encoding', 'utf-8') file_obj = TextIOWrapper(file_obj, **textio_args) #end if if not hasattr(file_obj, 'temp_name'): setattr(file_obj, 'temp_name', temp_name) return file_obj
python
def uri_open(uri, mode='rb', auto_compress=True, in_memory=True, delete_tempfile=True, textio_args={}, storage_args={}): """ Opens a URI for reading / writing. Analogous to the :func:`open` function. This method supports ``with`` context handling:: with uri_open('http://www.example.com', mode='r') as f: print(f.read()) :param str uri: URI of file to open :param str mode: Either ``rb``, ``r``, ``w``, or ``wb`` for read/write modes in binary/text respectiely :param bool auto_compress: Whether to automatically use the :mod:`gzip` module with ``.gz`` URIsF :param bool in_memory: Whether to store entire file in memory or in a local temporary file :param bool delete_tempfile: When :attr:`in_memory` is ``False``, whether to delete the temporary file on close :param dict textio_args: Keyword arguments to pass to :class:`io.TextIOWrapper` for text read/write mode :param dict storage_args: Keyword arguments to pass to the underlying storage object :returns: file-like object to URI """ if isinstance(uri, BaseURI): uri = str(uri) uri_obj = get_uri_obj(uri, storage_args) if mode == 'rb': read_mode, binary_mode = True, True elif mode == 'r': read_mode, binary_mode = True, False elif mode == 'w': read_mode, binary_mode = False, False elif mode == 'wb': read_mode, binary_mode = False, True else: raise TypeError('`mode` cannot be "{}".'.format(mode)) if read_mode: if in_memory: file_obj = BytesIO(uri_obj.get_content()) setattr(file_obj, 'name', str(uri_obj)) else: file_obj = _TemporaryURIFileIO(uri_obj=uri_obj, input_mode=True, delete_tempfile=delete_tempfile) #end if else: if in_memory: file_obj = URIBytesOutput(uri_obj) else: file_obj = _TemporaryURIFileIO(uri_obj=uri_obj, input_mode=False, pre_close_action=uri_obj.upload_file, delete_tempfile=delete_tempfile) setattr(file_obj, 'name', str(uri_obj)) #end if #end if temp_name = getattr(file_obj, 'temp_name', None) if auto_compress: _, ext = os.path.splitext(uri) ext = ext.lower() if ext == '.gz': file_obj = 
gzip.GzipFile(fileobj=file_obj, mode='rb' if read_mode else 'wb') #end if if not binary_mode: textio_args.setdefault('encoding', 'utf-8') file_obj = TextIOWrapper(file_obj, **textio_args) #end if if not hasattr(file_obj, 'temp_name'): setattr(file_obj, 'temp_name', temp_name) return file_obj
[ "def", "uri_open", "(", "uri", ",", "mode", "=", "'rb'", ",", "auto_compress", "=", "True", ",", "in_memory", "=", "True", ",", "delete_tempfile", "=", "True", ",", "textio_args", "=", "{", "}", ",", "storage_args", "=", "{", "}", ")", ":", "if", "is...
Opens a URI for reading / writing. Analogous to the :func:`open` function. This method supports ``with`` context handling:: with uri_open('http://www.example.com', mode='r') as f: print(f.read()) :param str uri: URI of file to open :param str mode: Either ``rb``, ``r``, ``w``, or ``wb`` for read/write modes in binary/text respectiely :param bool auto_compress: Whether to automatically use the :mod:`gzip` module with ``.gz`` URIsF :param bool in_memory: Whether to store entire file in memory or in a local temporary file :param bool delete_tempfile: When :attr:`in_memory` is ``False``, whether to delete the temporary file on close :param dict textio_args: Keyword arguments to pass to :class:`io.TextIOWrapper` for text read/write mode :param dict storage_args: Keyword arguments to pass to the underlying storage object :returns: file-like object to URI
[ "Opens", "a", "URI", "for", "reading", "/", "writing", ".", "Analogous", "to", "the", ":", "func", ":", "open", "function", ".", "This", "method", "supports", "with", "context", "handling", "::" ]
train
https://github.com/skylander86/uriutils/blob/e756d9483ee884973bf3a0c9ad27ae362fbe7fc6/uriutils/uriutils.py#L52-L111
skylander86/uriutils
uriutils/uriutils.py
uri_read
def uri_read(*args, **kwargs): """ Reads the contents of a URI into a string or bytestring. See :func:`uri_open` for complete description of keyword parameters. :returns: Contents of URI :rtype: str, bytes """ with uri_open(*args, **kwargs) as f: content = f.read() return content
python
def uri_read(*args, **kwargs): """ Reads the contents of a URI into a string or bytestring. See :func:`uri_open` for complete description of keyword parameters. :returns: Contents of URI :rtype: str, bytes """ with uri_open(*args, **kwargs) as f: content = f.read() return content
[ "def", "uri_read", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "uri_open", "(", "*", "args", ",", "*", "*", "kwargs", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "return", "content" ]
Reads the contents of a URI into a string or bytestring. See :func:`uri_open` for complete description of keyword parameters. :returns: Contents of URI :rtype: str, bytes
[ "Reads", "the", "contents", "of", "a", "URI", "into", "a", "string", "or", "bytestring", ".", "See", ":", "func", ":", "uri_open", "for", "complete", "description", "of", "keyword", "parameters", "." ]
train
https://github.com/skylander86/uriutils/blob/e756d9483ee884973bf3a0c9ad27ae362fbe7fc6/uriutils/uriutils.py#L115-L126
skylander86/uriutils
uriutils/uriutils.py
uri_dump
def uri_dump(uri, content, mode='wb', **kwargs): """ Dumps the contents of a string/bytestring into a URI. See :func:`uri_open` for complete description of keyword parameters. :param str uri: URI to dump contents to :param str content: Contents to write to URI :param str mode: Either ``w``, or ``wb`` to write binary/text content respectiely """ if 'r' in mode: raise ValueError('Read mode is not allowed for `uri_dump`.') with uri_open(uri, mode=mode, **kwargs) as f: f.write(content) f.flush()
python
def uri_dump(uri, content, mode='wb', **kwargs): """ Dumps the contents of a string/bytestring into a URI. See :func:`uri_open` for complete description of keyword parameters. :param str uri: URI to dump contents to :param str content: Contents to write to URI :param str mode: Either ``w``, or ``wb`` to write binary/text content respectiely """ if 'r' in mode: raise ValueError('Read mode is not allowed for `uri_dump`.') with uri_open(uri, mode=mode, **kwargs) as f: f.write(content) f.flush()
[ "def", "uri_dump", "(", "uri", ",", "content", ",", "mode", "=", "'wb'", ",", "*", "*", "kwargs", ")", ":", "if", "'r'", "in", "mode", ":", "raise", "ValueError", "(", "'Read mode is not allowed for `uri_dump`.'", ")", "with", "uri_open", "(", "uri", ",", ...
Dumps the contents of a string/bytestring into a URI. See :func:`uri_open` for complete description of keyword parameters. :param str uri: URI to dump contents to :param str content: Contents to write to URI :param str mode: Either ``w``, or ``wb`` to write binary/text content respectiely
[ "Dumps", "the", "contents", "of", "a", "string", "/", "bytestring", "into", "a", "URI", ".", "See", ":", "func", ":", "uri_open", "for", "complete", "description", "of", "keyword", "parameters", "." ]
train
https://github.com/skylander86/uriutils/blob/e756d9483ee884973bf3a0c9ad27ae362fbe7fc6/uriutils/uriutils.py#L130-L144
skylander86/uriutils
uriutils/uriutils.py
uri_exists_wait
def uri_exists_wait(uri, timeout=300, interval=5, storage_args={}): """ Block / waits until URI exists. :param str uri: URI to check existence :param float timeout: Number of seconds before timing out :param float interval: Calls :func:`uri_exists` every ``interval`` seconds :param dict storage_args: Keyword arguments to pass to the underlying storage object :returns: ``True`` if URI exists :rtype: bool """ uri_obj = get_uri_obj(uri, storage_args) start_time = time.time() while time.time() - start_time < timeout: if uri_obj.exists(): return True time.sleep(interval) #end while if uri_exists(uri): return True return False
python
def uri_exists_wait(uri, timeout=300, interval=5, storage_args={}): """ Block / waits until URI exists. :param str uri: URI to check existence :param float timeout: Number of seconds before timing out :param float interval: Calls :func:`uri_exists` every ``interval`` seconds :param dict storage_args: Keyword arguments to pass to the underlying storage object :returns: ``True`` if URI exists :rtype: bool """ uri_obj = get_uri_obj(uri, storage_args) start_time = time.time() while time.time() - start_time < timeout: if uri_obj.exists(): return True time.sleep(interval) #end while if uri_exists(uri): return True return False
[ "def", "uri_exists_wait", "(", "uri", ",", "timeout", "=", "300", ",", "interval", "=", "5", ",", "storage_args", "=", "{", "}", ")", ":", "uri_obj", "=", "get_uri_obj", "(", "uri", ",", "storage_args", ")", "start_time", "=", "time", ".", "time", "(",...
Block / waits until URI exists. :param str uri: URI to check existence :param float timeout: Number of seconds before timing out :param float interval: Calls :func:`uri_exists` every ``interval`` seconds :param dict storage_args: Keyword arguments to pass to the underlying storage object :returns: ``True`` if URI exists :rtype: bool
[ "Block", "/", "waits", "until", "URI", "exists", "." ]
train
https://github.com/skylander86/uriutils/blob/e756d9483ee884973bf3a0c9ad27ae362fbe7fc6/uriutils/uriutils.py#L180-L201
spreecode/python-sofort
sofort/client.py
Client.payment
def payment(self, amount, **kwargs): """Get payment URL and new transaction ID Usage:: >>> import sofort >>> client = sofort.Client('123456', '123456', '123456', abort_url='https://mysite.com/abort') >>> t = client.pay(12, success_url='http://mysite.com?paid') >>> t.transaction 123123-321231-56A3BE0E-ACAB >>> t.payment_url https://www.sofort.com/payment/go/136b2012718da216af4c20c2ec2f51100c90406e """ params = self.config.clone()\ .update({ 'amount': amount })\ .update(kwargs) mandatory = ['abort_url', 'reasons', 'success_url'] for field in mandatory: if not params.has(field): raise ValueError('Mandatory field "{}" is not specified'.format(field)) params.reasons = [sofort.internals.strip_reason(reason) for reason in params.reasons] return self._request(sofort.xml.multipay(params), params)
python
def payment(self, amount, **kwargs): """Get payment URL and new transaction ID Usage:: >>> import sofort >>> client = sofort.Client('123456', '123456', '123456', abort_url='https://mysite.com/abort') >>> t = client.pay(12, success_url='http://mysite.com?paid') >>> t.transaction 123123-321231-56A3BE0E-ACAB >>> t.payment_url https://www.sofort.com/payment/go/136b2012718da216af4c20c2ec2f51100c90406e """ params = self.config.clone()\ .update({ 'amount': amount })\ .update(kwargs) mandatory = ['abort_url', 'reasons', 'success_url'] for field in mandatory: if not params.has(field): raise ValueError('Mandatory field "{}" is not specified'.format(field)) params.reasons = [sofort.internals.strip_reason(reason) for reason in params.reasons] return self._request(sofort.xml.multipay(params), params)
[ "def", "payment", "(", "self", ",", "amount", ",", "*", "*", "kwargs", ")", ":", "params", "=", "self", ".", "config", ".", "clone", "(", ")", ".", "update", "(", "{", "'amount'", ":", "amount", "}", ")", ".", "update", "(", "kwargs", ")", "manda...
Get payment URL and new transaction ID Usage:: >>> import sofort >>> client = sofort.Client('123456', '123456', '123456', abort_url='https://mysite.com/abort') >>> t = client.pay(12, success_url='http://mysite.com?paid') >>> t.transaction 123123-321231-56A3BE0E-ACAB >>> t.payment_url https://www.sofort.com/payment/go/136b2012718da216af4c20c2ec2f51100c90406e
[ "Get", "payment", "URL", "and", "new", "transaction", "ID" ]
train
https://github.com/spreecode/python-sofort/blob/0d467e8fd56462b2d8a0ba65085ffd9a5a4d1add/sofort/client.py#L43-L72
BendingSpoons/envious
envious/load.py
load_env
def load_env(print_vars=False): """Load environment variables from a .env file, if present. If an .env file is found in the working directory, and the listed environment variables are not already set, they will be set according to the values listed in the file. """ env_file = os.environ.get('ENV_FILE', '.env') try: variables = open(env_file).read().splitlines() for v in variables: if '=' in v: key, value = v.split('=', 1) if key.startswith('#'): continue if key not in os.environ: if value.startswith('"') and value.endswith('"') or \ value.startswith("'") and value.endswith("'"): os.environ[key] = ast.literal_eval(value) else: os.environ[key] = value if print_vars: print(key, os.environ[key]) except IOError: pass
python
def load_env(print_vars=False): """Load environment variables from a .env file, if present. If an .env file is found in the working directory, and the listed environment variables are not already set, they will be set according to the values listed in the file. """ env_file = os.environ.get('ENV_FILE', '.env') try: variables = open(env_file).read().splitlines() for v in variables: if '=' in v: key, value = v.split('=', 1) if key.startswith('#'): continue if key not in os.environ: if value.startswith('"') and value.endswith('"') or \ value.startswith("'") and value.endswith("'"): os.environ[key] = ast.literal_eval(value) else: os.environ[key] = value if print_vars: print(key, os.environ[key]) except IOError: pass
[ "def", "load_env", "(", "print_vars", "=", "False", ")", ":", "env_file", "=", "os", ".", "environ", ".", "get", "(", "'ENV_FILE'", ",", "'.env'", ")", "try", ":", "variables", "=", "open", "(", "env_file", ")", ".", "read", "(", ")", ".", "splitline...
Load environment variables from a .env file, if present. If an .env file is found in the working directory, and the listed environment variables are not already set, they will be set according to the values listed in the file.
[ "Load", "environment", "variables", "from", "a", ".", "env", "file", "if", "present", "." ]
train
https://github.com/BendingSpoons/envious/blob/7ad189c1d929401b015d4490095d7f31eaffcc1d/envious/load.py#L8-L32
duniter/duniter-python-api
examples/request_data_elasticsearch.py
main
async def main(): """ Main code (synchronous requests) """ # Create Client from endpoint string in Duniter format client = Client(ES_CORE_ENDPOINT) # Get the current node (direct REST GET request) print("\nGET g1-test/block/current/_source:") response = await client.get('g1-test/block/current/_source') print(response) # Get the node number 2 with only selected fields (direct REST GET request) print("\nGET g1-test/block/2/_source:") response = await client.get('g1-test/block/2/_source', {'_source': 'number,hash,dividend,membersCount'}) print(response) # Close client aiohttp session await client.close() # Create Client from endpoint string in Duniter format client = Client(ES_USER_ENDPOINT) # prompt entry pubkey = input("\nEnter a public key to get the user profile: ") # Get the profil of a public key (direct REST GET request) print("\nGET user/profile/{0}/_source:".format(pubkey)) response = await client.get('user/profile/{0}/_source'.format(pubkey.strip(' \n'))) print(response) # Close client aiohttp session await client.close()
python
async def main(): """ Main code (synchronous requests) """ # Create Client from endpoint string in Duniter format client = Client(ES_CORE_ENDPOINT) # Get the current node (direct REST GET request) print("\nGET g1-test/block/current/_source:") response = await client.get('g1-test/block/current/_source') print(response) # Get the node number 2 with only selected fields (direct REST GET request) print("\nGET g1-test/block/2/_source:") response = await client.get('g1-test/block/2/_source', {'_source': 'number,hash,dividend,membersCount'}) print(response) # Close client aiohttp session await client.close() # Create Client from endpoint string in Duniter format client = Client(ES_USER_ENDPOINT) # prompt entry pubkey = input("\nEnter a public key to get the user profile: ") # Get the profil of a public key (direct REST GET request) print("\nGET user/profile/{0}/_source:".format(pubkey)) response = await client.get('user/profile/{0}/_source'.format(pubkey.strip(' \n'))) print(response) # Close client aiohttp session await client.close()
[ "async", "def", "main", "(", ")", ":", "# Create Client from endpoint string in Duniter format", "client", "=", "Client", "(", "ES_CORE_ENDPOINT", ")", "# Get the current node (direct REST GET request)", "print", "(", "\"\\nGET g1-test/block/current/_source:\"", ")", "response", ...
Main code (synchronous requests)
[ "Main", "code", "(", "synchronous", "requests", ")" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/examples/request_data_elasticsearch.py#L20-L52
renzon/gaeforms
gaeforms/ndb/form.py
ModelForm.fill_model
def fill_model(self, model=None): """ Populates a model with normalized properties. If no model is provided (None) a new one will be created. :param model: model to be populade :return: populated model """ normalized_dct = self.normalize() if model: if not isinstance(model, self._model_class): raise ModelFormSecurityError('%s should be %s instance' % (model, self._model_class.__name__)) model.populate(**normalized_dct) return model return self._model_class(**normalized_dct)
python
def fill_model(self, model=None): """ Populates a model with normalized properties. If no model is provided (None) a new one will be created. :param model: model to be populade :return: populated model """ normalized_dct = self.normalize() if model: if not isinstance(model, self._model_class): raise ModelFormSecurityError('%s should be %s instance' % (model, self._model_class.__name__)) model.populate(**normalized_dct) return model return self._model_class(**normalized_dct)
[ "def", "fill_model", "(", "self", ",", "model", "=", "None", ")", ":", "normalized_dct", "=", "self", ".", "normalize", "(", ")", "if", "model", ":", "if", "not", "isinstance", "(", "model", ",", "self", ".", "_model_class", ")", ":", "raise", "ModelFo...
Populates a model with normalized properties. If no model is provided (None) a new one will be created. :param model: model to be populade :return: populated model
[ "Populates", "a", "model", "with", "normalized", "properties", ".", "If", "no", "model", "is", "provided", "(", "None", ")", "a", "new", "one", "will", "be", "created", ".", ":", "param", "model", ":", "model", "to", "be", "populade", ":", "return", ":...
train
https://github.com/renzon/gaeforms/blob/7d3f4d964f087c992fe92bc8d41222010b7f6430/gaeforms/ndb/form.py#L99-L111
renzon/gaeforms
gaeforms/ndb/form.py
ModelForm.fill_with_model
def fill_with_model(self, model, *fields): """ Populates this form with localized properties from model. :param fields: string list indicating the fields to include. If None, all fields defined on form will be used :param model: model :return: dict with localized properties """ model_dct = model.to_dict(include=self._fields.keys()) localized_dct = self.localize(*fields, **model_dct) if model.key: localized_dct['id'] = model.key.id() return localized_dct
python
def fill_with_model(self, model, *fields): """ Populates this form with localized properties from model. :param fields: string list indicating the fields to include. If None, all fields defined on form will be used :param model: model :return: dict with localized properties """ model_dct = model.to_dict(include=self._fields.keys()) localized_dct = self.localize(*fields, **model_dct) if model.key: localized_dct['id'] = model.key.id() return localized_dct
[ "def", "fill_with_model", "(", "self", ",", "model", ",", "*", "fields", ")", ":", "model_dct", "=", "model", ".", "to_dict", "(", "include", "=", "self", ".", "_fields", ".", "keys", "(", ")", ")", "localized_dct", "=", "self", ".", "localize", "(", ...
Populates this form with localized properties from model. :param fields: string list indicating the fields to include. If None, all fields defined on form will be used :param model: model :return: dict with localized properties
[ "Populates", "this", "form", "with", "localized", "properties", "from", "model", ".", ":", "param", "fields", ":", "string", "list", "indicating", "the", "fields", "to", "include", ".", "If", "None", "all", "fields", "defined", "on", "form", "will", "be", ...
train
https://github.com/renzon/gaeforms/blob/7d3f4d964f087c992fe92bc8d41222010b7f6430/gaeforms/ndb/form.py#L113-L124
jonathanlloyd/envpy
envpy/__init__.py
get_config
def get_config(config_schema, env=None): """Parse config from the environment against a given schema Args: config_schema: A dictionary mapping keys in the environment to envpy Schema objects describing the expected value. env: An optional dictionary used to override the environment rather than getting it from the os. Returns: A dictionary which maps the values pulled from the environment and parsed against the given schema. Raises: MissingConfigError: A value in the schema with no default could not be found in the environment. ParsingError: A value was found in the environment but could not be parsed into the given value type. """ if env is None: env = os.environ return parser.parse_env( config_schema, env, )
python
def get_config(config_schema, env=None): """Parse config from the environment against a given schema Args: config_schema: A dictionary mapping keys in the environment to envpy Schema objects describing the expected value. env: An optional dictionary used to override the environment rather than getting it from the os. Returns: A dictionary which maps the values pulled from the environment and parsed against the given schema. Raises: MissingConfigError: A value in the schema with no default could not be found in the environment. ParsingError: A value was found in the environment but could not be parsed into the given value type. """ if env is None: env = os.environ return parser.parse_env( config_schema, env, )
[ "def", "get_config", "(", "config_schema", ",", "env", "=", "None", ")", ":", "if", "env", "is", "None", ":", "env", "=", "os", ".", "environ", "return", "parser", ".", "parse_env", "(", "config_schema", ",", "env", ",", ")" ]
Parse config from the environment against a given schema Args: config_schema: A dictionary mapping keys in the environment to envpy Schema objects describing the expected value. env: An optional dictionary used to override the environment rather than getting it from the os. Returns: A dictionary which maps the values pulled from the environment and parsed against the given schema. Raises: MissingConfigError: A value in the schema with no default could not be found in the environment. ParsingError: A value was found in the environment but could not be parsed into the given value type.
[ "Parse", "config", "from", "the", "environment", "against", "a", "given", "schema" ]
train
https://github.com/jonathanlloyd/envpy/blob/b3fa1cd0defc95ba76a36810653f9c7fe4f51ccc/envpy/__init__.py#L15-L44
racker/scrivener
scrivener/_thrift/scribe/scribe.py
Client.Log
def Log(self, messages): """ Parameters: - messages """ self._seqid += 1 d = self._reqs[self._seqid] = defer.Deferred() self.send_Log(messages) return d
python
def Log(self, messages): """ Parameters: - messages """ self._seqid += 1 d = self._reqs[self._seqid] = defer.Deferred() self.send_Log(messages) return d
[ "def", "Log", "(", "self", ",", "messages", ")", ":", "self", ".", "_seqid", "+=", "1", "d", "=", "self", ".", "_reqs", "[", "self", ".", "_seqid", "]", "=", "defer", ".", "Deferred", "(", ")", "self", ".", "send_Log", "(", "messages", ")", "retu...
Parameters: - messages
[ "Parameters", ":", "-", "messages" ]
train
https://github.com/racker/scrivener/blob/d94e965d03b2ef661fa4f1df63f6ab238d48727d/scrivener/_thrift/scribe/scribe.py#L41-L49
frnmst/fpyutils
fpyutils/filelines.py
get_line_matches
def get_line_matches(input_file: str, pattern: str, max_occurrencies: int = 0, loose_matching: bool = True) -> dict: r"""Get the line numbers of matched patterns. :parameter input_file: the file that needs to be read. :parameter pattern: the pattern that needs to be searched. :parameter max_occurrencies: the maximum number of expected occurrencies. Defaults to ``0`` which means that all occurrencies will be matched. :parameter loose_matching: ignore leading and trailing whitespace characters for both pattern and matched strings. Defaults to ``True``. :type input_file: str :type pattern: str :type max_occurrencies: int :type loose_matching: bool :returns: occurrency_matches, A dictionary where each key corresponds to the number of occurrencies and each value to the matched line number. If no match was found for that particular occurrency, the key is not set. This means means for example: if the first occurrency of pattern is at line y then: x[1] = y. :rtype: dict :raises: a built-in exception. .. note:: Line numbers start from ``1``. """ assert max_occurrencies >= 0 occurrency_counter = 0.0 occurrency_matches = dict() if max_occurrencies == 0: max_occurrencies = float('inf') if loose_matching: pattern = pattern.strip() line_counter = 1 with open(input_file, 'r') as f: line = f.readline() while line and occurrency_counter < max_occurrencies: if loose_matching: line = line.strip() if line == pattern: occurrency_counter += 1.0 occurrency_matches[int(occurrency_counter)] = line_counter line = f.readline() line_counter += 1 return occurrency_matches
python
def get_line_matches(input_file: str, pattern: str, max_occurrencies: int = 0, loose_matching: bool = True) -> dict: r"""Get the line numbers of matched patterns. :parameter input_file: the file that needs to be read. :parameter pattern: the pattern that needs to be searched. :parameter max_occurrencies: the maximum number of expected occurrencies. Defaults to ``0`` which means that all occurrencies will be matched. :parameter loose_matching: ignore leading and trailing whitespace characters for both pattern and matched strings. Defaults to ``True``. :type input_file: str :type pattern: str :type max_occurrencies: int :type loose_matching: bool :returns: occurrency_matches, A dictionary where each key corresponds to the number of occurrencies and each value to the matched line number. If no match was found for that particular occurrency, the key is not set. This means means for example: if the first occurrency of pattern is at line y then: x[1] = y. :rtype: dict :raises: a built-in exception. .. note:: Line numbers start from ``1``. """ assert max_occurrencies >= 0 occurrency_counter = 0.0 occurrency_matches = dict() if max_occurrencies == 0: max_occurrencies = float('inf') if loose_matching: pattern = pattern.strip() line_counter = 1 with open(input_file, 'r') as f: line = f.readline() while line and occurrency_counter < max_occurrencies: if loose_matching: line = line.strip() if line == pattern: occurrency_counter += 1.0 occurrency_matches[int(occurrency_counter)] = line_counter line = f.readline() line_counter += 1 return occurrency_matches
[ "def", "get_line_matches", "(", "input_file", ":", "str", ",", "pattern", ":", "str", ",", "max_occurrencies", ":", "int", "=", "0", ",", "loose_matching", ":", "bool", "=", "True", ")", "->", "dict", ":", "assert", "max_occurrencies", ">=", "0", "occurren...
r"""Get the line numbers of matched patterns. :parameter input_file: the file that needs to be read. :parameter pattern: the pattern that needs to be searched. :parameter max_occurrencies: the maximum number of expected occurrencies. Defaults to ``0`` which means that all occurrencies will be matched. :parameter loose_matching: ignore leading and trailing whitespace characters for both pattern and matched strings. Defaults to ``True``. :type input_file: str :type pattern: str :type max_occurrencies: int :type loose_matching: bool :returns: occurrency_matches, A dictionary where each key corresponds to the number of occurrencies and each value to the matched line number. If no match was found for that particular occurrency, the key is not set. This means means for example: if the first occurrency of pattern is at line y then: x[1] = y. :rtype: dict :raises: a built-in exception. .. note:: Line numbers start from ``1``.
[ "r", "Get", "the", "line", "numbers", "of", "matched", "patterns", "." ]
train
https://github.com/frnmst/fpyutils/blob/74a9e15af4020248dda5ec6d25e05571c7717f20/fpyutils/filelines.py#L27-L76
frnmst/fpyutils
fpyutils/filelines.py
insert_string_at_line
def insert_string_at_line(input_file: str, string_to_be_inserted: str, put_at_line_number: int, output_file: str, append: bool = True, newline_character: str = '\n'): r"""Write a string at the specified line. :parameter input_file: the file that needs to be read. :parameter string_to_be_inserted: the string that needs to be added. :parameter put_at_line_number: the line number on which to append the string. :parameter output_file: the file that needs to be written with the new content. :parameter append: decides whether to append or prepend the string at the selected line. Defaults to ``True``. :parameter newline_character: set the character used to fill the file in case line_number is greater than the number of lines of input_file. Defaults to ``\n``. :type input_file: str :type string_to_be_inserted: str :type line_number: int :type output_file: str :type append: bool :type newline_character: str :returns: None :raises: LineOutOfFileBoundsError or a built-in exception. .. note:: Line numbers start from ``1``. """ assert put_at_line_number >= 1 with open(input_file, 'r') as f: lines = f.readlines() line_counter = 1 i = 0 loop = True extra_lines_done = False line_number_after_eof = len(lines) + 1 with atomic_write(output_file, overwrite=True) as f: while loop: if put_at_line_number > len( lines) and line_counter == line_number_after_eof: # There are extra lines to write. line = str() else: line = lines[i] # It is ok if the position of line to be written is greater # than the last line number of the input file. We just need to add # the appropriate number of new line characters which will fill # the non existing lines of the output file. if put_at_line_number > len( lines) and line_counter == line_number_after_eof: for additional_newlines in range( 0, put_at_line_number - len(lines) - 1): # Skip the newline in the line where we need to insert # the new string. 
f.write(newline_character) line_counter += 1 i += 1 extra_lines_done = True if line_counter == put_at_line_number: # A very simple append operation: if the original line ends # with a '\n' character, the string will be added on the next # line... if append: line = line + string_to_be_inserted # ...otherwise the string is prepended. else: line = string_to_be_inserted + line f.write(line) line_counter += 1 i += 1 # Quit the loop if there is nothing more to write. if i >= len(lines): loop = False # Continue looping if there are still extra lines to write. if put_at_line_number > len(lines) and not extra_lines_done: loop = True
python
def insert_string_at_line(input_file: str, string_to_be_inserted: str, put_at_line_number: int, output_file: str, append: bool = True, newline_character: str = '\n'): r"""Write a string at the specified line. :parameter input_file: the file that needs to be read. :parameter string_to_be_inserted: the string that needs to be added. :parameter put_at_line_number: the line number on which to append the string. :parameter output_file: the file that needs to be written with the new content. :parameter append: decides whether to append or prepend the string at the selected line. Defaults to ``True``. :parameter newline_character: set the character used to fill the file in case line_number is greater than the number of lines of input_file. Defaults to ``\n``. :type input_file: str :type string_to_be_inserted: str :type line_number: int :type output_file: str :type append: bool :type newline_character: str :returns: None :raises: LineOutOfFileBoundsError or a built-in exception. .. note:: Line numbers start from ``1``. """ assert put_at_line_number >= 1 with open(input_file, 'r') as f: lines = f.readlines() line_counter = 1 i = 0 loop = True extra_lines_done = False line_number_after_eof = len(lines) + 1 with atomic_write(output_file, overwrite=True) as f: while loop: if put_at_line_number > len( lines) and line_counter == line_number_after_eof: # There are extra lines to write. line = str() else: line = lines[i] # It is ok if the position of line to be written is greater # than the last line number of the input file. We just need to add # the appropriate number of new line characters which will fill # the non existing lines of the output file. if put_at_line_number > len( lines) and line_counter == line_number_after_eof: for additional_newlines in range( 0, put_at_line_number - len(lines) - 1): # Skip the newline in the line where we need to insert # the new string. 
f.write(newline_character) line_counter += 1 i += 1 extra_lines_done = True if line_counter == put_at_line_number: # A very simple append operation: if the original line ends # with a '\n' character, the string will be added on the next # line... if append: line = line + string_to_be_inserted # ...otherwise the string is prepended. else: line = string_to_be_inserted + line f.write(line) line_counter += 1 i += 1 # Quit the loop if there is nothing more to write. if i >= len(lines): loop = False # Continue looping if there are still extra lines to write. if put_at_line_number > len(lines) and not extra_lines_done: loop = True
[ "def", "insert_string_at_line", "(", "input_file", ":", "str", ",", "string_to_be_inserted", ":", "str", ",", "put_at_line_number", ":", "int", ",", "output_file", ":", "str", ",", "append", ":", "bool", "=", "True", ",", "newline_character", ":", "str", "=", ...
r"""Write a string at the specified line. :parameter input_file: the file that needs to be read. :parameter string_to_be_inserted: the string that needs to be added. :parameter put_at_line_number: the line number on which to append the string. :parameter output_file: the file that needs to be written with the new content. :parameter append: decides whether to append or prepend the string at the selected line. Defaults to ``True``. :parameter newline_character: set the character used to fill the file in case line_number is greater than the number of lines of input_file. Defaults to ``\n``. :type input_file: str :type string_to_be_inserted: str :type line_number: int :type output_file: str :type append: bool :type newline_character: str :returns: None :raises: LineOutOfFileBoundsError or a built-in exception. .. note:: Line numbers start from ``1``.
[ "r", "Write", "a", "string", "at", "the", "specified", "line", "." ]
train
https://github.com/frnmst/fpyutils/blob/74a9e15af4020248dda5ec6d25e05571c7717f20/fpyutils/filelines.py#L79-L160
frnmst/fpyutils
fpyutils/filelines.py
remove_line_interval
def remove_line_interval(input_file: str, delete_line_from: int, delete_line_to: int, output_file: str): r"""Remove a line interval. :parameter input_file: the file that needs to be read. :parameter delete_line_from: the line number from which start deleting. :parameter delete_line_to: the line number to which stop deleting. :parameter output_file: the file that needs to be written without the selected lines. :type input_file: str :type delete_line_from: int :type delete_line_to: int :type output_file: str :returns: None :raises: LineOutOfFileBoundsError or a built-in exception. .. note:: Line numbers start from ``1``. .. note:: It is possible to remove a single line only. This happens when the parameters delete_line_from and delete_line_to are equal. """ assert delete_line_from >= 1 assert delete_line_to >= 1 with open(input_file, 'r') as f: lines = f.readlines() # Invalid line ranges. # Base case delete_line_to - delete_line_from == 0: single line. if delete_line_to - delete_line_from < 0: raise NegativeLineRangeError if delete_line_from > len(lines) or delete_line_to > len(lines): raise LineOutOfFileBoundsError line_counter = 1 # Rewrite the file without the string. with atomic_write(output_file, overwrite=True) as f: for line in lines: # Ignore the line interval where the content to be deleted lies. if line_counter >= delete_line_from and line_counter <= delete_line_to: pass # Write the rest of the file. else: f.write(line) line_counter += 1
python
def remove_line_interval(input_file: str, delete_line_from: int, delete_line_to: int, output_file: str): r"""Remove a line interval. :parameter input_file: the file that needs to be read. :parameter delete_line_from: the line number from which start deleting. :parameter delete_line_to: the line number to which stop deleting. :parameter output_file: the file that needs to be written without the selected lines. :type input_file: str :type delete_line_from: int :type delete_line_to: int :type output_file: str :returns: None :raises: LineOutOfFileBoundsError or a built-in exception. .. note:: Line numbers start from ``1``. .. note:: It is possible to remove a single line only. This happens when the parameters delete_line_from and delete_line_to are equal. """ assert delete_line_from >= 1 assert delete_line_to >= 1 with open(input_file, 'r') as f: lines = f.readlines() # Invalid line ranges. # Base case delete_line_to - delete_line_from == 0: single line. if delete_line_to - delete_line_from < 0: raise NegativeLineRangeError if delete_line_from > len(lines) or delete_line_to > len(lines): raise LineOutOfFileBoundsError line_counter = 1 # Rewrite the file without the string. with atomic_write(output_file, overwrite=True) as f: for line in lines: # Ignore the line interval where the content to be deleted lies. if line_counter >= delete_line_from and line_counter <= delete_line_to: pass # Write the rest of the file. else: f.write(line) line_counter += 1
[ "def", "remove_line_interval", "(", "input_file", ":", "str", ",", "delete_line_from", ":", "int", ",", "delete_line_to", ":", "int", ",", "output_file", ":", "str", ")", ":", "assert", "delete_line_from", ">=", "1", "assert", "delete_line_to", ">=", "1", "wit...
r"""Remove a line interval. :parameter input_file: the file that needs to be read. :parameter delete_line_from: the line number from which start deleting. :parameter delete_line_to: the line number to which stop deleting. :parameter output_file: the file that needs to be written without the selected lines. :type input_file: str :type delete_line_from: int :type delete_line_to: int :type output_file: str :returns: None :raises: LineOutOfFileBoundsError or a built-in exception. .. note:: Line numbers start from ``1``. .. note:: It is possible to remove a single line only. This happens when the parameters delete_line_from and delete_line_to are equal.
[ "r", "Remove", "a", "line", "interval", "." ]
train
https://github.com/frnmst/fpyutils/blob/74a9e15af4020248dda5ec6d25e05571c7717f20/fpyutils/filelines.py#L167-L213
shaypal5/barn
barn/azure.py
upload_dataset
def upload_dataset( dataset_name, file_path, task=None, dataset_attributes=None, **kwargs): """Uploads the given file to dataset store. Parameters ---------- dataset_name : str The name of the dataset to upload. file_path : str The full path to the file to upload task : str, optional The task for which the given dataset is used for. If not given, a path for the corresponding task-agnostic directory is used. dataset_attributes : dict, optional Additional attributes of the datasets. Used to generate additional sub-folders on the blob "path". For example, providing 'lang=en' will results in a path such as '/lang_en/mydataset.csv'. Hierarchy always matches lexicographical order of keyword argument names, so 'lang=en' and 'animal=dog' will result in a path such as 'task_name/animal_dof/lang_en/dset.csv'. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.create_blob_from_path. """ fname = ntpath.basename(file_path) blob_name = _blob_name( dataset_name=dataset_name, file_name=fname, task=task, dataset_attributes=dataset_attributes, ) print(blob_name) _blob_service().create_blob_from_path( container_name=BARN_CFG['azure']['container_name'], blob_name=blob_name, file_path=file_path, **kwargs, )
python
def upload_dataset( dataset_name, file_path, task=None, dataset_attributes=None, **kwargs): """Uploads the given file to dataset store. Parameters ---------- dataset_name : str The name of the dataset to upload. file_path : str The full path to the file to upload task : str, optional The task for which the given dataset is used for. If not given, a path for the corresponding task-agnostic directory is used. dataset_attributes : dict, optional Additional attributes of the datasets. Used to generate additional sub-folders on the blob "path". For example, providing 'lang=en' will results in a path such as '/lang_en/mydataset.csv'. Hierarchy always matches lexicographical order of keyword argument names, so 'lang=en' and 'animal=dog' will result in a path such as 'task_name/animal_dof/lang_en/dset.csv'. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.create_blob_from_path. """ fname = ntpath.basename(file_path) blob_name = _blob_name( dataset_name=dataset_name, file_name=fname, task=task, dataset_attributes=dataset_attributes, ) print(blob_name) _blob_service().create_blob_from_path( container_name=BARN_CFG['azure']['container_name'], blob_name=blob_name, file_path=file_path, **kwargs, )
[ "def", "upload_dataset", "(", "dataset_name", ",", "file_path", ",", "task", "=", "None", ",", "dataset_attributes", "=", "None", ",", "*", "*", "kwargs", ")", ":", "fname", "=", "ntpath", ".", "basename", "(", "file_path", ")", "blob_name", "=", "_blob_na...
Uploads the given file to dataset store. Parameters ---------- dataset_name : str The name of the dataset to upload. file_path : str The full path to the file to upload task : str, optional The task for which the given dataset is used for. If not given, a path for the corresponding task-agnostic directory is used. dataset_attributes : dict, optional Additional attributes of the datasets. Used to generate additional sub-folders on the blob "path". For example, providing 'lang=en' will results in a path such as '/lang_en/mydataset.csv'. Hierarchy always matches lexicographical order of keyword argument names, so 'lang=en' and 'animal=dog' will result in a path such as 'task_name/animal_dof/lang_en/dset.csv'. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.create_blob_from_path.
[ "Uploads", "the", "given", "file", "to", "dataset", "store", "." ]
train
https://github.com/shaypal5/barn/blob/85958a0f9ac94943729605e70527ee726d3f3007/barn/azure.py#L72-L109
shaypal5/barn
barn/azure.py
download_dataset
def download_dataset( dataset_name, file_path, task=None, dataset_attributes=None, **kwargs): """Downloads the given dataset from dataset store. Parameters ---------- dataset_name : str The name of the dataset to upload. file_path : str The full path to the file to upload task : str, optional The task for which the given dataset is used for. If not given, a path for the corresponding task-agnostic directory is used. dataset_attributes : dict, optional Additional attributes of the datasets. Used to generate additional sub-folders on the blob "path". For example, providing 'lang=en' will results in a path such as '/lang_en/mydataset.csv'. Hierarchy always matches lexicographical order of keyword argument names, so 'lang=en' and 'animal=dog' will result in a path such as 'task_name/animal_dof/lang_en/dset.csv'. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.get_blob_to_path. """ fname = ntpath.basename(file_path) blob_name = _blob_name( dataset_name=dataset_name, file_name=fname, task=task, dataset_attributes=dataset_attributes, ) # print("Downloading blob: {}".format(blob_name)) try: _blob_service().get_blob_to_path( container_name=BARN_CFG['azure']['container_name'], blob_name=blob_name, file_path=file_path, **kwargs, ) except Exception as e: if os.path.isfile(file_path): os.remove(file_path) raise MissingDatasetError( "With blob {}.".format(blob_name)) from e
python
def download_dataset( dataset_name, file_path, task=None, dataset_attributes=None, **kwargs): """Downloads the given dataset from dataset store. Parameters ---------- dataset_name : str The name of the dataset to upload. file_path : str The full path to the file to upload task : str, optional The task for which the given dataset is used for. If not given, a path for the corresponding task-agnostic directory is used. dataset_attributes : dict, optional Additional attributes of the datasets. Used to generate additional sub-folders on the blob "path". For example, providing 'lang=en' will results in a path such as '/lang_en/mydataset.csv'. Hierarchy always matches lexicographical order of keyword argument names, so 'lang=en' and 'animal=dog' will result in a path such as 'task_name/animal_dof/lang_en/dset.csv'. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.get_blob_to_path. """ fname = ntpath.basename(file_path) blob_name = _blob_name( dataset_name=dataset_name, file_name=fname, task=task, dataset_attributes=dataset_attributes, ) # print("Downloading blob: {}".format(blob_name)) try: _blob_service().get_blob_to_path( container_name=BARN_CFG['azure']['container_name'], blob_name=blob_name, file_path=file_path, **kwargs, ) except Exception as e: if os.path.isfile(file_path): os.remove(file_path) raise MissingDatasetError( "With blob {}.".format(blob_name)) from e
[ "def", "download_dataset", "(", "dataset_name", ",", "file_path", ",", "task", "=", "None", ",", "dataset_attributes", "=", "None", ",", "*", "*", "kwargs", ")", ":", "fname", "=", "ntpath", ".", "basename", "(", "file_path", ")", "blob_name", "=", "_blob_...
Downloads the given dataset from dataset store. Parameters ---------- dataset_name : str The name of the dataset to upload. file_path : str The full path to the file to upload task : str, optional The task for which the given dataset is used for. If not given, a path for the corresponding task-agnostic directory is used. dataset_attributes : dict, optional Additional attributes of the datasets. Used to generate additional sub-folders on the blob "path". For example, providing 'lang=en' will results in a path such as '/lang_en/mydataset.csv'. Hierarchy always matches lexicographical order of keyword argument names, so 'lang=en' and 'animal=dog' will result in a path such as 'task_name/animal_dof/lang_en/dset.csv'. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.get_blob_to_path.
[ "Downloads", "the", "given", "dataset", "from", "dataset", "store", "." ]
train
https://github.com/shaypal5/barn/blob/85958a0f9ac94943729605e70527ee726d3f3007/barn/azure.py#L112-L155
asmodehn/filefinder2
filefinder2/util.py
_resolve_name
def _resolve_name(name, package, level): """Resolve a relative module name to an absolute one.""" bits = package.rsplit('.', level - 1) if len(bits) < level: raise ValueError('attempted relative import beyond top-level package') base = bits[0] return '{}.{}'.format(base, name) if name else base
python
def _resolve_name(name, package, level): """Resolve a relative module name to an absolute one.""" bits = package.rsplit('.', level - 1) if len(bits) < level: raise ValueError('attempted relative import beyond top-level package') base = bits[0] return '{}.{}'.format(base, name) if name else base
[ "def", "_resolve_name", "(", "name", ",", "package", ",", "level", ")", ":", "bits", "=", "package", ".", "rsplit", "(", "'.'", ",", "level", "-", "1", ")", "if", "len", "(", "bits", ")", "<", "level", ":", "raise", "ValueError", "(", "'attempted rel...
Resolve a relative module name to an absolute one.
[ "Resolve", "a", "relative", "module", "name", "to", "an", "absolute", "one", "." ]
train
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/util.py#L23-L29
asmodehn/filefinder2
filefinder2/util.py
resolve_name
def resolve_name(name, package): """Resolve a relative module name to an absolute one.""" if not name.startswith('.'): return name elif not package: raise ValueError('{!r} is not a relative name ' '(no leading dot)'.format(name)) level = 0 for character in name: if character != '.': break level += 1 return _resolve_name(name[level:], package, level)
python
def resolve_name(name, package): """Resolve a relative module name to an absolute one.""" if not name.startswith('.'): return name elif not package: raise ValueError('{!r} is not a relative name ' '(no leading dot)'.format(name)) level = 0 for character in name: if character != '.': break level += 1 return _resolve_name(name[level:], package, level)
[ "def", "resolve_name", "(", "name", ",", "package", ")", ":", "if", "not", "name", ".", "startswith", "(", "'.'", ")", ":", "return", "name", "elif", "not", "package", ":", "raise", "ValueError", "(", "'{!r} is not a relative name '", "'(no leading dot)'", "."...
Resolve a relative module name to an absolute one.
[ "Resolve", "a", "relative", "module", "name", "to", "an", "absolute", "one", "." ]
train
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/util.py#L32-L44
asmodehn/filefinder2
filefinder2/util.py
_find_spec_from_path
def _find_spec_from_path(name, path=None): """Return the spec for the specified module. First, sys.modules is checked to see if the module was already imported. If so, then sys.modules[name].__spec__ is returned. If that happens to be set to None, then ValueError is raised. If the module is not in sys.modules, then sys.meta_path is searched for a suitable spec with the value of 'path' given to the finders. None is returned if no spec could be found. Dotted names do not have their parent packages implicitly imported. You will most likely need to explicitly import all parent packages in the proper order for a submodule to get the correct spec. """ if name not in sys.modules: return _find_spec(name, path) else: module = sys.modules[name] if module is None: return None try: spec = module.__spec__ except AttributeError: six.raise_from(ValueError('{}.__spec__ is not set'.format(name)), None) else: if spec is None: raise ValueError('{}.__spec__ is None'.format(name)) return spec
python
def _find_spec_from_path(name, path=None): """Return the spec for the specified module. First, sys.modules is checked to see if the module was already imported. If so, then sys.modules[name].__spec__ is returned. If that happens to be set to None, then ValueError is raised. If the module is not in sys.modules, then sys.meta_path is searched for a suitable spec with the value of 'path' given to the finders. None is returned if no spec could be found. Dotted names do not have their parent packages implicitly imported. You will most likely need to explicitly import all parent packages in the proper order for a submodule to get the correct spec. """ if name not in sys.modules: return _find_spec(name, path) else: module = sys.modules[name] if module is None: return None try: spec = module.__spec__ except AttributeError: six.raise_from(ValueError('{}.__spec__ is not set'.format(name)), None) else: if spec is None: raise ValueError('{}.__spec__ is None'.format(name)) return spec
[ "def", "_find_spec_from_path", "(", "name", ",", "path", "=", "None", ")", ":", "if", "name", "not", "in", "sys", ".", "modules", ":", "return", "_find_spec", "(", "name", ",", "path", ")", "else", ":", "module", "=", "sys", ".", "modules", "[", "nam...
Return the spec for the specified module. First, sys.modules is checked to see if the module was already imported. If so, then sys.modules[name].__spec__ is returned. If that happens to be set to None, then ValueError is raised. If the module is not in sys.modules, then sys.meta_path is searched for a suitable spec with the value of 'path' given to the finders. None is returned if no spec could be found. Dotted names do not have their parent packages implicitly imported. You will most likely need to explicitly import all parent packages in the proper order for a submodule to get the correct spec.
[ "Return", "the", "spec", "for", "the", "specified", "module", ".", "First", "sys", ".", "modules", "is", "checked", "to", "see", "if", "the", "module", "was", "already", "imported", ".", "If", "so", "then", "sys", ".", "modules", "[", "name", "]", ".",...
train
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/util.py#L47-L72
asmodehn/filefinder2
filefinder2/util.py
find_spec
def find_spec(name, package=None): """Return the spec for the specified module. First, sys.modules is checked to see if the module was already imported. If so, then sys.modules[name].__spec__ is returned. If that happens to be set to None, then ValueError is raised. If the module is not in sys.modules, then sys.meta_path is searched for a suitable spec with the value of 'path' given to the finders. None is returned if no spec could be found. If the name is for submodule (contains a dot), the parent module is automatically imported. The name and package arguments work the same as importlib.import_module(). In other words, relative module names (with leading dots) work. """ fullname = resolve_name(name, package) if name.startswith('.') else name if fullname not in sys.modules: parent_name = fullname.rpartition('.')[0] if parent_name: # Use builtins.__import__() in case someone replaced it. parent = __import__(parent_name, fromlist=['__path__']) return _find_spec(fullname, parent.__path__) else: return _find_spec(fullname, None) else: module = sys.modules[fullname] if module is None: return None try: spec = module.__spec__ except AttributeError: six.raise_from(ValueError('{}.__spec__ is not set'.format(name)), None) else: if spec is None: raise ValueError('{}.__spec__ is None'.format(name)) return spec
python
def find_spec(name, package=None): """Return the spec for the specified module. First, sys.modules is checked to see if the module was already imported. If so, then sys.modules[name].__spec__ is returned. If that happens to be set to None, then ValueError is raised. If the module is not in sys.modules, then sys.meta_path is searched for a suitable spec with the value of 'path' given to the finders. None is returned if no spec could be found. If the name is for submodule (contains a dot), the parent module is automatically imported. The name and package arguments work the same as importlib.import_module(). In other words, relative module names (with leading dots) work. """ fullname = resolve_name(name, package) if name.startswith('.') else name if fullname not in sys.modules: parent_name = fullname.rpartition('.')[0] if parent_name: # Use builtins.__import__() in case someone replaced it. parent = __import__(parent_name, fromlist=['__path__']) return _find_spec(fullname, parent.__path__) else: return _find_spec(fullname, None) else: module = sys.modules[fullname] if module is None: return None try: spec = module.__spec__ except AttributeError: six.raise_from(ValueError('{}.__spec__ is not set'.format(name)), None) else: if spec is None: raise ValueError('{}.__spec__ is None'.format(name)) return spec
[ "def", "find_spec", "(", "name", ",", "package", "=", "None", ")", ":", "fullname", "=", "resolve_name", "(", "name", ",", "package", ")", "if", "name", ".", "startswith", "(", "'.'", ")", "else", "name", "if", "fullname", "not", "in", "sys", ".", "m...
Return the spec for the specified module. First, sys.modules is checked to see if the module was already imported. If so, then sys.modules[name].__spec__ is returned. If that happens to be set to None, then ValueError is raised. If the module is not in sys.modules, then sys.meta_path is searched for a suitable spec with the value of 'path' given to the finders. None is returned if no spec could be found. If the name is for submodule (contains a dot), the parent module is automatically imported. The name and package arguments work the same as importlib.import_module(). In other words, relative module names (with leading dots) work.
[ "Return", "the", "spec", "for", "the", "specified", "module", ".", "First", "sys", ".", "modules", "is", "checked", "to", "see", "if", "the", "module", "was", "already", "imported", ".", "If", "so", "then", "sys", ".", "modules", "[", "name", "]", ".",...
train
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/util.py#L75-L108
asmodehn/filefinder2
filefinder2/util.py
set_package
def set_package(fxn): """Set __package__ on the returned module. This function is deprecated. """ @functools.wraps(fxn) def set_package_wrapper(*args, **kwargs): warnings.warn('The import system now takes care of this automatically.', DeprecationWarning, stacklevel=2) module = fxn(*args, **kwargs) if getattr(module, '__package__', None) is None: module.__package__ = module.__name__ if not hasattr(module, '__path__'): module.__package__ = module.__package__.rpartition('.')[0] return module return set_package_wrapper
python
def set_package(fxn): """Set __package__ on the returned module. This function is deprecated. """ @functools.wraps(fxn) def set_package_wrapper(*args, **kwargs): warnings.warn('The import system now takes care of this automatically.', DeprecationWarning, stacklevel=2) module = fxn(*args, **kwargs) if getattr(module, '__package__', None) is None: module.__package__ = module.__name__ if not hasattr(module, '__path__'): module.__package__ = module.__package__.rpartition('.')[0] return module return set_package_wrapper
[ "def", "set_package", "(", "fxn", ")", ":", "@", "functools", ".", "wraps", "(", "fxn", ")", "def", "set_package_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "'The import system now takes care of this automatical...
Set __package__ on the returned module. This function is deprecated.
[ "Set", "__package__", "on", "the", "returned", "module", ".", "This", "function", "is", "deprecated", "." ]
train
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/util.py#L111-L125
asmodehn/filefinder2
filefinder2/util.py
set_loader
def set_loader(fxn): """Set __loader__ on the returned module. This function is deprecated. """ @functools.wraps(fxn) def set_loader_wrapper(self, *args, **kwargs): warnings.warn('The import system now takes care of this automatically.', DeprecationWarning, stacklevel=2) module = fxn(self, *args, **kwargs) if getattr(module, '__loader__', None) is None: module.__loader__ = self return module return set_loader_wrapper
python
def set_loader(fxn): """Set __loader__ on the returned module. This function is deprecated. """ @functools.wraps(fxn) def set_loader_wrapper(self, *args, **kwargs): warnings.warn('The import system now takes care of this automatically.', DeprecationWarning, stacklevel=2) module = fxn(self, *args, **kwargs) if getattr(module, '__loader__', None) is None: module.__loader__ = self return module return set_loader_wrapper
[ "def", "set_loader", "(", "fxn", ")", ":", "@", "functools", ".", "wraps", "(", "fxn", ")", "def", "set_loader_wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "'The import system now takes care of t...
Set __loader__ on the returned module. This function is deprecated.
[ "Set", "__loader__", "on", "the", "returned", "module", ".", "This", "function", "is", "deprecated", "." ]
train
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/util.py#L128-L140
asmodehn/filefinder2
filefinder2/util.py
module_for_loader
def module_for_loader(fxn): """Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument. """ warnings.warn('The import system now takes care of this automatically.', DeprecationWarning, stacklevel=2) @functools.wraps(fxn) def module_for_loader_wrapper(self, fullname, *args, **kwargs): with _module_to_load(fullname) as module: module.__loader__ = self try: is_package = self.is_package(fullname) except (ImportError, AttributeError): pass else: if is_package: module.__package__ = fullname else: module.__package__ = fullname.rpartition('.')[0] # If __package__ was not set above, __import__() will do it later. return fxn(self, module, *args, **kwargs) return module_for_loader_wrapper
python
def module_for_loader(fxn): """Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument. """ warnings.warn('The import system now takes care of this automatically.', DeprecationWarning, stacklevel=2) @functools.wraps(fxn) def module_for_loader_wrapper(self, fullname, *args, **kwargs): with _module_to_load(fullname) as module: module.__loader__ = self try: is_package = self.is_package(fullname) except (ImportError, AttributeError): pass else: if is_package: module.__package__ = fullname else: module.__package__ = fullname.rpartition('.')[0] # If __package__ was not set above, __import__() will do it later. return fxn(self, module, *args, **kwargs) return module_for_loader_wrapper
[ "def", "module_for_loader", "(", "fxn", ")", ":", "warnings", ".", "warn", "(", "'The import system now takes care of this automatically.'", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "@", "functools", ".", "wraps", "(", "fxn", ")", "def", "modul...
Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument.
[ "Decorator", "to", "handle", "selecting", "the", "proper", "module", "for", "loaders", ".", "The", "decorated", "function", "is", "passed", "the", "module", "to", "use", "instead", "of", "the", "module", "name", ".", "The", "module", "passed", "in", "to", ...
train
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/util.py#L169-L201
calve/prof
prof/main.py
print_fields
def print_fields(fields, sort_by_date=False, sort_by_open_projects=False): """ Print a list of available fields and works sort_by_date : boolean whether we print works by their due date """ if (not sort_by_date) and (not sort_by_open_projects): for (_, name, works) in fields: print(name) for work in works: print('- '+str(work)) else: works = all_works # Sort works by due_date if sort_by_date: works.sort(key=lambda x: (not x.is_open, x.due_date), reverse=True) for work in works: if sort_by_open_projects: if not work.is_open: continue # This is ugly, but there is no way to know the field name of a work without searching for it, at the moment field_name = [name for id, name, _ in fields if id == work.field][0] print(field_name) print('- '+str(work))
python
def print_fields(fields, sort_by_date=False, sort_by_open_projects=False): """ Print a list of available fields and works sort_by_date : boolean whether we print works by their due date """ if (not sort_by_date) and (not sort_by_open_projects): for (_, name, works) in fields: print(name) for work in works: print('- '+str(work)) else: works = all_works # Sort works by due_date if sort_by_date: works.sort(key=lambda x: (not x.is_open, x.due_date), reverse=True) for work in works: if sort_by_open_projects: if not work.is_open: continue # This is ugly, but there is no way to know the field name of a work without searching for it, at the moment field_name = [name for id, name, _ in fields if id == work.field][0] print(field_name) print('- '+str(work))
[ "def", "print_fields", "(", "fields", ",", "sort_by_date", "=", "False", ",", "sort_by_open_projects", "=", "False", ")", ":", "if", "(", "not", "sort_by_date", ")", "and", "(", "not", "sort_by_open_projects", ")", ":", "for", "(", "_", ",", "name", ",", ...
Print a list of available fields and works sort_by_date : boolean whether we print works by their due date
[ "Print", "a", "list", "of", "available", "fields", "and", "works", "sort_by_date", ":", "boolean", "whether", "we", "print", "works", "by", "their", "due", "date" ]
train
https://github.com/calve/prof/blob/c6e034f45ab60908dea661e8271bc44758aeedcf/prof/main.py#L11-L33
calve/prof
prof/main.py
send_work
def send_work(baseurl, work_id=None, filename=None, command="make"): """Ask user for a file to send to a work""" while 1: if not work_id: try: work_id = input("id? ") except KeyboardInterrupt: exit(0) work = get_work(work_id) if not work: print("id '{0}' not found".format(work_id)) work_id = None continue if not work.is_open: # Verify it is open print('"It\'s too late for {0} baby..." (Arnold Schwarzenegger)'.format(work.title)) work_id = None continue if not filename: try: filename = input("filename? ") except KeyboardInterrupt: exit(0) while 1: try: if command: if not archive_compile(filename, command): print("Compilation failed") try: send = input("Send anyway [y/N] ") except KeyboardInterrupt: exit(0) if send != "y": exit(1) return work.upload(baseurl, filename) print("Uplodaed, but should verify it on the website") return except FileNotFoundError: print("{0} not found in current dir".format(filename)) filename = None
python
def send_work(baseurl, work_id=None, filename=None, command="make"): """Ask user for a file to send to a work""" while 1: if not work_id: try: work_id = input("id? ") except KeyboardInterrupt: exit(0) work = get_work(work_id) if not work: print("id '{0}' not found".format(work_id)) work_id = None continue if not work.is_open: # Verify it is open print('"It\'s too late for {0} baby..." (Arnold Schwarzenegger)'.format(work.title)) work_id = None continue if not filename: try: filename = input("filename? ") except KeyboardInterrupt: exit(0) while 1: try: if command: if not archive_compile(filename, command): print("Compilation failed") try: send = input("Send anyway [y/N] ") except KeyboardInterrupt: exit(0) if send != "y": exit(1) return work.upload(baseurl, filename) print("Uplodaed, but should verify it on the website") return except FileNotFoundError: print("{0} not found in current dir".format(filename)) filename = None
[ "def", "send_work", "(", "baseurl", ",", "work_id", "=", "None", ",", "filename", "=", "None", ",", "command", "=", "\"make\"", ")", ":", "while", "1", ":", "if", "not", "work_id", ":", "try", ":", "work_id", "=", "input", "(", "\"id? \"", ")", "exce...
Ask user for a file to send to a work
[ "Ask", "user", "for", "a", "file", "to", "send", "to", "a", "work" ]
train
https://github.com/calve/prof/blob/c6e034f45ab60908dea661e8271bc44758aeedcf/prof/main.py#L36-L75
asmodehn/filefinder2
filefinder2/enforce/__init__.py
activate
def activate(): """Install the path-based import components.""" global PathFinder, FileFinder, ff_path_hook path_hook_index = len(sys.path_hooks) sys.path_hooks.append(ff_path_hook) # Resetting sys.path_importer_cache values, # to support the case where we have an implicit package inside an already loaded package, # since we need to replace the default importer. sys.path_importer_cache.clear() # Setting up the meta_path to change package finding logic pathfinder_index = len(sys.meta_path) sys.meta_path.append(PathFinder) return path_hook_index, pathfinder_index
python
def activate(): """Install the path-based import components.""" global PathFinder, FileFinder, ff_path_hook path_hook_index = len(sys.path_hooks) sys.path_hooks.append(ff_path_hook) # Resetting sys.path_importer_cache values, # to support the case where we have an implicit package inside an already loaded package, # since we need to replace the default importer. sys.path_importer_cache.clear() # Setting up the meta_path to change package finding logic pathfinder_index = len(sys.meta_path) sys.meta_path.append(PathFinder) return path_hook_index, pathfinder_index
[ "def", "activate", "(", ")", ":", "global", "PathFinder", ",", "FileFinder", ",", "ff_path_hook", "path_hook_index", "=", "len", "(", "sys", ".", "path_hooks", ")", "sys", ".", "path_hooks", ".", "append", "(", "ff_path_hook", ")", "# Resetting sys.path_importer...
Install the path-based import components.
[ "Install", "the", "path", "-", "based", "import", "components", "." ]
train
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/enforce/__init__.py#L25-L41
appdotnet/ADNpy
adnpy/recipes/broadcast.py
BroadcastMessageBuilder.send
def send(self): """Sends the broadcast message. :returns: tuple of (:class:`adnpy.models.Message`, :class:`adnpy.models.APIMeta`) """ parse_links = self.parse_links or self.parse_markdown_links message = { 'annotations': [], 'entities': { 'parse_links': parse_links, 'parse_markdown_links': self.parse_markdown_links, } } if self.photo: photo, photo_meta = _upload_file(self.api, self.photo) message['annotations'].append({ 'type': 'net.app.core.oembed', 'value': { '+net.app.core.file': { 'file_id': photo.id, 'file_token': photo.file_token, 'format': 'oembed', } } }) if self.attachment: attachment, attachment_meta = _upload_file(self.api, self.attachment) message['annotations'].append({ 'type': 'net.app.core.attachments', 'value': { '+net.app.core.file_list': [ { 'file_id': attachment.id, 'file_token': attachment.file_token, 'format': 'metadata', } ] } }) if self.text: message['text'] = self.text else: message['machine_only'] = True if self.headline: message['annotations'].append({ 'type': 'net.app.core.broadcast.message.metadata', 'value': { 'subject': self.headline, }, }) if self.read_more_link: message['annotations'].append({ 'type': 'net.app.core.crosspost', 'value': { 'canonical_url': self.read_more_link, } }) return self.api.create_message(self.channel_id, data=message)
python
def send(self): """Sends the broadcast message. :returns: tuple of (:class:`adnpy.models.Message`, :class:`adnpy.models.APIMeta`) """ parse_links = self.parse_links or self.parse_markdown_links message = { 'annotations': [], 'entities': { 'parse_links': parse_links, 'parse_markdown_links': self.parse_markdown_links, } } if self.photo: photo, photo_meta = _upload_file(self.api, self.photo) message['annotations'].append({ 'type': 'net.app.core.oembed', 'value': { '+net.app.core.file': { 'file_id': photo.id, 'file_token': photo.file_token, 'format': 'oembed', } } }) if self.attachment: attachment, attachment_meta = _upload_file(self.api, self.attachment) message['annotations'].append({ 'type': 'net.app.core.attachments', 'value': { '+net.app.core.file_list': [ { 'file_id': attachment.id, 'file_token': attachment.file_token, 'format': 'metadata', } ] } }) if self.text: message['text'] = self.text else: message['machine_only'] = True if self.headline: message['annotations'].append({ 'type': 'net.app.core.broadcast.message.metadata', 'value': { 'subject': self.headline, }, }) if self.read_more_link: message['annotations'].append({ 'type': 'net.app.core.crosspost', 'value': { 'canonical_url': self.read_more_link, } }) return self.api.create_message(self.channel_id, data=message)
[ "def", "send", "(", "self", ")", ":", "parse_links", "=", "self", ".", "parse_links", "or", "self", ".", "parse_markdown_links", "message", "=", "{", "'annotations'", ":", "[", "]", ",", "'entities'", ":", "{", "'parse_links'", ":", "parse_links", ",", "'p...
Sends the broadcast message. :returns: tuple of (:class:`adnpy.models.Message`, :class:`adnpy.models.APIMeta`)
[ "Sends", "the", "broadcast", "message", "." ]
train
https://github.com/appdotnet/ADNpy/blob/aedb181cd0d616257fac7b3676ac7d7211336118/adnpy/recipes/broadcast.py#L87-L152
azraq27/neural
neural/scheduler.py
Scheduler.add_server
def add_server(self,address,port=default_port,password=None,speed=None,valid_times=None,invalid_times=None): ''' :address: remote address of server, or special string ``local`` to run the command locally :valid_times: times when this server is available, given as a list of tuples of 2 strings of form "HH:MM" that define the start and end times. Alternatively, a list of 7 lists can be given to define times on a per-day-of-week basis E.g.,:: [('4:30','14:30'),('17:00','23:00')] # or [ [('4:30','14:30'),('17:00','23:00')], # S [('4:30','14:30'),('17:00','23:00')], # M [('4:30','14:30'),('17:00','23:00')], # T [('4:30','14:30'),('17:00','23:00')], # W [('4:30','14:30'),('17:00','23:00')], # R [('4:30','14:30'),('17:00','23:00')], # F [('4:30','14:30'),('17:00','23:00')] # S ] :invalid_times: uses the same format as ``valid_times`` but defines times when the server should not be used ''' for t in [valid_times,invalid_times]: if t: if not (self._is_list_of_tuples(t) or self._is_list_of_tuples(t,True)): raise ValueError('valid_times and invalid_times must either be lists of strings or lists') self.servers.append({ 'address':address, 'port':port, 'password':password, 'speed':speed, 'valid_times':valid_times, 'invalid_times':invalid_times })
python
def add_server(self,address,port=default_port,password=None,speed=None,valid_times=None,invalid_times=None): ''' :address: remote address of server, or special string ``local`` to run the command locally :valid_times: times when this server is available, given as a list of tuples of 2 strings of form "HH:MM" that define the start and end times. Alternatively, a list of 7 lists can be given to define times on a per-day-of-week basis E.g.,:: [('4:30','14:30'),('17:00','23:00')] # or [ [('4:30','14:30'),('17:00','23:00')], # S [('4:30','14:30'),('17:00','23:00')], # M [('4:30','14:30'),('17:00','23:00')], # T [('4:30','14:30'),('17:00','23:00')], # W [('4:30','14:30'),('17:00','23:00')], # R [('4:30','14:30'),('17:00','23:00')], # F [('4:30','14:30'),('17:00','23:00')] # S ] :invalid_times: uses the same format as ``valid_times`` but defines times when the server should not be used ''' for t in [valid_times,invalid_times]: if t: if not (self._is_list_of_tuples(t) or self._is_list_of_tuples(t,True)): raise ValueError('valid_times and invalid_times must either be lists of strings or lists') self.servers.append({ 'address':address, 'port':port, 'password':password, 'speed':speed, 'valid_times':valid_times, 'invalid_times':invalid_times })
[ "def", "add_server", "(", "self", ",", "address", ",", "port", "=", "default_port", ",", "password", "=", "None", ",", "speed", "=", "None", ",", "valid_times", "=", "None", ",", "invalid_times", "=", "None", ")", ":", "for", "t", "in", "[", "valid_tim...
:address: remote address of server, or special string ``local`` to run the command locally :valid_times: times when this server is available, given as a list of tuples of 2 strings of form "HH:MM" that define the start and end times. Alternatively, a list of 7 lists can be given to define times on a per-day-of-week basis E.g.,:: [('4:30','14:30'),('17:00','23:00')] # or [ [('4:30','14:30'),('17:00','23:00')], # S [('4:30','14:30'),('17:00','23:00')], # M [('4:30','14:30'),('17:00','23:00')], # T [('4:30','14:30'),('17:00','23:00')], # W [('4:30','14:30'),('17:00','23:00')], # R [('4:30','14:30'),('17:00','23:00')], # F [('4:30','14:30'),('17:00','23:00')] # S ] :invalid_times: uses the same format as ``valid_times`` but defines times when the server should not be used
[ ":", "address", ":", "remote", "address", "of", "server", "or", "special", "string", "local", "to", "run", "the", "command", "locally", ":", "valid_times", ":", "times", "when", "this", "server", "is", "available", "given", "as", "a", "list", "of", "tuples...
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/scheduler.py#L114-L150
calve/prof
prof/make.py
archive_compile
def archive_compile(filename, command="make"): """ Returns if the given archive properly compile. Extract it in a temporary directory, run the given command, and return True it's result is 0 """ if not tarfile.is_tarfile(filename): print("Cannot extract archive") return False if command == "": return True with tempfile.TemporaryDirectory(suffix="prof") as tmpdir: with tarfile.open(filename) as tararchive: tararchive.extractall(tmpdir) cwd = os.getcwd() # get current directory try: os.chdir(tmpdir) print("Running {} in {} for file {}".format(command, tmpdir, filename)) make = os.system(command) if make == 0: print("Successfully compiled") return True finally: os.chdir(cwd) return False
python
def archive_compile(filename, command="make"): """ Returns if the given archive properly compile. Extract it in a temporary directory, run the given command, and return True it's result is 0 """ if not tarfile.is_tarfile(filename): print("Cannot extract archive") return False if command == "": return True with tempfile.TemporaryDirectory(suffix="prof") as tmpdir: with tarfile.open(filename) as tararchive: tararchive.extractall(tmpdir) cwd = os.getcwd() # get current directory try: os.chdir(tmpdir) print("Running {} in {} for file {}".format(command, tmpdir, filename)) make = os.system(command) if make == 0: print("Successfully compiled") return True finally: os.chdir(cwd) return False
[ "def", "archive_compile", "(", "filename", ",", "command", "=", "\"make\"", ")", ":", "if", "not", "tarfile", ".", "is_tarfile", "(", "filename", ")", ":", "print", "(", "\"Cannot extract archive\"", ")", "return", "False", "if", "command", "==", "\"\"", ":"...
Returns if the given archive properly compile. Extract it in a temporary directory, run the given command, and return True it's result is 0
[ "Returns", "if", "the", "given", "archive", "properly", "compile", ".", "Extract", "it", "in", "a", "temporary", "directory", "run", "the", "given", "command", "and", "return", "True", "it", "s", "result", "is", "0" ]
train
https://github.com/calve/prof/blob/c6e034f45ab60908dea661e8271bc44758aeedcf/prof/make.py#L6-L29
mozaiques/denis
denis/__init__.py
haversine
def haversine(lat_lng1, lat_lng2, native=True): if native: return _native.haversine(lat_lng1, lat_lng2) """Cf https://github.com/mapado/haversine""" lat1, lng1 = lat_lng1 lat2, lng2 = lat_lng2 lat1, lng1, lat2, lng2 = map(math.radians, (lat1, lng1, lat2, lng2)) lat = lat2 - lat1 lng = lng2 - lng1 d = math.sin(lat * 0.5) ** 2 \ + math.cos(lat1) * math.cos(lat2) * math.sin(lng * 0.5) ** 2 return 2 * _AVG_EARTH_RADIUS * math.asin(math.sqrt(d))
python
def haversine(lat_lng1, lat_lng2, native=True): if native: return _native.haversine(lat_lng1, lat_lng2) """Cf https://github.com/mapado/haversine""" lat1, lng1 = lat_lng1 lat2, lng2 = lat_lng2 lat1, lng1, lat2, lng2 = map(math.radians, (lat1, lng1, lat2, lng2)) lat = lat2 - lat1 lng = lng2 - lng1 d = math.sin(lat * 0.5) ** 2 \ + math.cos(lat1) * math.cos(lat2) * math.sin(lng * 0.5) ** 2 return 2 * _AVG_EARTH_RADIUS * math.asin(math.sqrt(d))
[ "def", "haversine", "(", "lat_lng1", ",", "lat_lng2", ",", "native", "=", "True", ")", ":", "if", "native", ":", "return", "_native", ".", "haversine", "(", "lat_lng1", ",", "lat_lng2", ")", "lat1", ",", "lng1", "=", "lat_lng1", "lat2", ",", "lng2", "=...
Cf https://github.com/mapado/haversine
[ "Cf", "https", ":", "//", "github", ".", "com", "/", "mapado", "/", "haversine" ]
train
https://github.com/mozaiques/denis/blob/2741d0e4583f059f7f6aa9aa613cbad70982c235/denis/__init__.py#L41-L53
wtsi-hgi/python-git-subrepo
gitsubrepo/_common.py
run
def run(arguments: List[str], execution_directory: str=None, execution_environment: Dict=None) -> str: """ Runs the given arguments from the given directory (if given, else resorts to the (undefined) current directory). :param arguments: the CLI arguments to run :param execution_directory: the directory to execute the arguments in :param execution_environment: the environment to execute in :return: what is written to stdout following execution :exception RunException: called if the execution has a non-zero return code """ process = subprocess.Popen( arguments, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, cwd=execution_directory, env=execution_environment) out, error = process.communicate() stdout = out.decode(_DATA_ENCODING).rstrip() if process.returncode == _SUCCESS_RETURN_CODE: return stdout else: raise RunException(stdout, error.decode(_DATA_ENCODING).rstrip(), arguments, execution_directory)
python
def run(arguments: List[str], execution_directory: str=None, execution_environment: Dict=None) -> str: """ Runs the given arguments from the given directory (if given, else resorts to the (undefined) current directory). :param arguments: the CLI arguments to run :param execution_directory: the directory to execute the arguments in :param execution_environment: the environment to execute in :return: what is written to stdout following execution :exception RunException: called if the execution has a non-zero return code """ process = subprocess.Popen( arguments, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, cwd=execution_directory, env=execution_environment) out, error = process.communicate() stdout = out.decode(_DATA_ENCODING).rstrip() if process.returncode == _SUCCESS_RETURN_CODE: return stdout else: raise RunException(stdout, error.decode(_DATA_ENCODING).rstrip(), arguments, execution_directory)
[ "def", "run", "(", "arguments", ":", "List", "[", "str", "]", ",", "execution_directory", ":", "str", "=", "None", ",", "execution_environment", ":", "Dict", "=", "None", ")", "->", "str", ":", "process", "=", "subprocess", ".", "Popen", "(", "arguments"...
Runs the given arguments from the given directory (if given, else resorts to the (undefined) current directory). :param arguments: the CLI arguments to run :param execution_directory: the directory to execute the arguments in :param execution_environment: the environment to execute in :return: what is written to stdout following execution :exception RunException: called if the execution has a non-zero return code
[ "Runs", "the", "given", "arguments", "from", "the", "given", "directory", "(", "if", "given", "else", "resorts", "to", "the", "(", "undefined", ")", "current", "directory", ")", ".", ":", "param", "arguments", ":", "the", "CLI", "arguments", "to", "run", ...
train
https://github.com/wtsi-hgi/python-git-subrepo/blob/bb2eb2bd9a7e51b862298ddb4168cc5b8633dad0/gitsubrepo/_common.py#L10-L27
gisgroup/statbank-python
statbank/request.py
Request.raw
def raw(self): """Make request to url and return the raw response object. """ try: return urlopen(str(self.url)) except HTTPError as error: try: # parse error body as json and use message property as error message parsed = self._parsejson(error) exc = RequestError(parsed['message']) exc.__cause__ = None raise exc except ValueError: # when error body is not valid json, error might be caused by server exc = StatbankError() exc.__cause__ = None raise exc
python
def raw(self): """Make request to url and return the raw response object. """ try: return urlopen(str(self.url)) except HTTPError as error: try: # parse error body as json and use message property as error message parsed = self._parsejson(error) exc = RequestError(parsed['message']) exc.__cause__ = None raise exc except ValueError: # when error body is not valid json, error might be caused by server exc = StatbankError() exc.__cause__ = None raise exc
[ "def", "raw", "(", "self", ")", ":", "try", ":", "return", "urlopen", "(", "str", "(", "self", ".", "url", ")", ")", "except", "HTTPError", "as", "error", ":", "try", ":", "# parse error body as json and use message property as error message", "parsed", "=", "...
Make request to url and return the raw response object.
[ "Make", "request", "to", "url", "and", "return", "the", "raw", "response", "object", "." ]
train
https://github.com/gisgroup/statbank-python/blob/3678820d8da35f225d706ea5096c1f08bf0b9c68/statbank/request.py#L25-L41
gisgroup/statbank-python
statbank/request.py
Request.csv
def csv(self): """Parse raw response as csv and return row object list. """ lines = self._parsecsv(self.raw) # set keys from header line (first line) keys = next(lines) for line in lines: yield dict(zip(keys, line))
python
def csv(self): """Parse raw response as csv and return row object list. """ lines = self._parsecsv(self.raw) # set keys from header line (first line) keys = next(lines) for line in lines: yield dict(zip(keys, line))
[ "def", "csv", "(", "self", ")", ":", "lines", "=", "self", ".", "_parsecsv", "(", "self", ".", "raw", ")", "# set keys from header line (first line)", "keys", "=", "next", "(", "lines", ")", "for", "line", "in", "lines", ":", "yield", "dict", "(", "zip",...
Parse raw response as csv and return row object list.
[ "Parse", "raw", "response", "as", "csv", "and", "return", "row", "object", "list", "." ]
train
https://github.com/gisgroup/statbank-python/blob/3678820d8da35f225d706ea5096c1f08bf0b9c68/statbank/request.py#L50-L59
gisgroup/statbank-python
statbank/request.py
Request._parsecsv
def _parsecsv(x): """Deserialize file-like object containing csv to a Python generator. """ for line in x: # decode as utf-8, whitespace-strip and split on delimiter yield line.decode('utf-8').strip().split(config.DELIMITER)
python
def _parsecsv(x): """Deserialize file-like object containing csv to a Python generator. """ for line in x: # decode as utf-8, whitespace-strip and split on delimiter yield line.decode('utf-8').strip().split(config.DELIMITER)
[ "def", "_parsecsv", "(", "x", ")", ":", "for", "line", "in", "x", ":", "# decode as utf-8, whitespace-strip and split on delimiter", "yield", "line", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", ".", "split", "(", "config", ".", "DELIMITER", "...
Deserialize file-like object containing csv to a Python generator.
[ "Deserialize", "file", "-", "like", "object", "containing", "csv", "to", "a", "Python", "generator", "." ]
train
https://github.com/gisgroup/statbank-python/blob/3678820d8da35f225d706ea5096c1f08bf0b9c68/statbank/request.py#L62-L67
9seconds/pep3134
pep3134/utils.py
construct_exc_class
def construct_exc_class(cls): """Constructs proxy class for the exception.""" class ProxyException(cls, BaseException): __pep3134__ = True @property def __traceback__(self): if self.__fixed_traceback__: return self.__fixed_traceback__ current_exc, current_tb = sys.exc_info()[1:] if current_exc is self: return current_tb def __init__(self, instance=None): # pylint: disable=W0231 self.__original_exception__ = instance self.__fixed_traceback__ = None def __getattr__(self, item): return getattr(self.__original_exception__, item) def __repr__(self): return repr(self.__original_exception__) def __str__(self): return str(self.__original_exception__) def with_traceback(self, traceback): instance = copy.copy(self) instance.__fixed_traceback__ = traceback return instance ProxyException.__name__ = cls.__name__ return ProxyException
python
def construct_exc_class(cls): """Constructs proxy class for the exception.""" class ProxyException(cls, BaseException): __pep3134__ = True @property def __traceback__(self): if self.__fixed_traceback__: return self.__fixed_traceback__ current_exc, current_tb = sys.exc_info()[1:] if current_exc is self: return current_tb def __init__(self, instance=None): # pylint: disable=W0231 self.__original_exception__ = instance self.__fixed_traceback__ = None def __getattr__(self, item): return getattr(self.__original_exception__, item) def __repr__(self): return repr(self.__original_exception__) def __str__(self): return str(self.__original_exception__) def with_traceback(self, traceback): instance = copy.copy(self) instance.__fixed_traceback__ = traceback return instance ProxyException.__name__ = cls.__name__ return ProxyException
[ "def", "construct_exc_class", "(", "cls", ")", ":", "class", "ProxyException", "(", "cls", ",", "BaseException", ")", ":", "__pep3134__", "=", "True", "@", "property", "def", "__traceback__", "(", "self", ")", ":", "if", "self", ".", "__fixed_traceback__", "...
Constructs proxy class for the exception.
[ "Constructs", "proxy", "class", "for", "the", "exception", "." ]
train
https://github.com/9seconds/pep3134/blob/6b6fae903bb63cb2ac24004bb2c18ebc6a7d41d0/pep3134/utils.py#L9-L44
9seconds/pep3134
pep3134/utils.py
prepare_raise
def prepare_raise(func): """ Just a short decorator which shrinks full ``raise (E, V, T)`` form into proper ``raise E(V), T``. """ @functools.wraps(func) def decorator(type_, value=None, traceback=None): if value is not None and isinstance(type_, Exception): raise TypeError("instance exception may not have a separate value") if value is None: if isinstance(type_, Exception): error = type_ else: error = type_() else: error = type_(value) func(error, value, traceback) return decorator
python
def prepare_raise(func): """ Just a short decorator which shrinks full ``raise (E, V, T)`` form into proper ``raise E(V), T``. """ @functools.wraps(func) def decorator(type_, value=None, traceback=None): if value is not None and isinstance(type_, Exception): raise TypeError("instance exception may not have a separate value") if value is None: if isinstance(type_, Exception): error = type_ else: error = type_() else: error = type_(value) func(error, value, traceback) return decorator
[ "def", "prepare_raise", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "decorator", "(", "type_", ",", "value", "=", "None", ",", "traceback", "=", "None", ")", ":", "if", "value", "is", "not", "None", "and", "isinsta...
Just a short decorator which shrinks full ``raise (E, V, T)`` form into proper ``raise E(V), T``.
[ "Just", "a", "short", "decorator", "which", "shrinks", "full", "raise", "(", "E", "V", "T", ")", "form", "into", "proper", "raise", "E", "(", "V", ")", "T", "." ]
train
https://github.com/9seconds/pep3134/blob/6b6fae903bb63cb2ac24004bb2c18ebc6a7d41d0/pep3134/utils.py#L47-L67
aerogear/digger-build-cli
digger/base/build.py
BaseBuild.from_url
def from_url(cls, url, **kwargs): """ Downloads a zipped app source code from an url. :param url: url to download the app source from Returns A project instance. """ username = kwargs.get('username') password = kwargs.get('password') headers = kwargs.get('headers', {}) auth = None path = kwargs.get('path', '/tmp/app.zip') dest = kwargs.get('dest', '/app') if username and password: auth = base64.b64encode(b'%s:%s' % (username, password)) if auth: headers['Authorization'] = 'Basic %s' % auth.decode('utf8') r = request.get(url, headers=headers, stream=True) if r.status_code != 200: err_msg = 'Could not download resource from url (%s): %s' err_args = (r.status_code, url) raise errors.DownloadError(err_msg % err_args) with open('/tmp/app.zip', 'wb+') as f: chunks = r.iter_content(chunk_size=1024) [f.write(chunk) for chunk in chunks if chunk] return cls.from_zip(path, dest)
python
def from_url(cls, url, **kwargs): """ Downloads a zipped app source code from an url. :param url: url to download the app source from Returns A project instance. """ username = kwargs.get('username') password = kwargs.get('password') headers = kwargs.get('headers', {}) auth = None path = kwargs.get('path', '/tmp/app.zip') dest = kwargs.get('dest', '/app') if username and password: auth = base64.b64encode(b'%s:%s' % (username, password)) if auth: headers['Authorization'] = 'Basic %s' % auth.decode('utf8') r = request.get(url, headers=headers, stream=True) if r.status_code != 200: err_msg = 'Could not download resource from url (%s): %s' err_args = (r.status_code, url) raise errors.DownloadError(err_msg % err_args) with open('/tmp/app.zip', 'wb+') as f: chunks = r.iter_content(chunk_size=1024) [f.write(chunk) for chunk in chunks if chunk] return cls.from_zip(path, dest)
[ "def", "from_url", "(", "cls", ",", "url", ",", "*", "*", "kwargs", ")", ":", "username", "=", "kwargs", ".", "get", "(", "'username'", ")", "password", "=", "kwargs", ".", "get", "(", "'password'", ")", "headers", "=", "kwargs", ".", "get", "(", "...
Downloads a zipped app source code from an url. :param url: url to download the app source from Returns A project instance.
[ "Downloads", "a", "zipped", "app", "source", "code", "from", "an", "url", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/base/build.py#L19-L46
aerogear/digger-build-cli
digger/base/build.py
BaseBuild.from_path
def from_path(cls, path): """ Instantiates a project class from a given path. :param path: app folder path source code Returns A project instance. """ if os.path.exists(path) is False: raise errors.InvalidPathError(path) return cls(path=path)
python
def from_path(cls, path): """ Instantiates a project class from a given path. :param path: app folder path source code Returns A project instance. """ if os.path.exists(path) is False: raise errors.InvalidPathError(path) return cls(path=path)
[ "def", "from_path", "(", "cls", ",", "path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", "is", "False", ":", "raise", "errors", ".", "InvalidPathError", "(", "path", ")", "return", "cls", "(", "path", "=", "path", ")" ]
Instantiates a project class from a given path. :param path: app folder path source code Returns A project instance.
[ "Instantiates", "a", "project", "class", "from", "a", "given", "path", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/base/build.py#L49-L60
aerogear/digger-build-cli
digger/base/build.py
BaseBuild.from_zip
def from_zip(cls, src='/tmp/app.zip', dest='/app'): """ Unzips a zipped app project file and instantiates it. :param src: zipfile path :param dest: destination folder to extract the zipfile content Returns A project instance. """ try: zf = zipfile.ZipFile(src, 'r') except FileNotFoundError: raise errors.InvalidPathError(src) except zipfile.BadZipFile: raise errors.InvalidZipFileError(src) [zf.extract(file, dest) for file in zf.namelist()] zf.close() return cls.from_path(dest)
python
def from_zip(cls, src='/tmp/app.zip', dest='/app'): """ Unzips a zipped app project file and instantiates it. :param src: zipfile path :param dest: destination folder to extract the zipfile content Returns A project instance. """ try: zf = zipfile.ZipFile(src, 'r') except FileNotFoundError: raise errors.InvalidPathError(src) except zipfile.BadZipFile: raise errors.InvalidZipFileError(src) [zf.extract(file, dest) for file in zf.namelist()] zf.close() return cls.from_path(dest)
[ "def", "from_zip", "(", "cls", ",", "src", "=", "'/tmp/app.zip'", ",", "dest", "=", "'/app'", ")", ":", "try", ":", "zf", "=", "zipfile", ".", "ZipFile", "(", "src", ",", "'r'", ")", "except", "FileNotFoundError", ":", "raise", "errors", ".", "InvalidP...
Unzips a zipped app project file and instantiates it. :param src: zipfile path :param dest: destination folder to extract the zipfile content Returns A project instance.
[ "Unzips", "a", "zipped", "app", "project", "file", "and", "instantiates", "it", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/base/build.py#L63-L81
aerogear/digger-build-cli
digger/base/build.py
BaseBuild.inspect
def inspect(self, tab_width=2, ident_char='-'): """ Inspects a project file structure based based on the instance folder property. :param tab_width: width size for subfolders and files. :param ident_char: char to be used to show identation level Returns A string containing the project structure. """ startpath = self.path output = [] for (root, dirs, files) in os.walk(startpath): level = root.replace(startpath, '').count(os.sep) indent = ident_char * tab_width * (level) if level == 0: output.append('{}{}/'.format(indent, os.path.basename(root))) else: output.append('|{}{}/'.format(indent, os.path.basename(root))) subindent = ident_char * tab_width * (level + 1) [output.append('|{}{}'.format(subindent, f)) for f in files] return '\n'.join(output)
python
def inspect(self, tab_width=2, ident_char='-'): """ Inspects a project file structure based based on the instance folder property. :param tab_width: width size for subfolders and files. :param ident_char: char to be used to show identation level Returns A string containing the project structure. """ startpath = self.path output = [] for (root, dirs, files) in os.walk(startpath): level = root.replace(startpath, '').count(os.sep) indent = ident_char * tab_width * (level) if level == 0: output.append('{}{}/'.format(indent, os.path.basename(root))) else: output.append('|{}{}/'.format(indent, os.path.basename(root))) subindent = ident_char * tab_width * (level + 1) [output.append('|{}{}'.format(subindent, f)) for f in files] return '\n'.join(output)
[ "def", "inspect", "(", "self", ",", "tab_width", "=", "2", ",", "ident_char", "=", "'-'", ")", ":", "startpath", "=", "self", ".", "path", "output", "=", "[", "]", "for", "(", "root", ",", "dirs", ",", "files", ")", "in", "os", ".", "walk", "(", ...
Inspects a project file structure based based on the instance folder property. :param tab_width: width size for subfolders and files. :param ident_char: char to be used to show identation level Returns A string containing the project structure.
[ "Inspects", "a", "project", "file", "structure", "based", "based", "on", "the", "instance", "folder", "property", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/base/build.py#L94-L115
aerogear/digger-build-cli
digger/base/build.py
BaseBuild.log
def log(self, ctx='all'): """ Gets the build log output. :param ctx: specifies which log message to show, it can be 'validate', 'build' or 'all'. """ path = '%s/%s.log' % (self.path, ctx) if os.path.exists(path) is True: with open(path, 'r') as f: print(f.read()) return validate_path = '%s/validate.log' % self.path build_path = '%s/build.log' % self.path out = [] with open(validate_path) as validate_log, open(build_path) as build_log: for line in validate_log.readlines(): out.append(line) for line in build_log.readlines(): out.append(line) print(''.join(out))
python
def log(self, ctx='all'): """ Gets the build log output. :param ctx: specifies which log message to show, it can be 'validate', 'build' or 'all'. """ path = '%s/%s.log' % (self.path, ctx) if os.path.exists(path) is True: with open(path, 'r') as f: print(f.read()) return validate_path = '%s/validate.log' % self.path build_path = '%s/build.log' % self.path out = [] with open(validate_path) as validate_log, open(build_path) as build_log: for line in validate_log.readlines(): out.append(line) for line in build_log.readlines(): out.append(line) print(''.join(out))
[ "def", "log", "(", "self", ",", "ctx", "=", "'all'", ")", ":", "path", "=", "'%s/%s.log'", "%", "(", "self", ".", "path", ",", "ctx", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", "is", "True", ":", "with", "open", "(", "path",...
Gets the build log output. :param ctx: specifies which log message to show, it can be 'validate', 'build' or 'all'.
[ "Gets", "the", "build", "log", "output", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/base/build.py#L117-L136
petebachant/PXL
pxl/fdiff.py
second_order_diff
def second_order_diff(arr, x): """Compute second order difference of an array. A 2nd order forward difference is used for the first point, 2nd order central difference for interior, and 2nd order backward difference for last point, returning an array the same length as the input array. """ # Convert to array, so this will work with pandas Series arr = np.array(arr) # Calculate dx for forward diff point dxf = (x[2] - x[0])/2 # Calculate dx for backward diff point dxb = (x[-1] - x[-3])/2 # Calculate dx array for central difference dx = (x[2:] - x[:-2])/2 # For first data point, use 2nd order forward difference first = (-3*arr[0] + 4*arr[1] - arr[2])/(2*dxf) # For last point, use 2nd order backward difference last = (3*arr[-1] - 4*arr[-2] + arr[-3])/(2*dxb) # For all interior points, use 2nd order central difference interior = (arr[2:] - arr[:-2])/(2*dx) # Create entire array darr = np.concatenate(([first], interior, [last])) return darr
python
def second_order_diff(arr, x): """Compute second order difference of an array. A 2nd order forward difference is used for the first point, 2nd order central difference for interior, and 2nd order backward difference for last point, returning an array the same length as the input array. """ # Convert to array, so this will work with pandas Series arr = np.array(arr) # Calculate dx for forward diff point dxf = (x[2] - x[0])/2 # Calculate dx for backward diff point dxb = (x[-1] - x[-3])/2 # Calculate dx array for central difference dx = (x[2:] - x[:-2])/2 # For first data point, use 2nd order forward difference first = (-3*arr[0] + 4*arr[1] - arr[2])/(2*dxf) # For last point, use 2nd order backward difference last = (3*arr[-1] - 4*arr[-2] + arr[-3])/(2*dxb) # For all interior points, use 2nd order central difference interior = (arr[2:] - arr[:-2])/(2*dx) # Create entire array darr = np.concatenate(([first], interior, [last])) return darr
[ "def", "second_order_diff", "(", "arr", ",", "x", ")", ":", "# Convert to array, so this will work with pandas Series", "arr", "=", "np", ".", "array", "(", "arr", ")", "# Calculate dx for forward diff point", "dxf", "=", "(", "x", "[", "2", "]", "-", "x", "[", ...
Compute second order difference of an array. A 2nd order forward difference is used for the first point, 2nd order central difference for interior, and 2nd order backward difference for last point, returning an array the same length as the input array.
[ "Compute", "second", "order", "difference", "of", "an", "array", "." ]
train
https://github.com/petebachant/PXL/blob/d7d06cb74422e1ac0154741351fbecea080cfcc0/pxl/fdiff.py#L9-L32
BD2KOnFHIR/i2b2model
i2b2model/sqlsupport/tablebase.py
ColumnsBase._freeze
def _freeze(self) -> OrderedDict: """ Evaluate all of the column values and return the result :return: column/value tuples """ return OrderedDict(**{k: getattr(self, k, None) for k in super().__getattribute__("_columns")})
python
def _freeze(self) -> OrderedDict: """ Evaluate all of the column values and return the result :return: column/value tuples """ return OrderedDict(**{k: getattr(self, k, None) for k in super().__getattribute__("_columns")})
[ "def", "_freeze", "(", "self", ")", "->", "OrderedDict", ":", "return", "OrderedDict", "(", "*", "*", "{", "k", ":", "getattr", "(", "self", ",", "k", ",", "None", ")", "for", "k", "in", "super", "(", ")", ".", "__getattribute__", "(", "\"_columns\""...
Evaluate all of the column values and return the result :return: column/value tuples
[ "Evaluate", "all", "of", "the", "column", "values", "and", "return", "the", "result", ":", "return", ":", "column", "/", "value", "tuples" ]
train
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/sqlsupport/tablebase.py#L22-L27
BD2KOnFHIR/i2b2model
i2b2model/sqlsupport/tablebase.py
ColumnsBase._eval
def _eval(self, m: EvalParam) -> object: """ Evaluate m returning the method / function invocation or value. Kind of like a static method :param m: object to evaluate :return: return """ if inspect.ismethod(m) or inspect.isroutine(m): return m() elif inspect.isfunction(m): return m(self) if len(inspect.signature(m)) > 0 else m() else: return m
python
def _eval(self, m: EvalParam) -> object: """ Evaluate m returning the method / function invocation or value. Kind of like a static method :param m: object to evaluate :return: return """ if inspect.ismethod(m) or inspect.isroutine(m): return m() elif inspect.isfunction(m): return m(self) if len(inspect.signature(m)) > 0 else m() else: return m
[ "def", "_eval", "(", "self", ",", "m", ":", "EvalParam", ")", "->", "object", ":", "if", "inspect", ".", "ismethod", "(", "m", ")", "or", "inspect", ".", "isroutine", "(", "m", ")", ":", "return", "m", "(", ")", "elif", "inspect", ".", "isfunction"...
Evaluate m returning the method / function invocation or value. Kind of like a static method :param m: object to evaluate :return: return
[ "Evaluate", "m", "returning", "the", "method", "/", "function", "invocation", "or", "value", ".", "Kind", "of", "like", "a", "static", "method", ":", "param", "m", ":", "object", "to", "evaluate", ":", "return", ":", "return" ]
train
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/sqlsupport/tablebase.py#L29-L40
uw-it-aca/uw-restclients-uwnetid
uw_uwnetid/password.py
_process_json
def _process_json(response_body): """ Returns a UwPassword objects """ data = json.loads(response_body) uwpassword = UwPassword(uwnetid=data["uwNetID"], kerb_status=data["kerbStatus"], interval=None, last_change=None, last_change_med=None, expires_med=None, interval_med=None, minimum_length=int(data["minimumLength"]), time_stamp=parse(data["timeStamp"]),) if "lastChange" in data: uwpassword.last_change = parse(data["lastChange"]) if "interval" in data: uwpassword.interval = timeparse(data["interval"]) if "lastChangeMed" in data: uwpassword.last_change_med = parse(data["lastChangeMed"]) if "expiresMed" in data: uwpassword.expires_med = parse(data["expiresMed"]) if "intervalMed" in data: uwpassword.interval_med = timeparse(data["intervalMed"]) if "netidStatus" in data: netid_status = [] for status in data["netidStatus"]: netid_status.append(status) uwpassword.netid_status = netid_status return uwpassword
python
def _process_json(response_body): """ Returns a UwPassword objects """ data = json.loads(response_body) uwpassword = UwPassword(uwnetid=data["uwNetID"], kerb_status=data["kerbStatus"], interval=None, last_change=None, last_change_med=None, expires_med=None, interval_med=None, minimum_length=int(data["minimumLength"]), time_stamp=parse(data["timeStamp"]),) if "lastChange" in data: uwpassword.last_change = parse(data["lastChange"]) if "interval" in data: uwpassword.interval = timeparse(data["interval"]) if "lastChangeMed" in data: uwpassword.last_change_med = parse(data["lastChangeMed"]) if "expiresMed" in data: uwpassword.expires_med = parse(data["expiresMed"]) if "intervalMed" in data: uwpassword.interval_med = timeparse(data["intervalMed"]) if "netidStatus" in data: netid_status = [] for status in data["netidStatus"]: netid_status.append(status) uwpassword.netid_status = netid_status return uwpassword
[ "def", "_process_json", "(", "response_body", ")", ":", "data", "=", "json", ".", "loads", "(", "response_body", ")", "uwpassword", "=", "UwPassword", "(", "uwnetid", "=", "data", "[", "\"uwNetID\"", "]", ",", "kerb_status", "=", "data", "[", "\"kerbStatus\"...
Returns a UwPassword objects
[ "Returns", "a", "UwPassword", "objects" ]
train
https://github.com/uw-it-aca/uw-restclients-uwnetid/blob/58c78b564f9c920a8f8fd408eec959ddd5605b0b/uw_uwnetid/password.py#L32-L66
CodyKochmann/time_limit
commit-update.py
create_next_tag
def create_next_tag(): """ creates a tag based on the date and previous tags """ date = datetime.utcnow() date_tag = '{}.{}.{}'.format(date.year, date.month, date.day) if date_tag in latest_tag(): # if there was an update already today latest = latest_tag().split('.') # split by spaces if len(latest) == 4: # if it is not the first revision of the day latest[-1]= str(int(latest[-1])+1) else: # if it is the first revision of the day latest+=['1'] date_tag = '.'.join(latest) return date_tag
python
def create_next_tag(): """ creates a tag based on the date and previous tags """ date = datetime.utcnow() date_tag = '{}.{}.{}'.format(date.year, date.month, date.day) if date_tag in latest_tag(): # if there was an update already today latest = latest_tag().split('.') # split by spaces if len(latest) == 4: # if it is not the first revision of the day latest[-1]= str(int(latest[-1])+1) else: # if it is the first revision of the day latest+=['1'] date_tag = '.'.join(latest) return date_tag
[ "def", "create_next_tag", "(", ")", ":", "date", "=", "datetime", ".", "utcnow", "(", ")", "date_tag", "=", "'{}.{}.{}'", ".", "format", "(", "date", ".", "year", ",", "date", ".", "month", ",", "date", ".", "day", ")", "if", "date_tag", "in", "lates...
creates a tag based on the date and previous tags
[ "creates", "a", "tag", "based", "on", "the", "date", "and", "previous", "tags" ]
train
https://github.com/CodyKochmann/time_limit/blob/447a640d3e187bb4775d780b757c6d9bdc88ae34/commit-update.py#L25-L36
CodyKochmann/time_limit
commit-update.py
sync_readmes
def sync_readmes(): """ just copies README.md into README for pypi documentation """ print("syncing README") with open("README.md", 'r') as reader: file_text = reader.read() with open("README", 'w') as writer: writer.write(file_text)
python
def sync_readmes(): """ just copies README.md into README for pypi documentation """ print("syncing README") with open("README.md", 'r') as reader: file_text = reader.read() with open("README", 'w') as writer: writer.write(file_text)
[ "def", "sync_readmes", "(", ")", ":", "print", "(", "\"syncing README\"", ")", "with", "open", "(", "\"README.md\"", ",", "'r'", ")", "as", "reader", ":", "file_text", "=", "reader", ".", "read", "(", ")", "with", "open", "(", "\"README\"", ",", "'w'", ...
just copies README.md into README for pypi documentation
[ "just", "copies", "README", ".", "md", "into", "README", "for", "pypi", "documentation" ]
train
https://github.com/CodyKochmann/time_limit/blob/447a640d3e187bb4775d780b757c6d9bdc88ae34/commit-update.py#L50-L56
jacebrowning/comparable
comparable/simple.py
Number.similarity
def similarity(self, other): """Get similarity as a ratio of the two numbers.""" numerator, denominator = sorted((self.value, other.value)) try: ratio = float(numerator) / denominator except ZeroDivisionError: ratio = 0.0 if numerator else 1.0 similarity = self.Similarity(ratio) return similarity
python
def similarity(self, other): """Get similarity as a ratio of the two numbers.""" numerator, denominator = sorted((self.value, other.value)) try: ratio = float(numerator) / denominator except ZeroDivisionError: ratio = 0.0 if numerator else 1.0 similarity = self.Similarity(ratio) return similarity
[ "def", "similarity", "(", "self", ",", "other", ")", ":", "numerator", ",", "denominator", "=", "sorted", "(", "(", "self", ".", "value", ",", "other", ".", "value", ")", ")", "try", ":", "ratio", "=", "float", "(", "numerator", ")", "/", "denominato...
Get similarity as a ratio of the two numbers.
[ "Get", "similarity", "as", "a", "ratio", "of", "the", "two", "numbers", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/simple.py#L44-L52
jacebrowning/comparable
comparable/simple.py
Text.similarity
def similarity(self, other): """Get similarity as a ratio of the two texts.""" ratio = SequenceMatcher(a=self.value, b=other.value).ratio() similarity = self.Similarity(ratio) return similarity
python
def similarity(self, other): """Get similarity as a ratio of the two texts.""" ratio = SequenceMatcher(a=self.value, b=other.value).ratio() similarity = self.Similarity(ratio) return similarity
[ "def", "similarity", "(", "self", ",", "other", ")", ":", "ratio", "=", "SequenceMatcher", "(", "a", "=", "self", ".", "value", ",", "b", "=", "other", ".", "value", ")", ".", "ratio", "(", ")", "similarity", "=", "self", ".", "Similarity", "(", "r...
Get similarity as a ratio of the two texts.
[ "Get", "similarity", "as", "a", "ratio", "of", "the", "two", "texts", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/simple.py#L65-L69
jacebrowning/comparable
comparable/simple.py
TextEnum.similarity
def similarity(self, other): """Get similarity as a discrete ratio (1.0 or 0.0).""" ratio = 1.0 if (str(self).lower() == str(other).lower()) else 0.0 similarity = self.Similarity(ratio) return similarity
python
def similarity(self, other): """Get similarity as a discrete ratio (1.0 or 0.0).""" ratio = 1.0 if (str(self).lower() == str(other).lower()) else 0.0 similarity = self.Similarity(ratio) return similarity
[ "def", "similarity", "(", "self", ",", "other", ")", ":", "ratio", "=", "1.0", "if", "(", "str", "(", "self", ")", ".", "lower", "(", ")", "==", "str", "(", "other", ")", ".", "lower", "(", ")", ")", "else", "0.0", "similarity", "=", "self", "....
Get similarity as a discrete ratio (1.0 or 0.0).
[ "Get", "similarity", "as", "a", "discrete", "ratio", "(", "1", ".", "0", "or", "0", ".", "0", ")", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/simple.py#L78-L82
jacebrowning/comparable
comparable/simple.py
TextTitle._strip
def _strip(text): """Strip articles/whitespace and remove case.""" text = text.strip() text = text.replace(' ', ' ') # remove duplicate spaces text = text.lower() for joiner in TextTitle.JOINERS: text = text.replace(joiner, 'and') for article in TextTitle.ARTICLES: if text.startswith(article + ' '): text = text[len(article) + 1:] break return text
python
def _strip(text): """Strip articles/whitespace and remove case.""" text = text.strip() text = text.replace(' ', ' ') # remove duplicate spaces text = text.lower() for joiner in TextTitle.JOINERS: text = text.replace(joiner, 'and') for article in TextTitle.ARTICLES: if text.startswith(article + ' '): text = text[len(article) + 1:] break return text
[ "def", "_strip", "(", "text", ")", ":", "text", "=", "text", ".", "strip", "(", ")", "text", "=", "text", ".", "replace", "(", "' '", ",", "' '", ")", "# remove duplicate spaces", "text", "=", "text", ".", "lower", "(", ")", "for", "joiner", "in", ...
Strip articles/whitespace and remove case.
[ "Strip", "articles", "/", "whitespace", "and", "remove", "case", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/simple.py#L100-L111
jacebrowning/comparable
comparable/simple.py
TextTitle.similarity
def similarity(self, other): """Get similarity as a ratio of the stripped text.""" logging.debug("comparing %r and %r...", self.stripped, other.stripped) ratio = SequenceMatcher(a=self.stripped, b=other.stripped).ratio() similarity = self.Similarity(ratio) return similarity
python
def similarity(self, other): """Get similarity as a ratio of the stripped text.""" logging.debug("comparing %r and %r...", self.stripped, other.stripped) ratio = SequenceMatcher(a=self.stripped, b=other.stripped).ratio() similarity = self.Similarity(ratio) return similarity
[ "def", "similarity", "(", "self", ",", "other", ")", ":", "logging", ".", "debug", "(", "\"comparing %r and %r...\"", ",", "self", ".", "stripped", ",", "other", ".", "stripped", ")", "ratio", "=", "SequenceMatcher", "(", "a", "=", "self", ".", "stripped",...
Get similarity as a ratio of the stripped text.
[ "Get", "similarity", "as", "a", "ratio", "of", "the", "stripped", "text", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/simple.py#L113-L118
azraq27/neural
neural/wrappers/fsl.py
skull_strip
def skull_strip(dset,suffix='_ns',prefix=None,unifize=True): ''' use bet to strip skull from given anatomy ''' # should add options to use betsurf and T1/T2 in the future # Since BET fails on weirdly distributed datasets, I added 3dUnifize in... I realize this makes this dependent on AFNI. Sorry, :) if prefix==None: prefix = nl.suffix(dset,suffix) unifize_dset = nl.suffix(dset,'_u') cmd = bet2 if bet2 else 'bet2' if unifize: info = nl.dset_info(dset) if info==None: nl.notify('Error: could not read info for dset %s' % dset,level=nl.level.error) return False cmd = os.path.join(fsl_dir,cmd) if fsl_dir else cmd cutoff_value = nl.max(dset) * 0.05 nl.run(['3dUnifize','-prefix',unifize_dset,nl.calc(dset,'step(a-%f)*a' % cutoff_value)],products=unifize_dset) else: unifize_dset = dset nl.run([cmd,unifize_dset,prefix,'-w',0.5],products=prefix)
python
def skull_strip(dset,suffix='_ns',prefix=None,unifize=True): ''' use bet to strip skull from given anatomy ''' # should add options to use betsurf and T1/T2 in the future # Since BET fails on weirdly distributed datasets, I added 3dUnifize in... I realize this makes this dependent on AFNI. Sorry, :) if prefix==None: prefix = nl.suffix(dset,suffix) unifize_dset = nl.suffix(dset,'_u') cmd = bet2 if bet2 else 'bet2' if unifize: info = nl.dset_info(dset) if info==None: nl.notify('Error: could not read info for dset %s' % dset,level=nl.level.error) return False cmd = os.path.join(fsl_dir,cmd) if fsl_dir else cmd cutoff_value = nl.max(dset) * 0.05 nl.run(['3dUnifize','-prefix',unifize_dset,nl.calc(dset,'step(a-%f)*a' % cutoff_value)],products=unifize_dset) else: unifize_dset = dset nl.run([cmd,unifize_dset,prefix,'-w',0.5],products=prefix)
[ "def", "skull_strip", "(", "dset", ",", "suffix", "=", "'_ns'", ",", "prefix", "=", "None", ",", "unifize", "=", "True", ")", ":", "# should add options to use betsurf and T1/T2 in the future", "# Since BET fails on weirdly distributed datasets, I added 3dUnifize in... I realiz...
use bet to strip skull from given anatomy
[ "use", "bet", "to", "strip", "skull", "from", "given", "anatomy" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/wrappers/fsl.py#L23-L41
nim65s/ndh
ndh/utils.py
full_url
def full_url(url='', domain=None, protocol='https'): """ Prepend protocol (default to https) and domain name (default from the Site framework) to an url """ if domain is None: from django.contrib.sites.models import Site domain = Site.objects.get_current().domain return f'{protocol}://{domain}{url}'
python
def full_url(url='', domain=None, protocol='https'): """ Prepend protocol (default to https) and domain name (default from the Site framework) to an url """ if domain is None: from django.contrib.sites.models import Site domain = Site.objects.get_current().domain return f'{protocol}://{domain}{url}'
[ "def", "full_url", "(", "url", "=", "''", ",", "domain", "=", "None", ",", "protocol", "=", "'https'", ")", ":", "if", "domain", "is", "None", ":", "from", "django", ".", "contrib", ".", "sites", ".", "models", "import", "Site", "domain", "=", "Site"...
Prepend protocol (default to https) and domain name (default from the Site framework) to an url
[ "Prepend", "protocol", "(", "default", "to", "https", ")", "and", "domain", "name", "(", "default", "from", "the", "Site", "framework", ")", "to", "an", "url" ]
train
https://github.com/nim65s/ndh/blob/3e14644e3f701044acbb7aafbf69b51ad6f86d99/ndh/utils.py#L6-L13
nim65s/ndh
ndh/utils.py
query_sum
def query_sum(queryset, field): """ Let the DBMS perform a sum on a queryset """ return queryset.aggregate(s=models.functions.Coalesce(models.Sum(field), 0))['s']
python
def query_sum(queryset, field): """ Let the DBMS perform a sum on a queryset """ return queryset.aggregate(s=models.functions.Coalesce(models.Sum(field), 0))['s']
[ "def", "query_sum", "(", "queryset", ",", "field", ")", ":", "return", "queryset", ".", "aggregate", "(", "s", "=", "models", ".", "functions", ".", "Coalesce", "(", "models", ".", "Sum", "(", "field", ")", ",", "0", ")", ")", "[", "'s'", "]" ]
Let the DBMS perform a sum on a queryset
[ "Let", "the", "DBMS", "perform", "a", "sum", "on", "a", "queryset" ]
train
https://github.com/nim65s/ndh/blob/3e14644e3f701044acbb7aafbf69b51ad6f86d99/ndh/utils.py#L20-L24