repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
lobeck/flask-bower
flask_bower/__init__.py
build_url
def build_url(component, filename, **values): """ search bower asset and build url :param component: bower component (package) :type component: str :param filename: filename in bower component - can contain directories (like dist/jquery.js) :type filename: str :param values: additional url parameters :type values: dict[str, str] :return: url :rtype: str | None """ root = current_app.config['BOWER_COMPONENTS_ROOT'] bower_data = None package_data = None # check if component exists in bower_components directory if not os.path.isdir(os.path.join(current_app.root_path, root, component)): # FallBack to default url_for flask return None # load bower.json of specified component bower_file_path = os.path.join(current_app.root_path, root, component, 'bower.json') if os.path.exists(bower_file_path): with open(bower_file_path, 'r') as bower_file: bower_data = json.load(bower_file) # check if package.json exists and load package.json data package_file_path = os.path.join(current_app.root_path, root, component, 'package.json') if os.path.exists(package_file_path): with open(package_file_path, 'r') as package_file: package_data = json.load(package_file) # check if specified file actually exists if not os.path.exists(os.path.join(current_app.root_path, root, component, filename)): return None # check if minified file exists (by pattern <filename>.min.<ext> # returns filename if successful if current_app.config['BOWER_TRY_MINIFIED']: if '.min.' 
not in filename: minified_filename = '%s.min.%s' % tuple(filename.rsplit('.', 1)) minified_path = os.path.join(root, component, minified_filename) if os.path.exists(os.path.join(current_app.root_path, minified_path)): filename = minified_filename # determine version of component and append as ?version= parameter to allow cache busting if current_app.config['BOWER_QUERYSTRING_REVVING']: if bower_data is not None and 'version' in bower_data: values['version'] = bower_data['version'] elif package_data is not None and 'version' in package_data: values['version'] = package_data['version'] else: values['version'] = os.path.getmtime(os.path.join(current_app.root_path, root, component, filename)) return url_for('bower.serve', component=component, filename=filename, **values)
python
def build_url(component, filename, **values): """ search bower asset and build url :param component: bower component (package) :type component: str :param filename: filename in bower component - can contain directories (like dist/jquery.js) :type filename: str :param values: additional url parameters :type values: dict[str, str] :return: url :rtype: str | None """ root = current_app.config['BOWER_COMPONENTS_ROOT'] bower_data = None package_data = None # check if component exists in bower_components directory if not os.path.isdir(os.path.join(current_app.root_path, root, component)): # FallBack to default url_for flask return None # load bower.json of specified component bower_file_path = os.path.join(current_app.root_path, root, component, 'bower.json') if os.path.exists(bower_file_path): with open(bower_file_path, 'r') as bower_file: bower_data = json.load(bower_file) # check if package.json exists and load package.json data package_file_path = os.path.join(current_app.root_path, root, component, 'package.json') if os.path.exists(package_file_path): with open(package_file_path, 'r') as package_file: package_data = json.load(package_file) # check if specified file actually exists if not os.path.exists(os.path.join(current_app.root_path, root, component, filename)): return None # check if minified file exists (by pattern <filename>.min.<ext> # returns filename if successful if current_app.config['BOWER_TRY_MINIFIED']: if '.min.' 
not in filename: minified_filename = '%s.min.%s' % tuple(filename.rsplit('.', 1)) minified_path = os.path.join(root, component, minified_filename) if os.path.exists(os.path.join(current_app.root_path, minified_path)): filename = minified_filename # determine version of component and append as ?version= parameter to allow cache busting if current_app.config['BOWER_QUERYSTRING_REVVING']: if bower_data is not None and 'version' in bower_data: values['version'] = bower_data['version'] elif package_data is not None and 'version' in package_data: values['version'] = package_data['version'] else: values['version'] = os.path.getmtime(os.path.join(current_app.root_path, root, component, filename)) return url_for('bower.serve', component=component, filename=filename, **values)
[ "def", "build_url", "(", "component", ",", "filename", ",", "*", "*", "values", ")", ":", "root", "=", "current_app", ".", "config", "[", "'BOWER_COMPONENTS_ROOT'", "]", "bower_data", "=", "None", "package_data", "=", "None", "# check if component exists in bower_...
search bower asset and build url :param component: bower component (package) :type component: str :param filename: filename in bower component - can contain directories (like dist/jquery.js) :type filename: str :param values: additional url parameters :type values: dict[str, str] :return: url :rtype: str | None
[ "search", "bower", "asset", "and", "build", "url" ]
train
https://github.com/lobeck/flask-bower/blob/3ebe08a0931d07e82cb57998db3390d2b5921444/flask_bower/__init__.py#L100-L157
shmir/PyIxNetwork
ixnetwork/ixn_app.py
init_ixn
def init_ixn(api, logger, install_dir=None): """ Create IXN object. :param api: tcl/python/rest :type api: trafficgenerator.tgn_utils.ApiType :param logger: logger object :param install_dir: IXN installation directory :return: IXN object """ if api == ApiType.tcl: api_wrapper = IxnTclWrapper(logger, install_dir) elif api == ApiType.python: api_wrapper = IxnPythonWrapper(logger, install_dir) elif api == ApiType.rest: api_wrapper = IxnRestWrapper(logger) else: raise TgnError('{} API not supported - use Tcl, python or REST'.format(api)) return IxnApp(logger, api_wrapper)
python
def init_ixn(api, logger, install_dir=None): """ Create IXN object. :param api: tcl/python/rest :type api: trafficgenerator.tgn_utils.ApiType :param logger: logger object :param install_dir: IXN installation directory :return: IXN object """ if api == ApiType.tcl: api_wrapper = IxnTclWrapper(logger, install_dir) elif api == ApiType.python: api_wrapper = IxnPythonWrapper(logger, install_dir) elif api == ApiType.rest: api_wrapper = IxnRestWrapper(logger) else: raise TgnError('{} API not supported - use Tcl, python or REST'.format(api)) return IxnApp(logger, api_wrapper)
[ "def", "init_ixn", "(", "api", ",", "logger", ",", "install_dir", "=", "None", ")", ":", "if", "api", "==", "ApiType", ".", "tcl", ":", "api_wrapper", "=", "IxnTclWrapper", "(", "logger", ",", "install_dir", ")", "elif", "api", "==", "ApiType", ".", "p...
Create IXN object. :param api: tcl/python/rest :type api: trafficgenerator.tgn_utils.ApiType :param logger: logger object :param install_dir: IXN installation directory :return: IXN object
[ "Create", "IXN", "object", "." ]
train
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_app.py#L34-L52
shmir/PyIxNetwork
ixnetwork/ixn_app.py
IxnApp.disconnect
def disconnect(self): """ Disconnect from chassis and server. """ if self.root.ref is not None: self.api.disconnect() self.root = None
python
def disconnect(self): """ Disconnect from chassis and server. """ if self.root.ref is not None: self.api.disconnect() self.root = None
[ "def", "disconnect", "(", "self", ")", ":", "if", "self", ".", "root", ".", "ref", "is", "not", "None", ":", "self", ".", "api", ".", "disconnect", "(", ")", "self", ".", "root", "=", "None" ]
Disconnect from chassis and server.
[ "Disconnect", "from", "chassis", "and", "server", "." ]
train
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_app.py#L77-L81
shmir/PyIxNetwork
ixnetwork/ixn_app.py
IxnApp.reserve
def reserve(self, ports, force=False, wait_for_up=True, timeout=80): """ Reserve port and optionally wait for port to come up. :param ports: dict of <port, ip/module/port'>. :param force: whether to revoke existing reservation (True) or not (False). :param wait_for_up: True - wait for port to come up, False - return immediately. :param timeout: how long (seconds) to wait for port to come up. """ if force: for port in ports: port.release() for port, location in ports.items(): port.reserve(location, False, wait_for_up, timeout)
python
def reserve(self, ports, force=False, wait_for_up=True, timeout=80): """ Reserve port and optionally wait for port to come up. :param ports: dict of <port, ip/module/port'>. :param force: whether to revoke existing reservation (True) or not (False). :param wait_for_up: True - wait for port to come up, False - return immediately. :param timeout: how long (seconds) to wait for port to come up. """ if force: for port in ports: port.release() for port, location in ports.items(): port.reserve(location, False, wait_for_up, timeout)
[ "def", "reserve", "(", "self", ",", "ports", ",", "force", "=", "False", ",", "wait_for_up", "=", "True", ",", "timeout", "=", "80", ")", ":", "if", "force", ":", "for", "port", "in", "ports", ":", "port", ".", "release", "(", ")", "for", "port", ...
Reserve port and optionally wait for port to come up. :param ports: dict of <port, ip/module/port'>. :param force: whether to revoke existing reservation (True) or not (False). :param wait_for_up: True - wait for port to come up, False - return immediately. :param timeout: how long (seconds) to wait for port to come up.
[ "Reserve", "port", "and", "optionally", "wait", "for", "port", "to", "come", "up", "." ]
train
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_app.py#L112-L126
saghul/evergreen
evergreen/timeout.py
Timeout.start
def start(self): """Schedule the timeout. This is called on construction, so it should not be called explicitly, unless the timer has been canceled.""" assert not self._timer, '%r is already started; to restart it, cancel it first' % self loop = evergreen.current.loop current = evergreen.current.task if self.seconds is None or self.seconds < 0: # "fake" timeout (never expires) self._timer = None elif self.exception is None or isinstance(self.exception, bool): # timeout that raises self self._timer = loop.call_later(self.seconds, self._timer_cb, current.throw, self) else: # regular timeout with user-provided exception self._timer = loop.call_later(self.seconds, self._timer_cb, current.throw, self.exception)
python
def start(self): """Schedule the timeout. This is called on construction, so it should not be called explicitly, unless the timer has been canceled.""" assert not self._timer, '%r is already started; to restart it, cancel it first' % self loop = evergreen.current.loop current = evergreen.current.task if self.seconds is None or self.seconds < 0: # "fake" timeout (never expires) self._timer = None elif self.exception is None or isinstance(self.exception, bool): # timeout that raises self self._timer = loop.call_later(self.seconds, self._timer_cb, current.throw, self) else: # regular timeout with user-provided exception self._timer = loop.call_later(self.seconds, self._timer_cb, current.throw, self.exception)
[ "def", "start", "(", "self", ")", ":", "assert", "not", "self", ".", "_timer", ",", "'%r is already started; to restart it, cancel it first'", "%", "self", "loop", "=", "evergreen", ".", "current", ".", "loop", "current", "=", "evergreen", ".", "current", ".", ...
Schedule the timeout. This is called on construction, so it should not be called explicitly, unless the timer has been canceled.
[ "Schedule", "the", "timeout", ".", "This", "is", "called", "on", "construction", "so", "it", "should", "not", "be", "called", "explicitly", "unless", "the", "timer", "has", "been", "canceled", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/timeout.py#L28-L43
karel-brinda/rnftools
rnftools/rnfformat/FqCreator.py
FqCreator.flush_read_tuple
def flush_read_tuple(self): """Flush the internal buffer of reads. """ if not self.is_empty(): suffix_comment_buffer = [] if self._info_simulator is not None: suffix_comment_buffer.append(self._info_simulator) if self._info_reads_in_tuple: # todo: orientation (FF, FR, etc.) # orientation="".join([]) suffix_comment_buffer.append("reads-in-tuple:{}".format(len(self.seqs_bases))) if len(suffix_comment_buffer) != 0: suffix_comment = "[{}]".format(",".join(suffix_comment_buffer)) else: suffix_comment = "" rnf_name = self._rnf_profile.get_rnf_name( rnftools.rnfformat.ReadTuple( segments=self.segments, read_tuple_id=self.current_read_tuple_id, suffix=suffix_comment, ) ) fq_reads = [ os.linesep.join( [ "@{rnf_name}{read_suffix}".format( rnf_name=rnf_name, read_suffix="/{}".format(str(i + 1)) if len(self.seqs_bases) > 1 else "", ), self.seqs_bases[i], "+", self.seqs_qualities[i], ] ) for i in range(len(self.seqs_bases)) ] self._fq_file.write(os.linesep.join(fq_reads)) self._fq_file.write(os.linesep) self.empty()
python
def flush_read_tuple(self): """Flush the internal buffer of reads. """ if not self.is_empty(): suffix_comment_buffer = [] if self._info_simulator is not None: suffix_comment_buffer.append(self._info_simulator) if self._info_reads_in_tuple: # todo: orientation (FF, FR, etc.) # orientation="".join([]) suffix_comment_buffer.append("reads-in-tuple:{}".format(len(self.seqs_bases))) if len(suffix_comment_buffer) != 0: suffix_comment = "[{}]".format(",".join(suffix_comment_buffer)) else: suffix_comment = "" rnf_name = self._rnf_profile.get_rnf_name( rnftools.rnfformat.ReadTuple( segments=self.segments, read_tuple_id=self.current_read_tuple_id, suffix=suffix_comment, ) ) fq_reads = [ os.linesep.join( [ "@{rnf_name}{read_suffix}".format( rnf_name=rnf_name, read_suffix="/{}".format(str(i + 1)) if len(self.seqs_bases) > 1 else "", ), self.seqs_bases[i], "+", self.seqs_qualities[i], ] ) for i in range(len(self.seqs_bases)) ] self._fq_file.write(os.linesep.join(fq_reads)) self._fq_file.write(os.linesep) self.empty()
[ "def", "flush_read_tuple", "(", "self", ")", ":", "if", "not", "self", ".", "is_empty", "(", ")", ":", "suffix_comment_buffer", "=", "[", "]", "if", "self", ".", "_info_simulator", "is", "not", "None", ":", "suffix_comment_buffer", ".", "append", "(", "sel...
Flush the internal buffer of reads.
[ "Flush", "the", "internal", "buffer", "of", "reads", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/FqCreator.py#L49-L87
karel-brinda/rnftools
rnftools/rnfformat/FqCreator.py
FqCreator.add_read
def add_read( self, read_tuple_id, bases, qualities, segments, ): """Add a new read to the current buffer. If it is a new read tuple (detected from ID), the buffer will be flushed. Args: read_tuple_id (int): ID of the read tuple. bases (str): Sequence of bases. qualities (str): Sequence of FASTQ qualities. segments (list of rnftools.rnfformat.segment): List of segments constituting the read. """ assert type(bases) is str, "Wrong type of bases: '{}'".format(bases) assert type(qualities) is str, "Wrong type of qualities: '{}'".format(qualities) assert type(segments) is tuple or type(segments) is list if self.current_read_tuple_id != read_tuple_id: self.flush_read_tuple() self.current_read_tuple_id = read_tuple_id self.seqs_bases.append(bases) self.seqs_qualities.append(qualities) self.segments.extend(segments)
python
def add_read( self, read_tuple_id, bases, qualities, segments, ): """Add a new read to the current buffer. If it is a new read tuple (detected from ID), the buffer will be flushed. Args: read_tuple_id (int): ID of the read tuple. bases (str): Sequence of bases. qualities (str): Sequence of FASTQ qualities. segments (list of rnftools.rnfformat.segment): List of segments constituting the read. """ assert type(bases) is str, "Wrong type of bases: '{}'".format(bases) assert type(qualities) is str, "Wrong type of qualities: '{}'".format(qualities) assert type(segments) is tuple or type(segments) is list if self.current_read_tuple_id != read_tuple_id: self.flush_read_tuple() self.current_read_tuple_id = read_tuple_id self.seqs_bases.append(bases) self.seqs_qualities.append(qualities) self.segments.extend(segments)
[ "def", "add_read", "(", "self", ",", "read_tuple_id", ",", "bases", ",", "qualities", ",", "segments", ",", ")", ":", "assert", "type", "(", "bases", ")", "is", "str", ",", "\"Wrong type of bases: '{}'\"", ".", "format", "(", "bases", ")", "assert", "type"...
Add a new read to the current buffer. If it is a new read tuple (detected from ID), the buffer will be flushed. Args: read_tuple_id (int): ID of the read tuple. bases (str): Sequence of bases. qualities (str): Sequence of FASTQ qualities. segments (list of rnftools.rnfformat.segment): List of segments constituting the read.
[ "Add", "a", "new", "read", "to", "the", "current", "buffer", ".", "If", "it", "is", "a", "new", "read", "tuple", "(", "detected", "from", "ID", ")", "the", "buffer", "will", "be", "flushed", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/FqCreator.py#L101-L127
hickeroar/simplebayes
simplebayes/categories.py
BayesCategories.add_category
def add_category(self, name): """ Adds a bayes category that we can later train :param name: name of the category :type name: str :return: the requested category :rtype: BayesCategory """ category = BayesCategory(name) self.categories[name] = category return category
python
def add_category(self, name): """ Adds a bayes category that we can later train :param name: name of the category :type name: str :return: the requested category :rtype: BayesCategory """ category = BayesCategory(name) self.categories[name] = category return category
[ "def", "add_category", "(", "self", ",", "name", ")", ":", "category", "=", "BayesCategory", "(", "name", ")", "self", ".", "categories", "[", "name", "]", "=", "category", "return", "category" ]
Adds a bayes category that we can later train :param name: name of the category :type name: str :return: the requested category :rtype: BayesCategory
[ "Adds", "a", "bayes", "category", "that", "we", "can", "later", "train" ]
train
https://github.com/hickeroar/simplebayes/blob/b8da72c50d20b6f8c0df2c2f39620715b08ddd32/simplebayes/categories.py#L33-L44
carljm/pytest-smartcov
smartcov.py
FilteredStream.line_is_interesting
def line_is_interesting(self, line): """Return True, False, or None. True means always output, False means never output, None means output only if there are interesting lines. """ if line.startswith('Name'): return None if line.startswith('--------'): return None if line.startswith('TOTAL'): return None if '100%' in line: return False if line == '\n': return None if self._last_line_was_printable else False return True
python
def line_is_interesting(self, line): """Return True, False, or None. True means always output, False means never output, None means output only if there are interesting lines. """ if line.startswith('Name'): return None if line.startswith('--------'): return None if line.startswith('TOTAL'): return None if '100%' in line: return False if line == '\n': return None if self._last_line_was_printable else False return True
[ "def", "line_is_interesting", "(", "self", ",", "line", ")", ":", "if", "line", ".", "startswith", "(", "'Name'", ")", ":", "return", "None", "if", "line", ".", "startswith", "(", "'--------'", ")", ":", "return", "None", "if", "line", ".", "startswith",...
Return True, False, or None. True means always output, False means never output, None means output only if there are interesting lines.
[ "Return", "True", "False", "or", "None", "." ]
train
https://github.com/carljm/pytest-smartcov/blob/24dd83369bc3511710fc617d108b2d5bcff97ba8/smartcov.py#L126-L143
bioasp/caspo
caspo/core/graph.py
Graph.from_tuples
def from_tuples(cls, tuples): """ Creates a graph from an iterable of tuples describing edges like (source, target, sign) Parameters ---------- tuples : iterable[(str,str,int))] Tuples describing signed and directed edges Returns ------- caspo.core.graph.Graph Created object instance """ return cls(it.imap(lambda (source, target, sign): (source, target, {'sign': sign}), tuples))
python
def from_tuples(cls, tuples): """ Creates a graph from an iterable of tuples describing edges like (source, target, sign) Parameters ---------- tuples : iterable[(str,str,int))] Tuples describing signed and directed edges Returns ------- caspo.core.graph.Graph Created object instance """ return cls(it.imap(lambda (source, target, sign): (source, target, {'sign': sign}), tuples))
[ "def", "from_tuples", "(", "cls", ",", "tuples", ")", ":", "return", "cls", "(", "it", ".", "imap", "(", "lambda", "(", "source", ",", "target", ",", "sign", ")", ":", "(", "source", ",", "target", ",", "{", "'sign'", ":", "sign", "}", ")", ",", ...
Creates a graph from an iterable of tuples describing edges like (source, target, sign) Parameters ---------- tuples : iterable[(str,str,int))] Tuples describing signed and directed edges Returns ------- caspo.core.graph.Graph Created object instance
[ "Creates", "a", "graph", "from", "an", "iterable", "of", "tuples", "describing", "edges", "like", "(", "source", "target", "sign", ")" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/graph.py#L31-L45
bioasp/caspo
caspo/core/graph.py
Graph.read_sif
def read_sif(cls, path): """ Creates a graph from a `simple interaction format (SIF)`_ file Parameters ---------- path : str Absolute path to a SIF file Returns ------- caspo.core.graph.Graph Created object instance .. _simple interaction format (SIF): http://wiki.cytoscape.org/Cytoscape_User_Manual/Network_Formats """ df = pd.read_csv(path, delim_whitespace=True, names=['source', 'sign', 'target']).drop_duplicates() edges = [(source, target, {'sign': sign}) for _, source, sign, target in df.itertuples()] return cls(data=edges)
python
def read_sif(cls, path): """ Creates a graph from a `simple interaction format (SIF)`_ file Parameters ---------- path : str Absolute path to a SIF file Returns ------- caspo.core.graph.Graph Created object instance .. _simple interaction format (SIF): http://wiki.cytoscape.org/Cytoscape_User_Manual/Network_Formats """ df = pd.read_csv(path, delim_whitespace=True, names=['source', 'sign', 'target']).drop_duplicates() edges = [(source, target, {'sign': sign}) for _, source, sign, target in df.itertuples()] return cls(data=edges)
[ "def", "read_sif", "(", "cls", ",", "path", ")", ":", "df", "=", "pd", ".", "read_csv", "(", "path", ",", "delim_whitespace", "=", "True", ",", "names", "=", "[", "'source'", ",", "'sign'", ",", "'target'", "]", ")", ".", "drop_duplicates", "(", ")",...
Creates a graph from a `simple interaction format (SIF)`_ file Parameters ---------- path : str Absolute path to a SIF file Returns ------- caspo.core.graph.Graph Created object instance .. _simple interaction format (SIF): http://wiki.cytoscape.org/Cytoscape_User_Manual/Network_Formats
[ "Creates", "a", "graph", "from", "a", "simple", "interaction", "format", "(", "SIF", ")", "_", "file" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/graph.py#L48-L67
bioasp/caspo
caspo/core/graph.py
Graph.predecessors
def predecessors(self, node, exclude_compressed=True): """ Returns the list of predecessors of a given node Parameters ---------- node : str The target node exclude_compressed : boolean If true, compressed nodes are excluded from the predecessors list Returns ------- list List of predecessors nodes """ preds = super(Graph, self).predecessors(node) if exclude_compressed: return [n for n in preds if not self.node[n].get('compressed', False)] else: return preds
python
def predecessors(self, node, exclude_compressed=True): """ Returns the list of predecessors of a given node Parameters ---------- node : str The target node exclude_compressed : boolean If true, compressed nodes are excluded from the predecessors list Returns ------- list List of predecessors nodes """ preds = super(Graph, self).predecessors(node) if exclude_compressed: return [n for n in preds if not self.node[n].get('compressed', False)] else: return preds
[ "def", "predecessors", "(", "self", ",", "node", ",", "exclude_compressed", "=", "True", ")", ":", "preds", "=", "super", "(", "Graph", ",", "self", ")", ".", "predecessors", "(", "node", ")", "if", "exclude_compressed", ":", "return", "[", "n", "for", ...
Returns the list of predecessors of a given node Parameters ---------- node : str The target node exclude_compressed : boolean If true, compressed nodes are excluded from the predecessors list Returns ------- list List of predecessors nodes
[ "Returns", "the", "list", "of", "predecessors", "of", "a", "given", "node" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/graph.py#L69-L90
bioasp/caspo
caspo/core/graph.py
Graph.successors
def successors(self, node, exclude_compressed=True): """ Returns the list of successors of a given node Parameters ---------- node : str The target node exclude_compressed : boolean If true, compressed nodes are excluded from the successors list Returns ------- list List of successors nodes """ succs = super(Graph, self).successors(node) if exclude_compressed: return [n for n in succs if not self.node[n].get('compressed', False)] else: return succs
python
def successors(self, node, exclude_compressed=True): """ Returns the list of successors of a given node Parameters ---------- node : str The target node exclude_compressed : boolean If true, compressed nodes are excluded from the successors list Returns ------- list List of successors nodes """ succs = super(Graph, self).successors(node) if exclude_compressed: return [n for n in succs if not self.node[n].get('compressed', False)] else: return succs
[ "def", "successors", "(", "self", ",", "node", ",", "exclude_compressed", "=", "True", ")", ":", "succs", "=", "super", "(", "Graph", ",", "self", ")", ".", "successors", "(", "node", ")", "if", "exclude_compressed", ":", "return", "[", "n", "for", "n"...
Returns the list of successors of a given node Parameters ---------- node : str The target node exclude_compressed : boolean If true, compressed nodes are excluded from the successors list Returns ------- list List of successors nodes
[ "Returns", "the", "list", "of", "successors", "of", "a", "given", "node" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/graph.py#L92-L113
bioasp/caspo
caspo/core/graph.py
Graph.compress
def compress(self, setup): """ Returns the compressed graph according to the given experimental setup Parameters ---------- setup : :class:`caspo.core.setup.Setup` Experimental setup used to compress the graph Returns ------- caspo.core.graph.Graph Compressed graph """ designated = set(setup.nodes) zipped = self.copy() marked = [(n, d) for n, d in self.nodes(data=True) if n not in designated and not d.get('compressed', False)] while marked: for node, _ in sorted(marked): backward = zipped.predecessors(node) forward = zipped.successors(node) if not backward or (len(backward) == 1 and not backward[0] in forward): self.__merge_source_targets(node, zipped) elif not forward or (len(forward) == 1 and not forward[0] in backward): self.__merge_target_sources(node, zipped) else: designated.add(node) marked = [(n, d) for n, d in self.nodes(data=True) if n not in designated and not d.get('compressed', False)] not_compressed = [(n, d) for n, d in zipped.nodes(data=True) if not d.get('compressed', False)] return zipped.subgraph([n for n, _ in not_compressed])
python
def compress(self, setup): """ Returns the compressed graph according to the given experimental setup Parameters ---------- setup : :class:`caspo.core.setup.Setup` Experimental setup used to compress the graph Returns ------- caspo.core.graph.Graph Compressed graph """ designated = set(setup.nodes) zipped = self.copy() marked = [(n, d) for n, d in self.nodes(data=True) if n not in designated and not d.get('compressed', False)] while marked: for node, _ in sorted(marked): backward = zipped.predecessors(node) forward = zipped.successors(node) if not backward or (len(backward) == 1 and not backward[0] in forward): self.__merge_source_targets(node, zipped) elif not forward or (len(forward) == 1 and not forward[0] in backward): self.__merge_target_sources(node, zipped) else: designated.add(node) marked = [(n, d) for n, d in self.nodes(data=True) if n not in designated and not d.get('compressed', False)] not_compressed = [(n, d) for n, d in zipped.nodes(data=True) if not d.get('compressed', False)] return zipped.subgraph([n for n, _ in not_compressed])
[ "def", "compress", "(", "self", ",", "setup", ")", ":", "designated", "=", "set", "(", "setup", ".", "nodes", ")", "zipped", "=", "self", ".", "copy", "(", ")", "marked", "=", "[", "(", "n", ",", "d", ")", "for", "n", ",", "d", "in", "self", ...
Returns the compressed graph according to the given experimental setup Parameters ---------- setup : :class:`caspo.core.setup.Setup` Experimental setup used to compress the graph Returns ------- caspo.core.graph.Graph Compressed graph
[ "Returns", "the", "compressed", "graph", "according", "to", "the", "given", "experimental", "setup" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/graph.py#L115-L150
karel-brinda/rnftools
rnftools/rnfformat/RnfProfile.py
RnfProfile.combine
def combine(*rnf_profiles): """Combine more profiles and set their maximal values. Args: *rnf_profiles (rnftools.rnfformat.RnfProfile): RNF profile. """ for rnf_profile in rnf_profiles: self.prefix_width = max(self.prefix_width, rnf_profile.prefix_width) self.read_tuple_id_width = max(self.read_tuple_id_width, rnf_profile.read_tuple_id_width) self.genome_id_width = max(self.genome_id_width, rnf_profile.genome_id_width) self.chr_id_width = max(self.chr_id_width, rnf_profile.chr_id_width) self.coor_width = max(self.coor_width, rnf_profile.coor_width)
python
def combine(*rnf_profiles): """Combine more profiles and set their maximal values. Args: *rnf_profiles (rnftools.rnfformat.RnfProfile): RNF profile. """ for rnf_profile in rnf_profiles: self.prefix_width = max(self.prefix_width, rnf_profile.prefix_width) self.read_tuple_id_width = max(self.read_tuple_id_width, rnf_profile.read_tuple_id_width) self.genome_id_width = max(self.genome_id_width, rnf_profile.genome_id_width) self.chr_id_width = max(self.chr_id_width, rnf_profile.chr_id_width) self.coor_width = max(self.coor_width, rnf_profile.coor_width)
[ "def", "combine", "(", "*", "rnf_profiles", ")", ":", "for", "rnf_profile", "in", "rnf_profiles", ":", "self", ".", "prefix_width", "=", "max", "(", "self", ".", "prefix_width", ",", "rnf_profile", ".", "prefix_width", ")", "self", ".", "read_tuple_id_width", ...
Combine more profiles and set their maximal values. Args: *rnf_profiles (rnftools.rnfformat.RnfProfile): RNF profile.
[ "Combine", "more", "profiles", "and", "set", "their", "maximal", "values", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/RnfProfile.py#L46-L58
karel-brinda/rnftools
rnftools/rnfformat/RnfProfile.py
RnfProfile.load
def load(self, read_tuple_name): """Load RNF values from a read tuple name. Args: read_tuple_name (str): Read tuple name which the values are taken from. """ self.prefix_width = 0 self.read_tuple_id_width = 0 self.genome_id_width = 0 self.chr_id_width = 0 self.coor_width = 0 parts = read_tuple_name.split("__") self.prefix_width = len(parts[0]) self.read_tuple_id_width = len(parts[1]) segments = parts[2][1:-1].split("),(") for segment in segments: int_widths = list(map(len, segment.split(","))) self.genome_id_width = max(self.genome_id_width, int_widths[0]) self.chr_id_width = max(self.chr_id_width, int_widths[1]) self.coor_width = max(self.coor_width, int_widths[2], int_widths[3])
python
def load(self, read_tuple_name): """Load RNF values from a read tuple name. Args: read_tuple_name (str): Read tuple name which the values are taken from. """ self.prefix_width = 0 self.read_tuple_id_width = 0 self.genome_id_width = 0 self.chr_id_width = 0 self.coor_width = 0 parts = read_tuple_name.split("__") self.prefix_width = len(parts[0]) self.read_tuple_id_width = len(parts[1]) segments = parts[2][1:-1].split("),(") for segment in segments: int_widths = list(map(len, segment.split(","))) self.genome_id_width = max(self.genome_id_width, int_widths[0]) self.chr_id_width = max(self.chr_id_width, int_widths[1]) self.coor_width = max(self.coor_width, int_widths[2], int_widths[3])
[ "def", "load", "(", "self", ",", "read_tuple_name", ")", ":", "self", ".", "prefix_width", "=", "0", "self", ".", "read_tuple_id_width", "=", "0", "self", ".", "genome_id_width", "=", "0", "self", ".", "chr_id_width", "=", "0", "self", ".", "coor_width", ...
Load RNF values from a read tuple name. Args: read_tuple_name (str): Read tuple name which the values are taken from.
[ "Load", "RNF", "values", "from", "a", "read", "tuple", "name", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/RnfProfile.py#L60-L81
karel-brinda/rnftools
rnftools/rnfformat/RnfProfile.py
RnfProfile.apply
def apply(self, read_tuple_name, read_tuple_id=None, synchronize_widths=True): """Apply profile on a read tuple name and update read tuple ID. Args: read_tuple_name (str): Read tuple name to be updated. read_tuple_id (id): New read tuple ID. synchronize_widths (bool): Update widths (in accordance to this profile). """ parts = read_tuple_name.split("__") parts[0] = self._fill_right(parts[0], "-", self.prefix_width) if read_tuple_id is not None: parts[1] = "{:x}".format(read_tuple_id) parts[1] = self._fill_left(parts[1], "0", self.read_tuple_id_width) if synchronize_widths: new_segments = [] segments = parts[2][1:-1].split("),(") for segment in segments: values = segment.split(",") values[0] = values[0].zfill(self.genome_id_width) values[1] = values[1].zfill(self.chr_id_width) values[3] = values[3].zfill(self.coor_width) values[4] = values[4].zfill(self.coor_width) new_segments.append("(" + ",".join(values) + ")") parts[2] = ",".join(new_segments) return "__".join(parts)
python
def apply(self, read_tuple_name, read_tuple_id=None, synchronize_widths=True): """Apply profile on a read tuple name and update read tuple ID. Args: read_tuple_name (str): Read tuple name to be updated. read_tuple_id (id): New read tuple ID. synchronize_widths (bool): Update widths (in accordance to this profile). """ parts = read_tuple_name.split("__") parts[0] = self._fill_right(parts[0], "-", self.prefix_width) if read_tuple_id is not None: parts[1] = "{:x}".format(read_tuple_id) parts[1] = self._fill_left(parts[1], "0", self.read_tuple_id_width) if synchronize_widths: new_segments = [] segments = parts[2][1:-1].split("),(") for segment in segments: values = segment.split(",") values[0] = values[0].zfill(self.genome_id_width) values[1] = values[1].zfill(self.chr_id_width) values[3] = values[3].zfill(self.coor_width) values[4] = values[4].zfill(self.coor_width) new_segments.append("(" + ",".join(values) + ")") parts[2] = ",".join(new_segments) return "__".join(parts)
[ "def", "apply", "(", "self", ",", "read_tuple_name", ",", "read_tuple_id", "=", "None", ",", "synchronize_widths", "=", "True", ")", ":", "parts", "=", "read_tuple_name", ".", "split", "(", "\"__\"", ")", "parts", "[", "0", "]", "=", "self", ".", "_fill_...
Apply profile on a read tuple name and update read tuple ID. Args: read_tuple_name (str): Read tuple name to be updated. read_tuple_id (id): New read tuple ID. synchronize_widths (bool): Update widths (in accordance to this profile).
[ "Apply", "profile", "on", "a", "read", "tuple", "name", "and", "update", "read", "tuple", "ID", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/RnfProfile.py#L83-L109
karel-brinda/rnftools
rnftools/rnfformat/RnfProfile.py
RnfProfile.check
def check(self, read_tuple_name): """Check if the given read tuple name satisfies this profile. Args: read_tuple_name (str): Read tuple name. """ parts = read_tuple_name.split("__") if len(parts[0]) != self.prefix_width or len(parts[1]) != self.read_tuple_id_width: return False segments = parts[2][1:-1].split("),(") for segment in segments: int_widths = list(map(len, segment.split(","))) if self.genome_id_width != int_widths[0]: return False if self.chr_id_width != int_widths[1]: return False if self.coor_width != int_widths[3] or self.coor_width != int_widths[4]: return False return True
python
def check(self, read_tuple_name): """Check if the given read tuple name satisfies this profile. Args: read_tuple_name (str): Read tuple name. """ parts = read_tuple_name.split("__") if len(parts[0]) != self.prefix_width or len(parts[1]) != self.read_tuple_id_width: return False segments = parts[2][1:-1].split("),(") for segment in segments: int_widths = list(map(len, segment.split(","))) if self.genome_id_width != int_widths[0]: return False if self.chr_id_width != int_widths[1]: return False if self.coor_width != int_widths[3] or self.coor_width != int_widths[4]: return False return True
[ "def", "check", "(", "self", ",", "read_tuple_name", ")", ":", "parts", "=", "read_tuple_name", ".", "split", "(", "\"__\"", ")", "if", "len", "(", "parts", "[", "0", "]", ")", "!=", "self", ".", "prefix_width", "or", "len", "(", "parts", "[", "1", ...
Check if the given read tuple name satisfies this profile. Args: read_tuple_name (str): Read tuple name.
[ "Check", "if", "the", "given", "read", "tuple", "name", "satisfies", "this", "profile", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/RnfProfile.py#L111-L133
mrstephenneal/mysql-toolkit
mysql/toolkit/components/structure/definition.py
Definition.get_column_definition_all
def get_column_definition_all(self, table): """Retrieve the column definition statement for all columns in a table.""" # Get complete table definition col_defs = self.get_table_definition(table).split('\n') # Return only column definitions return [i[0:-1].strip().replace(',', ', ') for i in col_defs if i.strip().startswith('`')]
python
def get_column_definition_all(self, table): """Retrieve the column definition statement for all columns in a table.""" # Get complete table definition col_defs = self.get_table_definition(table).split('\n') # Return only column definitions return [i[0:-1].strip().replace(',', ', ') for i in col_defs if i.strip().startswith('`')]
[ "def", "get_column_definition_all", "(", "self", ",", "table", ")", ":", "# Get complete table definition", "col_defs", "=", "self", ".", "get_table_definition", "(", "table", ")", ".", "split", "(", "'\\n'", ")", "# Return only column definitions", "return", "[", "...
Retrieve the column definition statement for all columns in a table.
[ "Retrieve", "the", "column", "definition", "statement", "for", "all", "columns", "in", "a", "table", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/structure/definition.py#L6-L12
mrstephenneal/mysql-toolkit
mysql/toolkit/components/structure/definition.py
Definition.get_column_definition
def get_column_definition(self, table, column): """Retrieve the column definition statement for a column from a table.""" # Parse column definitions for match for col in self.get_column_definition_all(table): if col.strip('`').startswith(column): return col.strip(',')
python
def get_column_definition(self, table, column): """Retrieve the column definition statement for a column from a table.""" # Parse column definitions for match for col in self.get_column_definition_all(table): if col.strip('`').startswith(column): return col.strip(',')
[ "def", "get_column_definition", "(", "self", ",", "table", ",", "column", ")", ":", "# Parse column definitions for match", "for", "col", "in", "self", ".", "get_column_definition_all", "(", "table", ")", ":", "if", "col", ".", "strip", "(", "'`'", ")", ".", ...
Retrieve the column definition statement for a column from a table.
[ "Retrieve", "the", "column", "definition", "statement", "for", "a", "column", "from", "a", "table", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/structure/definition.py#L14-L19
saghul/evergreen
evergreen/patcher.py
original
def original(modname): """ This returns an unpatched version of a module.""" # note that it's not necessary to temporarily install unpatched # versions of all patchable modules during the import of the # module; this is because none of them import each other, except # for threading which imports thread original_name = '__original_module_' + modname if original_name in sys.modules: return sys.modules.get(original_name) # re-import the "pure" module and store it in the global _originals # dict; be sure to restore whatever module had that name already saver = SysModulesSaver((modname,)) sys.modules.pop(modname, None) try: real_mod = __import__(modname, {}, {}, modname.split('.')[:-1]) # save a reference to the unpatched module so it doesn't get lost sys.modules[original_name] = real_mod finally: saver.restore() return sys.modules[original_name]
python
def original(modname): """ This returns an unpatched version of a module.""" # note that it's not necessary to temporarily install unpatched # versions of all patchable modules during the import of the # module; this is because none of them import each other, except # for threading which imports thread original_name = '__original_module_' + modname if original_name in sys.modules: return sys.modules.get(original_name) # re-import the "pure" module and store it in the global _originals # dict; be sure to restore whatever module had that name already saver = SysModulesSaver((modname,)) sys.modules.pop(modname, None) try: real_mod = __import__(modname, {}, {}, modname.split('.')[:-1]) # save a reference to the unpatched module so it doesn't get lost sys.modules[original_name] = real_mod finally: saver.restore() return sys.modules[original_name]
[ "def", "original", "(", "modname", ")", ":", "# note that it's not necessary to temporarily install unpatched", "# versions of all patchable modules during the import of the", "# module; this is because none of them import each other, except", "# for threading which imports thread", "original_na...
This returns an unpatched version of a module.
[ "This", "returns", "an", "unpatched", "version", "of", "a", "module", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/patcher.py#L54-L75
saghul/evergreen
evergreen/patcher.py
patch
def patch(**on): """Globally patches certain system modules to be 'cooperaive'. The keyword arguments afford some control over which modules are patched. If no keyword arguments are supplied, all possible modules are patched. If keywords are set to True, only the specified modules are patched. E.g., ``monkey_patch(socket=True, select=True)`` patches only the select and socket modules. Most arguments patch the single module of the same name (os, time, select). The exception is socket, which also patches the ssl module if present. It's safe to call monkey_patch multiple times. """ accepted_args = set(('select', 'socket', 'time')) default_on = on.pop("all", None) for k in on.keys(): if k not in accepted_args: raise TypeError("patch() got an unexpected keyword argument %r" % k) if default_on is None: default_on = not (True in list(on.values())) for modname in accepted_args: on.setdefault(modname, default_on) modules_to_patch = [] if on['select'] and not already_patched.get('select'): modules_to_patch += _select_modules() already_patched['select'] = True if on['socket'] and not already_patched.get('socket'): modules_to_patch += _socket_modules() already_patched['socket'] = True if on['time'] and not already_patched.get('time'): modules_to_patch += _time_modules() already_patched['time'] = True imp.acquire_lock() try: for name, mod in modules_to_patch: orig_mod = sys.modules.get(name) if orig_mod is None: orig_mod = __import__(name) for attr_name in mod.__patched__: patched_attr = getattr(mod, attr_name, None) if patched_attr is not None: setattr(orig_mod, attr_name, patched_attr) finally: imp.release_lock()
python
def patch(**on): """Globally patches certain system modules to be 'cooperaive'. The keyword arguments afford some control over which modules are patched. If no keyword arguments are supplied, all possible modules are patched. If keywords are set to True, only the specified modules are patched. E.g., ``monkey_patch(socket=True, select=True)`` patches only the select and socket modules. Most arguments patch the single module of the same name (os, time, select). The exception is socket, which also patches the ssl module if present. It's safe to call monkey_patch multiple times. """ accepted_args = set(('select', 'socket', 'time')) default_on = on.pop("all", None) for k in on.keys(): if k not in accepted_args: raise TypeError("patch() got an unexpected keyword argument %r" % k) if default_on is None: default_on = not (True in list(on.values())) for modname in accepted_args: on.setdefault(modname, default_on) modules_to_patch = [] if on['select'] and not already_patched.get('select'): modules_to_patch += _select_modules() already_patched['select'] = True if on['socket'] and not already_patched.get('socket'): modules_to_patch += _socket_modules() already_patched['socket'] = True if on['time'] and not already_patched.get('time'): modules_to_patch += _time_modules() already_patched['time'] = True imp.acquire_lock() try: for name, mod in modules_to_patch: orig_mod = sys.modules.get(name) if orig_mod is None: orig_mod = __import__(name) for attr_name in mod.__patched__: patched_attr = getattr(mod, attr_name, None) if patched_attr is not None: setattr(orig_mod, attr_name, patched_attr) finally: imp.release_lock()
[ "def", "patch", "(", "*", "*", "on", ")", ":", "accepted_args", "=", "set", "(", "(", "'select'", ",", "'socket'", ",", "'time'", ")", ")", "default_on", "=", "on", ".", "pop", "(", "\"all\"", ",", "None", ")", "for", "k", "in", "on", ".", "keys"...
Globally patches certain system modules to be 'cooperaive'. The keyword arguments afford some control over which modules are patched. If no keyword arguments are supplied, all possible modules are patched. If keywords are set to True, only the specified modules are patched. E.g., ``monkey_patch(socket=True, select=True)`` patches only the select and socket modules. Most arguments patch the single module of the same name (os, time, select). The exception is socket, which also patches the ssl module if present. It's safe to call monkey_patch multiple times.
[ "Globally", "patches", "certain", "system", "modules", "to", "be", "cooperaive", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/patcher.py#L80-L125
saghul/evergreen
evergreen/patcher.py
inject
def inject(module_name, new_globals, *additional_modules): """Base method for "injecting" greened modules into an imported module. It imports the module specified in *module_name*, arranging things so that the already-imported modules in *additional_modules* are used when *module_name* makes its imports. *new_globals* is either None or a globals dictionary that gets populated with the contents of the *module_name* module. This is useful when creating a "green" version of some other module. *additional_modules* should be a collection of two-element tuples, of the form (<name>, <module>). If it's not specified, a default selection of name/module pairs is used, which should cover all use cases but may be slower because there are inevitably redundant or unnecessary imports. """ patched_name = '__patched_module_' + module_name if patched_name in sys.modules: # returning already-patched module so as not to destroy existing # references to patched modules return sys.modules[patched_name] if not additional_modules: # supply some defaults additional_modules = (_select_modules() + _socket_modules() + _time_modules()) # after this we are gonna screw with sys.modules, so capture the # state of all the modules we're going to mess with, and lock saver = SysModulesSaver([name for name, m in additional_modules]) saver.save(module_name) # Cover the target modules so that when you import the module it # sees only the patched versions for name, mod in additional_modules: sys.modules[name] = mod ## Remove the old module from sys.modules and reimport it while ## the specified modules are in place sys.modules.pop(module_name, None) try: module = __import__(module_name, {}, {}, module_name.split('.')[:-1]) if new_globals is not None: ## Update the given globals dictionary with everything from this new module for name in dir(module): if name not in __exclude: new_globals[name] = getattr(module, name) ## Keep a reference to the new module to prevent it from dying sys.modules[patched_name] 
= module finally: saver.restore() # Put the original modules back return module
python
def inject(module_name, new_globals, *additional_modules): """Base method for "injecting" greened modules into an imported module. It imports the module specified in *module_name*, arranging things so that the already-imported modules in *additional_modules* are used when *module_name* makes its imports. *new_globals* is either None or a globals dictionary that gets populated with the contents of the *module_name* module. This is useful when creating a "green" version of some other module. *additional_modules* should be a collection of two-element tuples, of the form (<name>, <module>). If it's not specified, a default selection of name/module pairs is used, which should cover all use cases but may be slower because there are inevitably redundant or unnecessary imports. """ patched_name = '__patched_module_' + module_name if patched_name in sys.modules: # returning already-patched module so as not to destroy existing # references to patched modules return sys.modules[patched_name] if not additional_modules: # supply some defaults additional_modules = (_select_modules() + _socket_modules() + _time_modules()) # after this we are gonna screw with sys.modules, so capture the # state of all the modules we're going to mess with, and lock saver = SysModulesSaver([name for name, m in additional_modules]) saver.save(module_name) # Cover the target modules so that when you import the module it # sees only the patched versions for name, mod in additional_modules: sys.modules[name] = mod ## Remove the old module from sys.modules and reimport it while ## the specified modules are in place sys.modules.pop(module_name, None) try: module = __import__(module_name, {}, {}, module_name.split('.')[:-1]) if new_globals is not None: ## Update the given globals dictionary with everything from this new module for name in dir(module): if name not in __exclude: new_globals[name] = getattr(module, name) ## Keep a reference to the new module to prevent it from dying sys.modules[patched_name] 
= module finally: saver.restore() # Put the original modules back return module
[ "def", "inject", "(", "module_name", ",", "new_globals", ",", "*", "additional_modules", ")", ":", "patched_name", "=", "'__patched_module_'", "+", "module_name", "if", "patched_name", "in", "sys", ".", "modules", ":", "# returning already-patched module so as not to de...
Base method for "injecting" greened modules into an imported module. It imports the module specified in *module_name*, arranging things so that the already-imported modules in *additional_modules* are used when *module_name* makes its imports. *new_globals* is either None or a globals dictionary that gets populated with the contents of the *module_name* module. This is useful when creating a "green" version of some other module. *additional_modules* should be a collection of two-element tuples, of the form (<name>, <module>). If it's not specified, a default selection of name/module pairs is used, which should cover all use cases but may be slower because there are inevitably redundant or unnecessary imports.
[ "Base", "method", "for", "injecting", "greened", "modules", "into", "an", "imported", "module", ".", "It", "imports", "the", "module", "specified", "in", "*", "module_name", "*", "arranging", "things", "so", "that", "the", "already", "-", "imported", "modules"...
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/patcher.py#L158-L212
saghul/evergreen
evergreen/patcher.py
slurp_properties
def slurp_properties(source, destination, ignore=[], srckeys=None): """Copy properties from *source* (assumed to be a module) to *destination* (assumed to be a dict). *ignore* lists properties that should not be thusly copied. *srckeys* is a list of keys to copy, if the source's __all__ is untrustworthy. """ if srckeys is None: srckeys = source.__all__ destination.update(dict([(name, getattr(source, name)) for name in srckeys if not (name.startswith('__') or name in ignore) ]))
python
def slurp_properties(source, destination, ignore=[], srckeys=None): """Copy properties from *source* (assumed to be a module) to *destination* (assumed to be a dict). *ignore* lists properties that should not be thusly copied. *srckeys* is a list of keys to copy, if the source's __all__ is untrustworthy. """ if srckeys is None: srckeys = source.__all__ destination.update(dict([(name, getattr(source, name)) for name in srckeys if not (name.startswith('__') or name in ignore) ]))
[ "def", "slurp_properties", "(", "source", ",", "destination", ",", "ignore", "=", "[", "]", ",", "srckeys", "=", "None", ")", ":", "if", "srckeys", "is", "None", ":", "srckeys", "=", "source", ".", "__all__", "destination", ".", "update", "(", "dict", ...
Copy properties from *source* (assumed to be a module) to *destination* (assumed to be a dict). *ignore* lists properties that should not be thusly copied. *srckeys* is a list of keys to copy, if the source's __all__ is untrustworthy.
[ "Copy", "properties", "from", "*", "source", "*", "(", "assumed", "to", "be", "a", "module", ")", "to", "*", "destination", "*", "(", "assumed", "to", "be", "a", "dict", ")", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/patcher.py#L215-L228
saghul/evergreen
evergreen/patcher.py
SysModulesSaver.save
def save(self, *module_names): """Saves the named modules to the object.""" for modname in module_names: self._saved[modname] = sys.modules.get(modname, None)
python
def save(self, *module_names): """Saves the named modules to the object.""" for modname in module_names: self._saved[modname] = sys.modules.get(modname, None)
[ "def", "save", "(", "self", ",", "*", "module_names", ")", ":", "for", "modname", "in", "module_names", ":", "self", ".", "_saved", "[", "modname", "]", "=", "sys", ".", "modules", ".", "get", "(", "modname", ",", "None", ")" ]
Saves the named modules to the object.
[ "Saves", "the", "named", "modules", "to", "the", "object", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/patcher.py#L22-L25
saghul/evergreen
evergreen/patcher.py
SysModulesSaver.restore
def restore(self): """Restores the modules that the saver knows about into sys.modules. """ try: for modname, mod in self._saved.items(): if mod is not None: sys.modules[modname] = mod else: try: del sys.modules[modname] except KeyError: pass finally: imp.release_lock()
python
def restore(self): """Restores the modules that the saver knows about into sys.modules. """ try: for modname, mod in self._saved.items(): if mod is not None: sys.modules[modname] = mod else: try: del sys.modules[modname] except KeyError: pass finally: imp.release_lock()
[ "def", "restore", "(", "self", ")", ":", "try", ":", "for", "modname", ",", "mod", "in", "self", ".", "_saved", ".", "items", "(", ")", ":", "if", "mod", "is", "not", "None", ":", "sys", ".", "modules", "[", "modname", "]", "=", "mod", "else", ...
Restores the modules that the saver knows about into sys.modules.
[ "Restores", "the", "modules", "that", "the", "saver", "knows", "about", "into", "sys", ".", "modules", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/patcher.py#L27-L41
LIVVkit/LIVVkit
livvkit/util/LIVVDict.py
LIVVDict.nested_insert
def nested_insert(self, item_list): """ Create a series of nested LIVVDicts given a list """ if len(item_list) == 1: self[item_list[0]] = LIVVDict() elif len(item_list) > 1: if item_list[0] not in self: self[item_list[0]] = LIVVDict() self[item_list[0]].nested_insert(item_list[1:])
python
def nested_insert(self, item_list): """ Create a series of nested LIVVDicts given a list """ if len(item_list) == 1: self[item_list[0]] = LIVVDict() elif len(item_list) > 1: if item_list[0] not in self: self[item_list[0]] = LIVVDict() self[item_list[0]].nested_insert(item_list[1:])
[ "def", "nested_insert", "(", "self", ",", "item_list", ")", ":", "if", "len", "(", "item_list", ")", "==", "1", ":", "self", "[", "item_list", "[", "0", "]", "]", "=", "LIVVDict", "(", ")", "elif", "len", "(", "item_list", ")", ">", "1", ":", "if...
Create a series of nested LIVVDicts given a list
[ "Create", "a", "series", "of", "nested", "LIVVDicts", "given", "a", "list" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/LIVVDict.py#L49-L56
LIVVkit/LIVVkit
livvkit/util/LIVVDict.py
LIVVDict.nested_assign
def nested_assign(self, key_list, value): """ Set the value of nested LIVVDicts given a list """ if len(key_list) == 1: self[key_list[0]] = value elif len(key_list) > 1: if key_list[0] not in self: self[key_list[0]] = LIVVDict() self[key_list[0]].nested_assign(key_list[1:], value)
python
def nested_assign(self, key_list, value): """ Set the value of nested LIVVDicts given a list """ if len(key_list) == 1: self[key_list[0]] = value elif len(key_list) > 1: if key_list[0] not in self: self[key_list[0]] = LIVVDict() self[key_list[0]].nested_assign(key_list[1:], value)
[ "def", "nested_assign", "(", "self", ",", "key_list", ",", "value", ")", ":", "if", "len", "(", "key_list", ")", "==", "1", ":", "self", "[", "key_list", "[", "0", "]", "]", "=", "value", "elif", "len", "(", "key_list", ")", ">", "1", ":", "if", ...
Set the value of nested LIVVDicts given a list
[ "Set", "the", "value", "of", "nested", "LIVVDicts", "given", "a", "list" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/LIVVDict.py#L58-L65
praekelt/django-order
order/signal_handlers.py
post_save
def post_save(sender, instance, created, **kwargs): """ After save create order instance for sending instance for orderable models. """ # Only create order model instances for # those modules specified in settings. model_label = '.'.join([sender._meta.app_label, sender._meta.object_name]) labels = resolve_labels(model_label) order_field_names = is_orderable(model_label) if order_field_names: orderitem_set = getattr( instance, resolve_order_item_related_set_name(labels) ) if not orderitem_set.all(): fields = {} for order_field_name in order_field_names: fields[order_field_name] = 1 orderitem_set.model.objects.create(item=instance, **fields) sanitize_order(orderitem_set.model)
python
def post_save(sender, instance, created, **kwargs): """ After save create order instance for sending instance for orderable models. """ # Only create order model instances for # those modules specified in settings. model_label = '.'.join([sender._meta.app_label, sender._meta.object_name]) labels = resolve_labels(model_label) order_field_names = is_orderable(model_label) if order_field_names: orderitem_set = getattr( instance, resolve_order_item_related_set_name(labels) ) if not orderitem_set.all(): fields = {} for order_field_name in order_field_names: fields[order_field_name] = 1 orderitem_set.model.objects.create(item=instance, **fields) sanitize_order(orderitem_set.model)
[ "def", "post_save", "(", "sender", ",", "instance", ",", "created", ",", "*", "*", "kwargs", ")", ":", "# Only create order model instances for", "# those modules specified in settings.", "model_label", "=", "'.'", ".", "join", "(", "[", "sender", ".", "_meta", "....
After save create order instance for sending instance for orderable models.
[ "After", "save", "create", "order", "instance", "for", "sending", "instance", "for", "orderable", "models", "." ]
train
https://github.com/praekelt/django-order/blob/dcffeb5b28d460872f7d47675c4bfc8a32c807a3/order/signal_handlers.py#L5-L25
theonion/django-bulbs
bulbs/instant_articles/parser.py
parse_children
def parse_children(parent): """Recursively parse child tags until match is found""" components = [] for tag in parent.children: matched = parse_tag(tag) if matched: components.append(matched) elif hasattr(tag, 'contents'): components += parse_children(tag) return components
python
def parse_children(parent): """Recursively parse child tags until match is found""" components = [] for tag in parent.children: matched = parse_tag(tag) if matched: components.append(matched) elif hasattr(tag, 'contents'): components += parse_children(tag) return components
[ "def", "parse_children", "(", "parent", ")", ":", "components", "=", "[", "]", "for", "tag", "in", "parent", ".", "children", ":", "matched", "=", "parse_tag", "(", "tag", ")", "if", "matched", ":", "components", ".", "append", "(", "matched", ")", "el...
Recursively parse child tags until match is found
[ "Recursively", "parse", "child", "tags", "until", "match", "is", "found" ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/instant_articles/parser.py#L166-L176
anomaly/prestans
prestans/types/data_url_file.py
DataURLFile.save
def save(self, path): """ Writes file to a particular location This won't work for cloud environments like Google's App Engine, use with caution ensure to catch exceptions so you can provide informed feedback. prestans does not mask File IO exceptions so your handler can respond better. """ file_handle = open(path, 'wb') file_handle.write(self._file_contents) file_handle.close()
python
def save(self, path): """ Writes file to a particular location This won't work for cloud environments like Google's App Engine, use with caution ensure to catch exceptions so you can provide informed feedback. prestans does not mask File IO exceptions so your handler can respond better. """ file_handle = open(path, 'wb') file_handle.write(self._file_contents) file_handle.close()
[ "def", "save", "(", "self", ",", "path", ")", ":", "file_handle", "=", "open", "(", "path", ",", "'wb'", ")", "file_handle", ".", "write", "(", "self", ".", "_file_contents", ")", "file_handle", ".", "close", "(", ")" ]
Writes file to a particular location This won't work for cloud environments like Google's App Engine, use with caution ensure to catch exceptions so you can provide informed feedback. prestans does not mask File IO exceptions so your handler can respond better.
[ "Writes", "file", "to", "a", "particular", "location" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/types/data_url_file.py#L130-L142
PGower/PyCanvas
pycanvas/apis/grade_change_log.py
GradeChangeLogAPI.query_by_assignment
def query_by_assignment(self, assignment_id, end_time=None, start_time=None): """ Query by assignment. List grade change events for a given assignment. """ path = {} data = {} params = {} # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/grade_change/assignments/{assignment_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/grade_change/assignments/{assignment_id}".format(**path), data=data, params=params, all_pages=True)
python
def query_by_assignment(self, assignment_id, end_time=None, start_time=None): """ Query by assignment. List grade change events for a given assignment. """ path = {} data = {} params = {} # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/grade_change/assignments/{assignment_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/grade_change/assignments/{assignment_id}".format(**path), data=data, params=params, all_pages=True)
[ "def", "query_by_assignment", "(", "self", ",", "assignment_id", ",", "end_time", "=", "None", ",", "start_time", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - assignment_id\r", "\"\"\"ID\"...
Query by assignment. List grade change events for a given assignment.
[ "Query", "by", "assignment", ".", "List", "grade", "change", "events", "for", "a", "given", "assignment", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/grade_change_log.py#L19-L44
PGower/PyCanvas
pycanvas/apis/grade_change_log.py
GradeChangeLogAPI.query_by_student
def query_by_student(self, student_id, end_time=None, start_time=None): """ Query by student. List grade change events for a given student. """ path = {} data = {} params = {} # REQUIRED - PATH - student_id """ID""" path["student_id"] = student_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/grade_change/students/{student_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/grade_change/students/{student_id}".format(**path), data=data, params=params, all_pages=True)
python
def query_by_student(self, student_id, end_time=None, start_time=None): """ Query by student. List grade change events for a given student. """ path = {} data = {} params = {} # REQUIRED - PATH - student_id """ID""" path["student_id"] = student_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/grade_change/students/{student_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/grade_change/students/{student_id}".format(**path), data=data, params=params, all_pages=True)
[ "def", "query_by_student", "(", "self", ",", "student_id", ",", "end_time", "=", "None", ",", "start_time", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - student_id\r", "\"\"\"ID\"\"\"", ...
Query by student. List grade change events for a given student.
[ "Query", "by", "student", ".", "List", "grade", "change", "events", "for", "a", "given", "student", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/grade_change_log.py#L73-L98
PGower/PyCanvas
pycanvas/apis/grade_change_log.py
GradeChangeLogAPI.query_by_grader
def query_by_grader(self, grader_id, end_time=None, start_time=None): """ Query by grader. List grade change events for a given grader. """ path = {} data = {} params = {} # REQUIRED - PATH - grader_id """ID""" path["grader_id"] = grader_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/grade_change/graders/{grader_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/grade_change/graders/{grader_id}".format(**path), data=data, params=params, all_pages=True)
python
def query_by_grader(self, grader_id, end_time=None, start_time=None): """ Query by grader. List grade change events for a given grader. """ path = {} data = {} params = {} # REQUIRED - PATH - grader_id """ID""" path["grader_id"] = grader_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/grade_change/graders/{grader_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/grade_change/graders/{grader_id}".format(**path), data=data, params=params, all_pages=True)
[ "def", "query_by_grader", "(", "self", ",", "grader_id", ",", "end_time", "=", "None", ",", "start_time", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - grader_id\r", "\"\"\"ID\"\"\"", "pa...
Query by grader. List grade change events for a given grader.
[ "Query", "by", "grader", ".", "List", "grade", "change", "events", "for", "a", "given", "grader", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/grade_change_log.py#L100-L125
joeyespo/nosey
nosey/command.py
main
def main(argv=None): """The entry point of the application.""" if argv is None: argv = sys.argv[1:] usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:]) version = 'Nosey ' + __version__ # Parse options args = docopt(usage, argv=argv, version=version) # Execute return watch(args['<directory>'], args['--clear'])
python
def main(argv=None): """The entry point of the application.""" if argv is None: argv = sys.argv[1:] usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:]) version = 'Nosey ' + __version__ # Parse options args = docopt(usage, argv=argv, version=version) # Execute return watch(args['<directory>'], args['--clear'])
[ "def", "main", "(", "argv", "=", "None", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "usage", "=", "'\\n\\n\\n'", ".", "join", "(", "__doc__", ".", "split", "(", "'\\n\\n\\n'", ")", "[", "1", ...
The entry point of the application.
[ "The", "entry", "point", "of", "the", "application", "." ]
train
https://github.com/joeyespo/nosey/blob/8194c3e1b06d95ab623b561fd0f26b2d3017446c/nosey/command.py#L23-L34
anomaly/prestans
prestans/rest/response.py
Response._set_serializer_by_mime_type
def _set_serializer_by_mime_type(self, mime_type): """ :param mime_type: :return: used by content_type_set to set get a reference to the appropriate serializer """ # ignore if binary response if isinstance(self._app_iter, BinaryResponse): self.logger.info("ignoring setting serializer for binary response") return for available_serializer in self._serializers: if available_serializer.content_type() == mime_type: self._selected_serializer = available_serializer self.logger.info("set serializer for mime type: %s" % mime_type) return self.logger.info("could not find serializer for mime type: %s" % mime_type) raise exception.UnsupportedVocabularyError(mime_type, self.supported_mime_types_str)
python
def _set_serializer_by_mime_type(self, mime_type): """ :param mime_type: :return: used by content_type_set to set get a reference to the appropriate serializer """ # ignore if binary response if isinstance(self._app_iter, BinaryResponse): self.logger.info("ignoring setting serializer for binary response") return for available_serializer in self._serializers: if available_serializer.content_type() == mime_type: self._selected_serializer = available_serializer self.logger.info("set serializer for mime type: %s" % mime_type) return self.logger.info("could not find serializer for mime type: %s" % mime_type) raise exception.UnsupportedVocabularyError(mime_type, self.supported_mime_types_str)
[ "def", "_set_serializer_by_mime_type", "(", "self", ",", "mime_type", ")", ":", "# ignore if binary response", "if", "isinstance", "(", "self", ".", "_app_iter", ",", "BinaryResponse", ")", ":", "self", ".", "logger", ".", "info", "(", "\"ignoring setting serializer...
:param mime_type: :return: used by content_type_set to set get a reference to the appropriate serializer
[ ":", "param", "mime_type", ":", ":", "return", ":" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/rest/response.py#L76-L96
anomaly/prestans
prestans/rest/response.py
Response.register_serializers
def register_serializers(self, serializers): """ Adds extra serializers; generally registered during the handler lifecycle """ for new_serializer in serializers: if not isinstance(new_serializer, serializer.Base): msg = "registered serializer %s.%s does not inherit from prestans.serializer.Serializer" % ( new_serializer.__module__, new_serializer.__class__.__name__ ) raise TypeError(msg) self._serializers = self._serializers + serializers
python
def register_serializers(self, serializers): """ Adds extra serializers; generally registered during the handler lifecycle """ for new_serializer in serializers: if not isinstance(new_serializer, serializer.Base): msg = "registered serializer %s.%s does not inherit from prestans.serializer.Serializer" % ( new_serializer.__module__, new_serializer.__class__.__name__ ) raise TypeError(msg) self._serializers = self._serializers + serializers
[ "def", "register_serializers", "(", "self", ",", "serializers", ")", ":", "for", "new_serializer", "in", "serializers", ":", "if", "not", "isinstance", "(", "new_serializer", ",", "serializer", ".", "Base", ")", ":", "msg", "=", "\"registered serializer %s.%s does...
Adds extra serializers; generally registered during the handler lifecycle
[ "Adds", "extra", "serializers", ";", "generally", "registered", "during", "the", "handler", "lifecycle" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/rest/response.py#L232-L245
theonion/django-bulbs
bulbs/contributions/email.py
ContributorReport.is_valid
def is_valid(self): """returns `True` if the report should be sent.""" if not self.total: return False if not self.contributor.freelanceprofile.is_freelance: return False return True
python
def is_valid(self): """returns `True` if the report should be sent.""" if not self.total: return False if not self.contributor.freelanceprofile.is_freelance: return False return True
[ "def", "is_valid", "(", "self", ")", ":", "if", "not", "self", ".", "total", ":", "return", "False", "if", "not", "self", ".", "contributor", ".", "freelanceprofile", ".", "is_freelance", ":", "return", "False", "return", "True" ]
returns `True` if the report should be sent.
[ "returns", "True", "if", "the", "report", "should", "be", "sent", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/contributions/email.py#L71-L77
theonion/django-bulbs
bulbs/contributions/email.py
ContributorReport.contributions
def contributions(self): """Apply a datetime filter against the contributor's contribution queryset.""" if self._contributions is None: self._contributions = self.contributor.contributions.filter( content__published__gte=self.start, content__published__lt=self.end ) return self._contributions
python
def contributions(self): """Apply a datetime filter against the contributor's contribution queryset.""" if self._contributions is None: self._contributions = self.contributor.contributions.filter( content__published__gte=self.start, content__published__lt=self.end ) return self._contributions
[ "def", "contributions", "(", "self", ")", ":", "if", "self", ".", "_contributions", "is", "None", ":", "self", ".", "_contributions", "=", "self", ".", "contributor", ".", "contributions", ".", "filter", "(", "content__published__gte", "=", "self", ".", "sta...
Apply a datetime filter against the contributor's contribution queryset.
[ "Apply", "a", "datetime", "filter", "against", "the", "contributor", "s", "contribution", "queryset", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/contributions/email.py#L80-L87
theonion/django-bulbs
bulbs/contributions/email.py
ContributorReport.line_items
def line_items(self): """Apply a datetime filter against the contributors's line item queryset.""" if self._line_items is None: self._line_items = self.contributor.line_items.filter( payment_date__range=(self.start, self.end) ) return self._line_items
python
def line_items(self): """Apply a datetime filter against the contributors's line item queryset.""" if self._line_items is None: self._line_items = self.contributor.line_items.filter( payment_date__range=(self.start, self.end) ) return self._line_items
[ "def", "line_items", "(", "self", ")", ":", "if", "self", ".", "_line_items", "is", "None", ":", "self", ".", "_line_items", "=", "self", ".", "contributor", ".", "line_items", ".", "filter", "(", "payment_date__range", "=", "(", "self", ".", "start", ",...
Apply a datetime filter against the contributors's line item queryset.
[ "Apply", "a", "datetime", "filter", "against", "the", "contributors", "s", "line", "item", "queryset", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/contributions/email.py#L90-L96
theonion/django-bulbs
bulbs/contributions/email.py
ContributorReport.deadline
def deadline(self): """Return next day as deadline if no deadline provided.""" if not self._deadline: self._deadline = self.now + timezone.timedelta(days=1) return self._deadline
python
def deadline(self): """Return next day as deadline if no deadline provided.""" if not self._deadline: self._deadline = self.now + timezone.timedelta(days=1) return self._deadline
[ "def", "deadline", "(", "self", ")", ":", "if", "not", "self", ".", "_deadline", ":", "self", ".", "_deadline", "=", "self", ".", "now", "+", "timezone", ".", "timedelta", "(", "days", "=", "1", ")", "return", "self", ".", "_deadline" ]
Return next day as deadline if no deadline provided.
[ "Return", "next", "day", "as", "deadline", "if", "no", "deadline", "provided", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/contributions/email.py#L110-L114
theonion/django-bulbs
bulbs/contributions/email.py
EmailReport.send_contributor_email
def send_contributor_email(self, contributor): """Send an EmailMessage object for a given contributor.""" ContributorReport( contributor, month=self.month, year=self.year, deadline=self._deadline, start=self._start, end=self._end ).send()
python
def send_contributor_email(self, contributor): """Send an EmailMessage object for a given contributor.""" ContributorReport( contributor, month=self.month, year=self.year, deadline=self._deadline, start=self._start, end=self._end ).send()
[ "def", "send_contributor_email", "(", "self", ",", "contributor", ")", ":", "ContributorReport", "(", "contributor", ",", "month", "=", "self", ".", "month", ",", "year", "=", "self", ".", "year", ",", "deadline", "=", "self", ".", "_deadline", ",", "start...
Send an EmailMessage object for a given contributor.
[ "Send", "an", "EmailMessage", "object", "for", "a", "given", "contributor", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/contributions/email.py#L149-L158
theonion/django-bulbs
bulbs/contributions/email.py
EmailReport.send_mass_contributor_emails
def send_mass_contributor_emails(self): """Send report email to all relevant contributors.""" # If the report configuration is not active we only send to the debugging user. for contributor in self.contributors: if contributor.email not in EMAIL_SETTINGS.get("EXCLUDED", []): self.send_contributor_email(contributor)
python
def send_mass_contributor_emails(self): """Send report email to all relevant contributors.""" # If the report configuration is not active we only send to the debugging user. for contributor in self.contributors: if contributor.email not in EMAIL_SETTINGS.get("EXCLUDED", []): self.send_contributor_email(contributor)
[ "def", "send_mass_contributor_emails", "(", "self", ")", ":", "# If the report configuration is not active we only send to the debugging user.", "for", "contributor", "in", "self", ".", "contributors", ":", "if", "contributor", ".", "email", "not", "in", "EMAIL_SETTINGS", "...
Send report email to all relevant contributors.
[ "Send", "report", "email", "to", "all", "relevant", "contributors", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/contributions/email.py#L160-L165
theonion/django-bulbs
bulbs/contributions/email.py
EmailReport.get_contributors
def get_contributors(self): """Return a list of contributors with contributions between the start/end dates.""" return User.objects.filter( freelanceprofile__is_freelance=True ).filter( contributions__content__published__gte=self.start, contributions__content__published__lt=self.end ).distinct()
python
def get_contributors(self): """Return a list of contributors with contributions between the start/end dates.""" return User.objects.filter( freelanceprofile__is_freelance=True ).filter( contributions__content__published__gte=self.start, contributions__content__published__lt=self.end ).distinct()
[ "def", "get_contributors", "(", "self", ")", ":", "return", "User", ".", "objects", ".", "filter", "(", "freelanceprofile__is_freelance", "=", "True", ")", ".", "filter", "(", "contributions__content__published__gte", "=", "self", ".", "start", ",", "contributions...
Return a list of contributors with contributions between the start/end dates.
[ "Return", "a", "list", "of", "contributors", "with", "contributions", "between", "the", "start", "/", "end", "dates", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/contributions/email.py#L167-L174
theonion/django-bulbs
bulbs/contributions/email.py
EmailReport.contributors
def contributors(self): """Property to retrieve or access the list of contributors.""" if not self._contributors: self._contributors = self.get_contributors() return self._contributors
python
def contributors(self): """Property to retrieve or access the list of contributors.""" if not self._contributors: self._contributors = self.get_contributors() return self._contributors
[ "def", "contributors", "(", "self", ")", ":", "if", "not", "self", ".", "_contributors", ":", "self", ".", "_contributors", "=", "self", ".", "get_contributors", "(", ")", "return", "self", ".", "_contributors" ]
Property to retrieve or access the list of contributors.
[ "Property", "to", "retrieve", "or", "access", "the", "list", "of", "contributors", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/contributions/email.py#L177-L181
bioasp/caspo
caspo/control.py
Controller.control
def control(self, size=0, configure=None): """ Finds all inclusion-minimal intervention strategies up to the given size. Intervention strategies found are saved in the attribute :attr:`strategies` as a :class:`caspo.core.clamping.ClampingList` object instance. Example:: >>> from caspo import core, control >>> networks = core.LogicalNetworkList.from_csv('networks.csv') >>> scenarios = control.ScenarioList('scenarios.csv') >>> controller = control.Controller(networks, scenarios) >>> controller.control() >>> controller.strategies.to_csv('strategies.csv') Parameters ---------- size : int Maximum number of intervention per intervention strategy configure : callable Callable object responsible of setting clingo configuration """ self._strategies = [] clingo = gringo.Control(['-c maxsize=%s' % size]) clingo.conf.solve.models = '0' if configure: def overwrite(args, proxy): for i in xrange(args.threads): proxy.solver[i].no_lookback = 'false' proxy.solver[i].heuristic = 'domain' proxy.solver[i].dom_mod = '5,16' configure(clingo.conf, overwrite) else: clingo.conf.solver.no_lookback = 'false' clingo.conf.solver.heuristic = 'domain' clingo.conf.solver.dom_mod = '5,16' clingo.conf.solve.enum_mode = 'domRec' clingo.add("base", [], self.instance) clingo.load(self.encodings['control']) clingo.ground([("base", [])]) clingo.solve(on_model=self.__save__) self.stats['time_optimum'] = clingo.stats['time_solve'] self.stats['time_enumeration'] = clingo.stats['time_total'] self._logger.info("%s optimal intervention strategies found in %.4fs", len(self._strategies), self.stats['time_enumeration']) self.strategies = core.ClampingList(self._strategies)
python
def control(self, size=0, configure=None): """ Finds all inclusion-minimal intervention strategies up to the given size. Intervention strategies found are saved in the attribute :attr:`strategies` as a :class:`caspo.core.clamping.ClampingList` object instance. Example:: >>> from caspo import core, control >>> networks = core.LogicalNetworkList.from_csv('networks.csv') >>> scenarios = control.ScenarioList('scenarios.csv') >>> controller = control.Controller(networks, scenarios) >>> controller.control() >>> controller.strategies.to_csv('strategies.csv') Parameters ---------- size : int Maximum number of intervention per intervention strategy configure : callable Callable object responsible of setting clingo configuration """ self._strategies = [] clingo = gringo.Control(['-c maxsize=%s' % size]) clingo.conf.solve.models = '0' if configure: def overwrite(args, proxy): for i in xrange(args.threads): proxy.solver[i].no_lookback = 'false' proxy.solver[i].heuristic = 'domain' proxy.solver[i].dom_mod = '5,16' configure(clingo.conf, overwrite) else: clingo.conf.solver.no_lookback = 'false' clingo.conf.solver.heuristic = 'domain' clingo.conf.solver.dom_mod = '5,16' clingo.conf.solve.enum_mode = 'domRec' clingo.add("base", [], self.instance) clingo.load(self.encodings['control']) clingo.ground([("base", [])]) clingo.solve(on_model=self.__save__) self.stats['time_optimum'] = clingo.stats['time_solve'] self.stats['time_enumeration'] = clingo.stats['time_total'] self._logger.info("%s optimal intervention strategies found in %.4fs", len(self._strategies), self.stats['time_enumeration']) self.strategies = core.ClampingList(self._strategies)
[ "def", "control", "(", "self", ",", "size", "=", "0", ",", "configure", "=", "None", ")", ":", "self", ".", "_strategies", "=", "[", "]", "clingo", "=", "gringo", ".", "Control", "(", "[", "'-c maxsize=%s'", "%", "size", "]", ")", "clingo", ".", "c...
Finds all inclusion-minimal intervention strategies up to the given size. Intervention strategies found are saved in the attribute :attr:`strategies` as a :class:`caspo.core.clamping.ClampingList` object instance. Example:: >>> from caspo import core, control >>> networks = core.LogicalNetworkList.from_csv('networks.csv') >>> scenarios = control.ScenarioList('scenarios.csv') >>> controller = control.Controller(networks, scenarios) >>> controller.control() >>> controller.strategies.to_csv('strategies.csv') Parameters ---------- size : int Maximum number of intervention per intervention strategy configure : callable Callable object responsible of setting clingo configuration
[ "Finds", "all", "inclusion", "-", "minimal", "intervention", "strategies", "up", "to", "the", "given", "size", ".", "Intervention", "strategies", "found", "are", "saved", "in", "the", "attribute", ":", "attr", ":", "strategies", "as", "a", ":", "class", ":",...
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/control.py#L139-L196
gtzampanakis/downloader
downloader.py
Downloader.open_url
def open_url(self, url, stale_after, parse_as_html = True, **kwargs): """ Download or retrieve from cache. url -- The URL to be downloaded, as a string. stale_after -- A network request for the url will be performed if the cached copy does not exist or if it exists but its age (in days) is larger or equal to the stale_after value. A non-positive value will force re-download. parse_as_html -- Parse the resource downloaded as HTML. This uses the lxml.html package to parse the resource leniently, thus it will not fail even for reasonably invalid HTML. This argument also decides the return type of this method; if True, then the return type is an ElementTree.Element root object; if False, the content of the resource is returned as a bytestring. Exceptions raised: BannedException -- If does_show_ban returns True. HTTPCodeNotOKError -- If the returned HTTP status code is not equal to 200. """ _LOGGER.info('open_url() received url: %s', url) today = datetime.date.today() threshold_date = today - datetime.timedelta(stale_after) downloaded = False with self._get_conn() as conn: rs = conn.execute(''' select content from cache where url = ? and date > ? 
''', (url, _date_to_sqlite_str(threshold_date)) ) row = rs.fetchone() retry_run = kwargs.get('retry_run', False) assert (not retry_run) or (retry_run and row is None) if row is None: file_obj = self._download(url).get_file_obj() downloaded = True else: file_obj = cStringIO.StringIO(zlib.decompress(row[0])) if parse_as_html: tree = lxml.html.parse(file_obj) tree.getroot().url = url appears_to_be_banned = False if self.does_show_ban(tree.getroot()): appears_to_be_banned = True if downloaded: message = ('Function {f} claims we have been banned, ' 'it was called with an element parsed from url ' '(downloaded, not from cache): {u}' .format(f = self.does_show_ban, u = url)) _LOGGER.error(message) _LOGGER.info('Deleting url %s from the cache (if it exists) ' 'because it triggered ban page cache poisoning ' 'exception', url) with self._get_conn() as conn: conn.execute('delete from cache where url = ?', [str(url)]) if downloaded: raise BannedException(message) else: return self.open_url(url, stale_after, retry_run = True) else: tree = file_obj.read() if downloaded: # make_links_absolute should only be called when the document has a base_url # attribute, which it has not when it has been loaded from the database. So, # this "if" is needed: if parse_as_html: tree.getroot().make_links_absolute(tree.getroot().base_url) to_store = lxml.html.tostring( tree, pretty_print = True, encoding = 'utf-8' ) else: to_store = tree to_store = zlib.compress(to_store, 8) with self._get_conn() as conn: conn.execute(''' insert or replace into cache (url, date, content) values (?, ?, ?) ''', ( str(url), _date_to_sqlite_str(today), sqlite3.Binary(to_store) ) ) return tree
python
def open_url(self, url, stale_after, parse_as_html = True, **kwargs): """ Download or retrieve from cache. url -- The URL to be downloaded, as a string. stale_after -- A network request for the url will be performed if the cached copy does not exist or if it exists but its age (in days) is larger or equal to the stale_after value. A non-positive value will force re-download. parse_as_html -- Parse the resource downloaded as HTML. This uses the lxml.html package to parse the resource leniently, thus it will not fail even for reasonably invalid HTML. This argument also decides the return type of this method; if True, then the return type is an ElementTree.Element root object; if False, the content of the resource is returned as a bytestring. Exceptions raised: BannedException -- If does_show_ban returns True. HTTPCodeNotOKError -- If the returned HTTP status code is not equal to 200. """ _LOGGER.info('open_url() received url: %s', url) today = datetime.date.today() threshold_date = today - datetime.timedelta(stale_after) downloaded = False with self._get_conn() as conn: rs = conn.execute(''' select content from cache where url = ? and date > ? 
''', (url, _date_to_sqlite_str(threshold_date)) ) row = rs.fetchone() retry_run = kwargs.get('retry_run', False) assert (not retry_run) or (retry_run and row is None) if row is None: file_obj = self._download(url).get_file_obj() downloaded = True else: file_obj = cStringIO.StringIO(zlib.decompress(row[0])) if parse_as_html: tree = lxml.html.parse(file_obj) tree.getroot().url = url appears_to_be_banned = False if self.does_show_ban(tree.getroot()): appears_to_be_banned = True if downloaded: message = ('Function {f} claims we have been banned, ' 'it was called with an element parsed from url ' '(downloaded, not from cache): {u}' .format(f = self.does_show_ban, u = url)) _LOGGER.error(message) _LOGGER.info('Deleting url %s from the cache (if it exists) ' 'because it triggered ban page cache poisoning ' 'exception', url) with self._get_conn() as conn: conn.execute('delete from cache where url = ?', [str(url)]) if downloaded: raise BannedException(message) else: return self.open_url(url, stale_after, retry_run = True) else: tree = file_obj.read() if downloaded: # make_links_absolute should only be called when the document has a base_url # attribute, which it has not when it has been loaded from the database. So, # this "if" is needed: if parse_as_html: tree.getroot().make_links_absolute(tree.getroot().base_url) to_store = lxml.html.tostring( tree, pretty_print = True, encoding = 'utf-8' ) else: to_store = tree to_store = zlib.compress(to_store, 8) with self._get_conn() as conn: conn.execute(''' insert or replace into cache (url, date, content) values (?, ?, ?) ''', ( str(url), _date_to_sqlite_str(today), sqlite3.Binary(to_store) ) ) return tree
[ "def", "open_url", "(", "self", ",", "url", ",", "stale_after", ",", "parse_as_html", "=", "True", ",", "*", "*", "kwargs", ")", ":", "_LOGGER", ".", "info", "(", "'open_url() received url: %s'", ",", "url", ")", "today", "=", "datetime", ".", "date", "....
Download or retrieve from cache. url -- The URL to be downloaded, as a string. stale_after -- A network request for the url will be performed if the cached copy does not exist or if it exists but its age (in days) is larger or equal to the stale_after value. A non-positive value will force re-download. parse_as_html -- Parse the resource downloaded as HTML. This uses the lxml.html package to parse the resource leniently, thus it will not fail even for reasonably invalid HTML. This argument also decides the return type of this method; if True, then the return type is an ElementTree.Element root object; if False, the content of the resource is returned as a bytestring. Exceptions raised: BannedException -- If does_show_ban returns True. HTTPCodeNotOKError -- If the returned HTTP status code is not equal to 200.
[ "Download", "or", "retrieve", "from", "cache", ".", "url", "--", "The", "URL", "to", "be", "downloaded", "as", "a", "string", ".", "stale_after", "--", "A", "network", "request", "for", "the", "url", "will", "be", "performed", "if", "the", "cached", "cop...
train
https://github.com/gtzampanakis/downloader/blob/7354f68adc72f2bfc472f41596af6ee8b3e6ea88/downloader.py#L192-L297
gtzampanakis/downloader
downloader.py
CrawlElement.xpath_pick_one
def xpath_pick_one(self, xpaths): """ Try each of the xpaths successively until a single element is found. If no xpath succeeds then raise the last UnexpectedContentException caught. """ for xpathi, xpath in enumerate(xpaths): try: return self.xpath(xpath, [1, 1])[0] except UnexpectedContentException as e: if xpathi == len(xpaths) - 1: raise
python
def xpath_pick_one(self, xpaths): """ Try each of the xpaths successively until a single element is found. If no xpath succeeds then raise the last UnexpectedContentException caught. """ for xpathi, xpath in enumerate(xpaths): try: return self.xpath(xpath, [1, 1])[0] except UnexpectedContentException as e: if xpathi == len(xpaths) - 1: raise
[ "def", "xpath_pick_one", "(", "self", ",", "xpaths", ")", ":", "for", "xpathi", ",", "xpath", "in", "enumerate", "(", "xpaths", ")", ":", "try", ":", "return", "self", ".", "xpath", "(", "xpath", ",", "[", "1", ",", "1", "]", ")", "[", "0", "]", ...
Try each of the xpaths successively until a single element is found. If no xpath succeeds then raise the last UnexpectedContentException caught.
[ "Try", "each", "of", "the", "xpaths", "successively", "until", "a", "single", "element", "is", "found", ".", "If", "no", "xpath", "succeeds", "then", "raise", "the", "last", "UnexpectedContentException", "caught", "." ]
train
https://github.com/gtzampanakis/downloader/blob/7354f68adc72f2bfc472f41596af6ee8b3e6ea88/downloader.py#L339-L350
theonion/django-bulbs
bulbs/promotion/serializers.py
_PZoneOperationSerializer.get_content_title
def get_content_title(self, obj): """Get content's title.""" return Content.objects.get(id=obj.content.id).title
python
def get_content_title(self, obj): """Get content's title.""" return Content.objects.get(id=obj.content.id).title
[ "def", "get_content_title", "(", "self", ",", "obj", ")", ":", "return", "Content", ".", "objects", ".", "get", "(", "id", "=", "obj", ".", "content", ".", "id", ")", ".", "title" ]
Get content's title.
[ "Get", "content", "s", "title", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/promotion/serializers.py#L51-L53
mrstephenneal/mysql-toolkit
mysql/toolkit/datatypes/dates.py
Dates.is_date
def is_date(self): """Determine if a data record is of type DATE.""" dt = DATA_TYPES['date'] if type(self.data) is dt['type'] and '-' in str(self.data) and str(self.data).count('-') == 2: # Separate year, month and day date_split = str(self.data).split('-') y, m, d = date_split[0], date_split[1], date_split[2] # Validate values valid_year, valid_months, valid_days = int(y) in YEARS, int(m) in MONTHS, int(d) in DAYS # Check that all validations are True if all(i is True for i in (valid_year, valid_months, valid_days)): self.type = 'date'.upper() self.len = None return True
python
def is_date(self): """Determine if a data record is of type DATE.""" dt = DATA_TYPES['date'] if type(self.data) is dt['type'] and '-' in str(self.data) and str(self.data).count('-') == 2: # Separate year, month and day date_split = str(self.data).split('-') y, m, d = date_split[0], date_split[1], date_split[2] # Validate values valid_year, valid_months, valid_days = int(y) in YEARS, int(m) in MONTHS, int(d) in DAYS # Check that all validations are True if all(i is True for i in (valid_year, valid_months, valid_days)): self.type = 'date'.upper() self.len = None return True
[ "def", "is_date", "(", "self", ")", ":", "dt", "=", "DATA_TYPES", "[", "'date'", "]", "if", "type", "(", "self", ".", "data", ")", "is", "dt", "[", "'type'", "]", "and", "'-'", "in", "str", "(", "self", ".", "data", ")", "and", "str", "(", "sel...
Determine if a data record is of type DATE.
[ "Determine", "if", "a", "data", "record", "is", "of", "type", "DATE", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/datatypes/dates.py#L19-L34
mrstephenneal/mysql-toolkit
mysql/toolkit/datatypes/dates.py
Dates.is_time
def is_time(self): """Determine if a data record is of type TIME.""" dt = DATA_TYPES['time'] if type(self.data) is dt['type'] and ':' in str(self.data) and str(self.data).count(':') == 2: # Separate hour, month, second date_split = str(self.data).split(':') h, m, s = date_split[0], date_split[1], date_split[2] # Validate values valid_hour, valid_min, valid_sec = int(h) in HOURS, int(m) in MINUTES, int(float(s)) in SECONDS if all(i is True for i in (valid_hour, valid_min, valid_sec)): self.type = 'time'.upper() self.len = None return True
python
def is_time(self): """Determine if a data record is of type TIME.""" dt = DATA_TYPES['time'] if type(self.data) is dt['type'] and ':' in str(self.data) and str(self.data).count(':') == 2: # Separate hour, month, second date_split = str(self.data).split(':') h, m, s = date_split[0], date_split[1], date_split[2] # Validate values valid_hour, valid_min, valid_sec = int(h) in HOURS, int(m) in MINUTES, int(float(s)) in SECONDS if all(i is True for i in (valid_hour, valid_min, valid_sec)): self.type = 'time'.upper() self.len = None return True
[ "def", "is_time", "(", "self", ")", ":", "dt", "=", "DATA_TYPES", "[", "'time'", "]", "if", "type", "(", "self", ".", "data", ")", "is", "dt", "[", "'type'", "]", "and", "':'", "in", "str", "(", "self", ".", "data", ")", "and", "str", "(", "sel...
Determine if a data record is of type TIME.
[ "Determine", "if", "a", "data", "record", "is", "of", "type", "TIME", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/datatypes/dates.py#L40-L54
mrstephenneal/mysql-toolkit
mysql/toolkit/datatypes/dates.py
Dates.is_year
def is_year(self): """Determine if a data record is of type YEAR.""" dt = DATA_TYPES['year'] if dt['min'] and dt['max']: if type(self.data) is dt['type'] and dt['min'] < self.data < dt['max']: self.type = 'year'.upper() self.len = None return True
python
def is_year(self): """Determine if a data record is of type YEAR.""" dt = DATA_TYPES['year'] if dt['min'] and dt['max']: if type(self.data) is dt['type'] and dt['min'] < self.data < dt['max']: self.type = 'year'.upper() self.len = None return True
[ "def", "is_year", "(", "self", ")", ":", "dt", "=", "DATA_TYPES", "[", "'year'", "]", "if", "dt", "[", "'min'", "]", "and", "dt", "[", "'max'", "]", ":", "if", "type", "(", "self", ".", "data", ")", "is", "dt", "[", "'type'", "]", "and", "dt", ...
Determine if a data record is of type YEAR.
[ "Determine", "if", "a", "data", "record", "is", "of", "type", "YEAR", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/datatypes/dates.py#L56-L63
mrstephenneal/mysql-toolkit
mysql/toolkit/datatypes/dates.py
Dates._is_date_data
def _is_date_data(self, data_type): """Private method for determining if a data record is of type DATE.""" dt = DATA_TYPES[data_type] if isinstance(self.data, dt['type']): self.type = data_type.upper() self.len = None return True
python
def _is_date_data(self, data_type): """Private method for determining if a data record is of type DATE.""" dt = DATA_TYPES[data_type] if isinstance(self.data, dt['type']): self.type = data_type.upper() self.len = None return True
[ "def", "_is_date_data", "(", "self", ",", "data_type", ")", ":", "dt", "=", "DATA_TYPES", "[", "data_type", "]", "if", "isinstance", "(", "self", ".", "data", ",", "dt", "[", "'type'", "]", ")", ":", "self", ".", "type", "=", "data_type", ".", "upper...
Private method for determining if a data record is of type DATE.
[ "Private", "method", "for", "determining", "if", "a", "data", "record", "is", "of", "type", "DATE", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/datatypes/dates.py#L65-L71
saghul/evergreen
evergreen/locks.py
Barrier.wait
def wait(self, timeout=None): """Wait for the barrier. When the specified number of threads have started waiting, they are all simultaneously awoken. If an 'action' was provided for the barrier, one of the threads will have executed that callback prior to returning. Returns an individual index number from 0 to 'parties-1'. """ if timeout is None: timeout = self._timeout with self._cond: self._enter() # Block while the barrier drains. index = self._count self._count += 1 try: if index + 1 == self._parties: # We release the barrier self._release() else: # We wait until someone releases us self._wait(timeout) return index finally: self._count -= 1 # Wake up any threads waiting for barrier to drain. self._exit()
python
def wait(self, timeout=None): """Wait for the barrier. When the specified number of threads have started waiting, they are all simultaneously awoken. If an 'action' was provided for the barrier, one of the threads will have executed that callback prior to returning. Returns an individual index number from 0 to 'parties-1'. """ if timeout is None: timeout = self._timeout with self._cond: self._enter() # Block while the barrier drains. index = self._count self._count += 1 try: if index + 1 == self._parties: # We release the barrier self._release() else: # We wait until someone releases us self._wait(timeout) return index finally: self._count -= 1 # Wake up any threads waiting for barrier to drain. self._exit()
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "_timeout", "with", "self", ".", "_cond", ":", "self", ".", "_enter", "(", ")", "# Block while the barrier drains.", "inde...
Wait for the barrier. When the specified number of threads have started waiting, they are all simultaneously awoken. If an 'action' was provided for the barrier, one of the threads will have executed that callback prior to returning. Returns an individual index number from 0 to 'parties-1'.
[ "Wait", "for", "the", "barrier", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/locks.py#L251-L277
saghul/evergreen
evergreen/locks.py
Barrier.reset
def reset(self): """Reset the barrier to the initial state. Any threads currently waiting will get the BrokenBarrier exception raised. """ with self._cond: if self._count > 0: if self._state == 0: #reset the barrier, waking up threads self._state = -1 elif self._state == -2: #was broken, set it to reset state #which clears when the last thread exits self._state = -1 else: self._state = 0 self._cond.notify_all()
python
def reset(self): """Reset the barrier to the initial state. Any threads currently waiting will get the BrokenBarrier exception raised. """ with self._cond: if self._count > 0: if self._state == 0: #reset the barrier, waking up threads self._state = -1 elif self._state == -2: #was broken, set it to reset state #which clears when the last thread exits self._state = -1 else: self._state = 0 self._cond.notify_all()
[ "def", "reset", "(", "self", ")", ":", "with", "self", ".", "_cond", ":", "if", "self", ".", "_count", ">", "0", ":", "if", "self", ".", "_state", "==", "0", ":", "#reset the barrier, waking up threads", "self", ".", "_state", "=", "-", "1", "elif", ...
Reset the barrier to the initial state. Any threads currently waiting will get the BrokenBarrier exception raised.
[ "Reset", "the", "barrier", "to", "the", "initial", "state", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/locks.py#L324-L342
theonion/django-bulbs
bulbs/content/views.py
BaseContentDetailView.get
def get(self, request, *args, **kwargs): """Override default get function to use token if there is one to retrieve object. If a subclass should use their own GET implementation, token_from_kwargs should be called if that detail view should be accessible via token.""" self.object = self.get_object() allow_anonymous = kwargs.get("allow_anonymous", False) # We only want to redirect is that setting is true, and we're not allowing anonymous users if self.redirect_correct_path and not allow_anonymous: # Also we obviously only want to redirect if the URL is wrong if self.request.path != self.object.get_absolute_url(): return HttpResponsePermanentRedirect(self.object.get_absolute_url()) context = self.get_context_data(object=self.object) response = self.render_to_response(context) # If we have an unpublished article.... if self.object.published is None or self.object.published > timezone.now(): # And the user doesn't have permission to view this if not request.user.is_staff and not allow_anonymous: response = redirect_unpublished_to_login_or_404( request=request, next_url=self.object.get_absolute_url(), next_params=request.GET) # Never cache unpublished articles add_never_cache_headers(response) else: response["Vary"] = "Accept-Encoding" return response
python
def get(self, request, *args, **kwargs): """Override default get function to use token if there is one to retrieve object. If a subclass should use their own GET implementation, token_from_kwargs should be called if that detail view should be accessible via token.""" self.object = self.get_object() allow_anonymous = kwargs.get("allow_anonymous", False) # We only want to redirect is that setting is true, and we're not allowing anonymous users if self.redirect_correct_path and not allow_anonymous: # Also we obviously only want to redirect if the URL is wrong if self.request.path != self.object.get_absolute_url(): return HttpResponsePermanentRedirect(self.object.get_absolute_url()) context = self.get_context_data(object=self.object) response = self.render_to_response(context) # If we have an unpublished article.... if self.object.published is None or self.object.published > timezone.now(): # And the user doesn't have permission to view this if not request.user.is_staff and not allow_anonymous: response = redirect_unpublished_to_login_or_404( request=request, next_url=self.object.get_absolute_url(), next_params=request.GET) # Never cache unpublished articles add_never_cache_headers(response) else: response["Vary"] = "Accept-Encoding" return response
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "object", "=", "self", ".", "get_object", "(", ")", "allow_anonymous", "=", "kwargs", ".", "get", "(", "\"allow_anonymous\"", ",", "False", "...
Override default get function to use token if there is one to retrieve object. If a subclass should use their own GET implementation, token_from_kwargs should be called if that detail view should be accessible via token.
[ "Override", "default", "get", "function", "to", "use", "token", "if", "there", "is", "one", "to", "retrieve", "object", ".", "If", "a", "subclass", "should", "use", "their", "own", "GET", "implementation", "token_from_kwargs", "should", "be", "called", "if", ...
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/content/views.py#L158-L192
20c/vodka
vodka/config/validators.py
host
def host(value): """ Validates that the value is a valid network location """ if not value: return (True, "") try: host,port = value.split(":") except ValueError as _: return (False, "value needs to be <host>:<port>") try: int(port) except ValueError as _: return (False, "port component of the host address needs to be a number") return (True, "")
python
def host(value): """ Validates that the value is a valid network location """ if not value: return (True, "") try: host,port = value.split(":") except ValueError as _: return (False, "value needs to be <host>:<port>") try: int(port) except ValueError as _: return (False, "port component of the host address needs to be a number") return (True, "")
[ "def", "host", "(", "value", ")", ":", "if", "not", "value", ":", "return", "(", "True", ",", "\"\"", ")", "try", ":", "host", ",", "port", "=", "value", ".", "split", "(", "\":\"", ")", "except", "ValueError", "as", "_", ":", "return", "(", "Fal...
Validates that the value is a valid network location
[ "Validates", "that", "the", "value", "is", "a", "valid", "network", "location" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/config/validators.py#L19-L33
bharadwaj-raju/libdesktop
libdesktop/wallpaper.py
get_wallpaper
def get_wallpaper(): '''Get the desktop wallpaper. Get the current desktop wallpaper. Returns: str: The path to the current wallpaper. ''' desktop_env = system.get_name() if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']: SCHEMA = 'org.gnome.desktop.background' KEY = 'picture-uri' if desktop_env == 'mate': SCHEMA = 'org.mate.background' KEY = 'picture-filename' try: from gi.repository import Gio gsettings = Gio.Settings.new(SCHEMA) return gsettings.get_string(KEY).replace('file://', '') except ImportError: try: return system.get_cmd_out( ['gsettings', 'get', SCHEMA, KEY]).replace('file://', '') except: # MATE < 1.6 return system.get_cmd_out( ['mateconftool-2', '-t', 'string', '--get', '/desktop/mate/background/picture_filename'] ).replace('file://', '') elif desktop_env == 'gnome2': args = ['gconftool-2', '-t', 'string', '--get', '/desktop/gnome/background/picture_filename'] return system.get_cmd_out(args).replace('file://', '') elif desktop_env == 'kde': conf_file = directories.get_config_file( 'plasma-org.kde.plasma.desktop-appletsrc')[0] with open(conf_file) as f: contents = f.read() contents = contents.splitlines() contents = contents[ contents.index( '[Containments][8][Wallpaper][org.kde.image][General]') + 1].split( '=', 1 ) return contents[len(contents) - 1].strip().replace('file://', '') elif desktop_env == 'xfce4': # XFCE4's image property is not image-path but last-image (What?) 
list_of_properties = system.get_cmd_out( ['xfconf-query', '-R', '-l', '-c', 'xfce4-desktop', '-p', '/backdrop']) for i in list_of_properties.split('\n'): if i.endswith('last-image') and 'workspace' in i: # The property given is a background property return system.get_cmd_out( ['xfconf-query', '-c', 'xfce4-desktop', '-p', i]) elif desktop_env == 'razor-qt': desktop_conf = configparser.ConfigParser() # Development version desktop_conf_file = os.path.join( get_config_dir('razor')[0], 'desktop.conf') if os.path.isfile(desktop_conf_file): config_option = r'screens\1\desktops\1\wallpaper' else: desktop_conf_file = os.path.join( os.path.expanduser('~'), '.razor/desktop.conf') config_option = r'desktops\1\wallpaper' desktop_conf.read(os.path.join(desktop_conf_file)) try: if desktop_conf.has_option('razor', config_option): return desktop_conf.get('razor', config_option) except: pass elif desktop_env in ['fluxbox', 'jwm', 'openbox', 'afterstep', 'i3']: # feh stores last feh command in '~/.fehbg' # parse it with open(os.path.expanduser('~/.fehbg')) as f: fehbg = f.read() fehbg = fehbg.split('\n') for line in fehbg: if '#!' 
in line: fehbg.remove(line) fehbg = fehbg[0] for i in fehbg.split(' '): if not i.startswith("-"): if not i.startswith('feh'): if not i in ['', ' ', ' ', '\n']: return(i.replace("'", '')) # TODO: way to get wallpaper for desktops which are commented-out below elif desktop_env == 'icewm': with open(os.path.expanduser('~/.icewm/preferences')) as f: for line in f: if line.startswith('DesktopBackgroundImage'): return os.path.expanduser(line.strip().split( '=', 1)[1].strip().replace('"', '').replace("'", '')) elif desktop_env == 'awesome': conf_file = os.path.join( directories.get_config_dir('awesome')[0], 'rc.lua') with open(conf_file) as f: for line in f: if line.startswith('theme_path'): awesome_theme = line.strip().split('=', 1) awesome_theme = awesome_theme[ len(awesome_theme) - 1].strip().replace( '"', '').replace( "'", '') with open(os.path.expanduser(awesome_theme)) as f: for line in f: if line.startswith('theme.wallpaper'): awesome_wallpaper = line.strip().split('=', 1) awesome_wallpaper = awesome_wallpaper[ len(awesome_wallpaper) - 1].strip().replace( '"', '').replace( "'", '') return os.path.expanduser(awesome_wallpaper) # elif desktop_env == 'blackbox': # args = ['bsetbg', '-full', image] # sp.Popen(args) # # elif desktop_env == 'lxde': # args = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image # sp.Popen(args, shell=True) # # elif desktop_env == 'lxqt': # args = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image # sp.Popen(args, shell=True) # # elif desktop_env == 'windowmaker': # args = 'wmsetbg -s -u %s' % image # sp.Popen(args, shell=True) # # elif desktop_env == 'enlightenment': # args = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image # sp.Popen(args, shell=True) # # elif desktop_env == 'awesome': # with sp.Popen("awesome-client", stdin=sp.PIPE) as awesome_client: # command = 'local gears = require("gears"); for s = 1, screen.count() # do gears.wallpaper.maximized("%s", s, true); end;' % image # 
awesome_client.communicate(input=bytes(command, 'UTF-8')) elif desktop_env == 'windows': WINDOWS_SCRIPT = ('reg query "HKEY_CURRENT_USER\Control' ' Panel\Desktop\Desktop"') return system.get_cmd_out(WINDOWS_SCRIPT) elif desktop_env == 'mac': try: from appscript import app app('Finder').desktop_picture.get() except ImportError: OSX_SCRIPT = ('tell app "finder" to get posix path' ' of (get desktop picture as alias)') return system.get_cmd_out(['osascript', OSX_SCRIPT])
python
def get_wallpaper(): '''Get the desktop wallpaper. Get the current desktop wallpaper. Returns: str: The path to the current wallpaper. ''' desktop_env = system.get_name() if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']: SCHEMA = 'org.gnome.desktop.background' KEY = 'picture-uri' if desktop_env == 'mate': SCHEMA = 'org.mate.background' KEY = 'picture-filename' try: from gi.repository import Gio gsettings = Gio.Settings.new(SCHEMA) return gsettings.get_string(KEY).replace('file://', '') except ImportError: try: return system.get_cmd_out( ['gsettings', 'get', SCHEMA, KEY]).replace('file://', '') except: # MATE < 1.6 return system.get_cmd_out( ['mateconftool-2', '-t', 'string', '--get', '/desktop/mate/background/picture_filename'] ).replace('file://', '') elif desktop_env == 'gnome2': args = ['gconftool-2', '-t', 'string', '--get', '/desktop/gnome/background/picture_filename'] return system.get_cmd_out(args).replace('file://', '') elif desktop_env == 'kde': conf_file = directories.get_config_file( 'plasma-org.kde.plasma.desktop-appletsrc')[0] with open(conf_file) as f: contents = f.read() contents = contents.splitlines() contents = contents[ contents.index( '[Containments][8][Wallpaper][org.kde.image][General]') + 1].split( '=', 1 ) return contents[len(contents) - 1].strip().replace('file://', '') elif desktop_env == 'xfce4': # XFCE4's image property is not image-path but last-image (What?) 
list_of_properties = system.get_cmd_out( ['xfconf-query', '-R', '-l', '-c', 'xfce4-desktop', '-p', '/backdrop']) for i in list_of_properties.split('\n'): if i.endswith('last-image') and 'workspace' in i: # The property given is a background property return system.get_cmd_out( ['xfconf-query', '-c', 'xfce4-desktop', '-p', i]) elif desktop_env == 'razor-qt': desktop_conf = configparser.ConfigParser() # Development version desktop_conf_file = os.path.join( get_config_dir('razor')[0], 'desktop.conf') if os.path.isfile(desktop_conf_file): config_option = r'screens\1\desktops\1\wallpaper' else: desktop_conf_file = os.path.join( os.path.expanduser('~'), '.razor/desktop.conf') config_option = r'desktops\1\wallpaper' desktop_conf.read(os.path.join(desktop_conf_file)) try: if desktop_conf.has_option('razor', config_option): return desktop_conf.get('razor', config_option) except: pass elif desktop_env in ['fluxbox', 'jwm', 'openbox', 'afterstep', 'i3']: # feh stores last feh command in '~/.fehbg' # parse it with open(os.path.expanduser('~/.fehbg')) as f: fehbg = f.read() fehbg = fehbg.split('\n') for line in fehbg: if '#!' 
in line: fehbg.remove(line) fehbg = fehbg[0] for i in fehbg.split(' '): if not i.startswith("-"): if not i.startswith('feh'): if not i in ['', ' ', ' ', '\n']: return(i.replace("'", '')) # TODO: way to get wallpaper for desktops which are commented-out below elif desktop_env == 'icewm': with open(os.path.expanduser('~/.icewm/preferences')) as f: for line in f: if line.startswith('DesktopBackgroundImage'): return os.path.expanduser(line.strip().split( '=', 1)[1].strip().replace('"', '').replace("'", '')) elif desktop_env == 'awesome': conf_file = os.path.join( directories.get_config_dir('awesome')[0], 'rc.lua') with open(conf_file) as f: for line in f: if line.startswith('theme_path'): awesome_theme = line.strip().split('=', 1) awesome_theme = awesome_theme[ len(awesome_theme) - 1].strip().replace( '"', '').replace( "'", '') with open(os.path.expanduser(awesome_theme)) as f: for line in f: if line.startswith('theme.wallpaper'): awesome_wallpaper = line.strip().split('=', 1) awesome_wallpaper = awesome_wallpaper[ len(awesome_wallpaper) - 1].strip().replace( '"', '').replace( "'", '') return os.path.expanduser(awesome_wallpaper) # elif desktop_env == 'blackbox': # args = ['bsetbg', '-full', image] # sp.Popen(args) # # elif desktop_env == 'lxde': # args = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image # sp.Popen(args, shell=True) # # elif desktop_env == 'lxqt': # args = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image # sp.Popen(args, shell=True) # # elif desktop_env == 'windowmaker': # args = 'wmsetbg -s -u %s' % image # sp.Popen(args, shell=True) # # elif desktop_env == 'enlightenment': # args = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image # sp.Popen(args, shell=True) # # elif desktop_env == 'awesome': # with sp.Popen("awesome-client", stdin=sp.PIPE) as awesome_client: # command = 'local gears = require("gears"); for s = 1, screen.count() # do gears.wallpaper.maximized("%s", s, true); end;' % image # 
awesome_client.communicate(input=bytes(command, 'UTF-8')) elif desktop_env == 'windows': WINDOWS_SCRIPT = ('reg query "HKEY_CURRENT_USER\Control' ' Panel\Desktop\Desktop"') return system.get_cmd_out(WINDOWS_SCRIPT) elif desktop_env == 'mac': try: from appscript import app app('Finder').desktop_picture.get() except ImportError: OSX_SCRIPT = ('tell app "finder" to get posix path' ' of (get desktop picture as alias)') return system.get_cmd_out(['osascript', OSX_SCRIPT])
[ "def", "get_wallpaper", "(", ")", ":", "desktop_env", "=", "system", ".", "get_name", "(", ")", "if", "desktop_env", "in", "[", "'gnome'", ",", "'unity'", ",", "'cinnamon'", ",", "'pantheon'", ",", "'mate'", "]", ":", "SCHEMA", "=", "'org.gnome.desktop.backg...
Get the desktop wallpaper. Get the current desktop wallpaper. Returns: str: The path to the current wallpaper.
[ "Get", "the", "desktop", "wallpaper", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/wallpaper.py#L47-L238
bharadwaj-raju/libdesktop
libdesktop/wallpaper.py
set_wallpaper
def set_wallpaper(image): '''Set the desktop wallpaper. Sets the desktop wallpaper to an image. Args: image (str): The path to the image to be set as wallpaper. ''' desktop_env = system.get_name() if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']: uri = 'file://%s' % image SCHEMA = 'org.gnome.desktop.background' KEY = 'picture-uri' if desktop_env == 'mate': uri = image SCHEMA = 'org.mate.background' KEY = 'picture-filename' try: from gi.repository import Gio gsettings = Gio.Settings.new(SCHEMA) gsettings.set_string(KEY, uri) except ImportError: try: gsettings_proc = sp.Popen( ['gsettings', 'set', SCHEMA, KEY, uri]) except: # MATE < 1.6 sp.Popen(['mateconftool-2', '-t', 'string', '--set', '/desktop/mate/background/picture_filename', '%s' % image], stdout=sp.PIPE) finally: gsettings_proc.communicate() if gsettings_proc.returncode != 0: sp.Popen(['mateconftool-2', '-t', 'string', '--set', '/desktop/mate/background/picture_filename', '%s' % image]) elif desktop_env == 'gnome2': sp.Popen( ['gconftool-2', '-t', 'string', '--set', '/desktop/gnome/background/picture_filename', image] ) elif desktop_env == 'kde': # This probably only works in Plasma 5+ kde_script = dedent( '''\ var Desktops = desktops(); for (i=0;i<Desktops.length;i++) {{ d = Desktops[i]; d.wallpaperPlugin = "org.kde.image"; d.currentConfigGroup = Array("Wallpaper", "org.kde.image", "General"); d.writeConfig("Image", "file://{}") }} ''').format(image) sp.Popen( ['dbus-send', '--session', '--dest=org.kde.plasmashell', '--type=method_call', '/PlasmaShell', 'org.kde.PlasmaShell.evaluateScript', 'string:{}'.format(kde_script)] ) elif desktop_env in ['kde3', 'trinity']: args = 'dcop kdesktop KBackgroundIface setWallpaper 0 "%s" 6' % image sp.Popen(args, shell=True) elif desktop_env == 'xfce4': # XFCE4's image property is not image-path but last-image (What?) 
list_of_properties = system.get_cmd_out( ['xfconf-query', '-R', '-l', '-c', 'xfce4-desktop', '-p', '/backdrop'] ) for i in list_of_properties.split('\n'): if i.endswith('last-image'): # The property given is a background property sp.Popen( ['xfconf-query -c xfce4-desktop -p %s -s "%s"' % (i, image)], shell=True) sp.Popen(['xfdesktop --reload'], shell=True) elif desktop_env == 'razor-qt': desktop_conf = configparser.ConfigParser() # Development version desktop_conf_file = os.path.join( get_config_dir('razor')[0], 'desktop.conf') if os.path.isfile(desktop_conf_file): config_option = r'screens\1\desktops\1\wallpaper' else: desktop_conf_file = os.path.join( os.path.expanduser('~'), '.razor/desktop.conf') config_option = r'desktops\1\wallpaper' desktop_conf.read(os.path.join(desktop_conf_file)) try: if desktop_conf.has_option('razor', config_option): desktop_conf.set('razor', config_option, image) with codecs.open(desktop_conf_file, 'w', encoding='utf-8', errors='replace') as f: desktop_conf.write(f) except: pass elif desktop_env in ['fluxbox', 'jwm', 'openbox', 'afterstep', 'i3']: try: args = ['feh', '--bg-scale', image] sp.Popen(args) except: sys.stderr.write('Error: Failed to set wallpaper with feh!') sys.stderr.write('Please make sre that You have feh installed.') elif desktop_env == 'icewm': args = ['icewmbg', image] sp.Popen(args) elif desktop_env == 'blackbox': args = ['bsetbg', '-full', image] sp.Popen(args) elif desktop_env == 'lxde': args = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image sp.Popen(args, shell=True) elif desktop_env == 'lxqt': args = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image sp.Popen(args, shell=True) elif desktop_env == 'windowmaker': args = 'wmsetbg -s -u %s' % image sp.Popen(args, shell=True) elif desktop_env == 'enlightenment': args = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image sp.Popen(args, shell=True) elif desktop_env == 'awesome': with sp.Popen("awesome-client", stdin=sp.PIPE) as 
awesome_client: command = ('local gears = require("gears"); for s = 1,' ' screen.count() do gears.wallpaper.maximized' '("%s", s, true); end;') % image awesome_client.communicate(input=bytes(command, 'UTF-8')) elif desktop_env == 'windows': WINDOWS_SCRIPT = dedent(''' reg add "HKEY_CURRENT_USER\Control Panel\Desktop" \ /v Wallpaper /t REG_SZ /d %s /f rundll32.exe user32.dll,UpdatePerUserSystemParameters ''') % image windows_script_file = os.path.join( tempfile.gettempdir(), 'wallscript.bat') with open(windows_script_file, 'w') as f: f.write(WINDOWS_SCRIPT) sp.Popen([windows_script_file], shell=True) # Sometimes the method above works # and sometimes the one below SPI_SETDESKWALLPAPER = 20 ctypes.windll.user32.SystemParametersInfoA( SPI_SETDESKWALLPAPER, 0, image, 0) elif desktop_env == 'mac': try: from appscript import app, mactypes app('Finder').desktop_picture.set(mactypes.File(image)) except ImportError: OSX_SCRIPT = dedent( '''tell application "System Events" set desktopCount to count of desktops repeat with desktopNumber from 1 to desktopCount tell desktop desktopNumber set picture to POSIX file "%s" end tell end repeat end tell''') % image sp.Popen(['osascript', OSX_SCRIPT]) else: try: sp.Popen(['feh', '--bg-scale', image]) # feh is nearly a catch-all for Linux WMs except: pass
python
def set_wallpaper(image): '''Set the desktop wallpaper. Sets the desktop wallpaper to an image. Args: image (str): The path to the image to be set as wallpaper. ''' desktop_env = system.get_name() if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']: uri = 'file://%s' % image SCHEMA = 'org.gnome.desktop.background' KEY = 'picture-uri' if desktop_env == 'mate': uri = image SCHEMA = 'org.mate.background' KEY = 'picture-filename' try: from gi.repository import Gio gsettings = Gio.Settings.new(SCHEMA) gsettings.set_string(KEY, uri) except ImportError: try: gsettings_proc = sp.Popen( ['gsettings', 'set', SCHEMA, KEY, uri]) except: # MATE < 1.6 sp.Popen(['mateconftool-2', '-t', 'string', '--set', '/desktop/mate/background/picture_filename', '%s' % image], stdout=sp.PIPE) finally: gsettings_proc.communicate() if gsettings_proc.returncode != 0: sp.Popen(['mateconftool-2', '-t', 'string', '--set', '/desktop/mate/background/picture_filename', '%s' % image]) elif desktop_env == 'gnome2': sp.Popen( ['gconftool-2', '-t', 'string', '--set', '/desktop/gnome/background/picture_filename', image] ) elif desktop_env == 'kde': # This probably only works in Plasma 5+ kde_script = dedent( '''\ var Desktops = desktops(); for (i=0;i<Desktops.length;i++) {{ d = Desktops[i]; d.wallpaperPlugin = "org.kde.image"; d.currentConfigGroup = Array("Wallpaper", "org.kde.image", "General"); d.writeConfig("Image", "file://{}") }} ''').format(image) sp.Popen( ['dbus-send', '--session', '--dest=org.kde.plasmashell', '--type=method_call', '/PlasmaShell', 'org.kde.PlasmaShell.evaluateScript', 'string:{}'.format(kde_script)] ) elif desktop_env in ['kde3', 'trinity']: args = 'dcop kdesktop KBackgroundIface setWallpaper 0 "%s" 6' % image sp.Popen(args, shell=True) elif desktop_env == 'xfce4': # XFCE4's image property is not image-path but last-image (What?) 
list_of_properties = system.get_cmd_out( ['xfconf-query', '-R', '-l', '-c', 'xfce4-desktop', '-p', '/backdrop'] ) for i in list_of_properties.split('\n'): if i.endswith('last-image'): # The property given is a background property sp.Popen( ['xfconf-query -c xfce4-desktop -p %s -s "%s"' % (i, image)], shell=True) sp.Popen(['xfdesktop --reload'], shell=True) elif desktop_env == 'razor-qt': desktop_conf = configparser.ConfigParser() # Development version desktop_conf_file = os.path.join( get_config_dir('razor')[0], 'desktop.conf') if os.path.isfile(desktop_conf_file): config_option = r'screens\1\desktops\1\wallpaper' else: desktop_conf_file = os.path.join( os.path.expanduser('~'), '.razor/desktop.conf') config_option = r'desktops\1\wallpaper' desktop_conf.read(os.path.join(desktop_conf_file)) try: if desktop_conf.has_option('razor', config_option): desktop_conf.set('razor', config_option, image) with codecs.open(desktop_conf_file, 'w', encoding='utf-8', errors='replace') as f: desktop_conf.write(f) except: pass elif desktop_env in ['fluxbox', 'jwm', 'openbox', 'afterstep', 'i3']: try: args = ['feh', '--bg-scale', image] sp.Popen(args) except: sys.stderr.write('Error: Failed to set wallpaper with feh!') sys.stderr.write('Please make sre that You have feh installed.') elif desktop_env == 'icewm': args = ['icewmbg', image] sp.Popen(args) elif desktop_env == 'blackbox': args = ['bsetbg', '-full', image] sp.Popen(args) elif desktop_env == 'lxde': args = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image sp.Popen(args, shell=True) elif desktop_env == 'lxqt': args = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image sp.Popen(args, shell=True) elif desktop_env == 'windowmaker': args = 'wmsetbg -s -u %s' % image sp.Popen(args, shell=True) elif desktop_env == 'enlightenment': args = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image sp.Popen(args, shell=True) elif desktop_env == 'awesome': with sp.Popen("awesome-client", stdin=sp.PIPE) as 
awesome_client: command = ('local gears = require("gears"); for s = 1,' ' screen.count() do gears.wallpaper.maximized' '("%s", s, true); end;') % image awesome_client.communicate(input=bytes(command, 'UTF-8')) elif desktop_env == 'windows': WINDOWS_SCRIPT = dedent(''' reg add "HKEY_CURRENT_USER\Control Panel\Desktop" \ /v Wallpaper /t REG_SZ /d %s /f rundll32.exe user32.dll,UpdatePerUserSystemParameters ''') % image windows_script_file = os.path.join( tempfile.gettempdir(), 'wallscript.bat') with open(windows_script_file, 'w') as f: f.write(WINDOWS_SCRIPT) sp.Popen([windows_script_file], shell=True) # Sometimes the method above works # and sometimes the one below SPI_SETDESKWALLPAPER = 20 ctypes.windll.user32.SystemParametersInfoA( SPI_SETDESKWALLPAPER, 0, image, 0) elif desktop_env == 'mac': try: from appscript import app, mactypes app('Finder').desktop_picture.set(mactypes.File(image)) except ImportError: OSX_SCRIPT = dedent( '''tell application "System Events" set desktopCount to count of desktops repeat with desktopNumber from 1 to desktopCount tell desktop desktopNumber set picture to POSIX file "%s" end tell end repeat end tell''') % image sp.Popen(['osascript', OSX_SCRIPT]) else: try: sp.Popen(['feh', '--bg-scale', image]) # feh is nearly a catch-all for Linux WMs except: pass
[ "def", "set_wallpaper", "(", "image", ")", ":", "desktop_env", "=", "system", ".", "get_name", "(", ")", "if", "desktop_env", "in", "[", "'gnome'", ",", "'unity'", ",", "'cinnamon'", ",", "'pantheon'", ",", "'mate'", "]", ":", "uri", "=", "'file://%s'", ...
Set the desktop wallpaper. Sets the desktop wallpaper to an image. Args: image (str): The path to the image to be set as wallpaper.
[ "Set", "the", "desktop", "wallpaper", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/wallpaper.py#L241-L462
PGower/PyCanvas
pycanvas/apis/quiz_reports.py
QuizReportsAPI.retrieve_all_quiz_reports
def retrieve_all_quiz_reports(self, quiz_id, course_id, includes_all_versions=None): """ Retrieve all quiz reports. Returns a list of all available reports. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - quiz_id """ID""" path["quiz_id"] = quiz_id # OPTIONAL - includes_all_versions """Whether to retrieve reports that consider all the submissions or only the most recent. Defaults to false, ignored for item_analysis reports.""" if includes_all_versions is not None: params["includes_all_versions"] = includes_all_versions self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path), data=data, params=params, all_pages=True)
python
def retrieve_all_quiz_reports(self, quiz_id, course_id, includes_all_versions=None): """ Retrieve all quiz reports. Returns a list of all available reports. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - quiz_id """ID""" path["quiz_id"] = quiz_id # OPTIONAL - includes_all_versions """Whether to retrieve reports that consider all the submissions or only the most recent. Defaults to false, ignored for item_analysis reports.""" if includes_all_versions is not None: params["includes_all_versions"] = includes_all_versions self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path), data=data, params=params, all_pages=True)
[ "def", "retrieve_all_quiz_reports", "(", "self", ",", "quiz_id", ",", "course_id", ",", "includes_all_versions", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", ...
Retrieve all quiz reports. Returns a list of all available reports.
[ "Retrieve", "all", "quiz", "reports", ".", "Returns", "a", "list", "of", "all", "available", "reports", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/quiz_reports.py#L19-L44
PGower/PyCanvas
pycanvas/apis/quiz_reports.py
QuizReportsAPI.create_quiz_report
def create_quiz_report(self, quiz_id, course_id, quiz_report_report_type, include=None, quiz_report_includes_all_versions=None): """ Create a quiz report. Create and return a new report for this quiz. If a previously generated report matches the arguments and is still current (i.e. there have been no new submissions), it will be returned. *Responses* * <code>400 Bad Request</code> if the specified report type is invalid * <code>409 Conflict</code> if a quiz report of the specified type is already being generated """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - quiz_id """ID""" path["quiz_id"] = quiz_id # REQUIRED - quiz_report[report_type] """The type of report to be generated.""" self._validate_enum(quiz_report_report_type, ["student_analysis", "item_analysis"]) data["quiz_report[report_type]"] = quiz_report_report_type # OPTIONAL - quiz_report[includes_all_versions] """Whether the report should consider all submissions or only the most recent. Defaults to false, ignored for item_analysis.""" if quiz_report_includes_all_versions is not None: data["quiz_report[includes_all_versions]"] = quiz_report_includes_all_versions # OPTIONAL - include """Whether the output should include documents for the file and/or progress objects associated with this report. (Note: JSON-API only)""" if include is not None: self._validate_enum(include, ["file", "progress"]) data["include"] = include self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path), data=data, params=params, single_item=True)
python
def create_quiz_report(self, quiz_id, course_id, quiz_report_report_type, include=None, quiz_report_includes_all_versions=None): """ Create a quiz report. Create and return a new report for this quiz. If a previously generated report matches the arguments and is still current (i.e. there have been no new submissions), it will be returned. *Responses* * <code>400 Bad Request</code> if the specified report type is invalid * <code>409 Conflict</code> if a quiz report of the specified type is already being generated """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - quiz_id """ID""" path["quiz_id"] = quiz_id # REQUIRED - quiz_report[report_type] """The type of report to be generated.""" self._validate_enum(quiz_report_report_type, ["student_analysis", "item_analysis"]) data["quiz_report[report_type]"] = quiz_report_report_type # OPTIONAL - quiz_report[includes_all_versions] """Whether the report should consider all submissions or only the most recent. Defaults to false, ignored for item_analysis.""" if quiz_report_includes_all_versions is not None: data["quiz_report[includes_all_versions]"] = quiz_report_includes_all_versions # OPTIONAL - include """Whether the output should include documents for the file and/or progress objects associated with this report. (Note: JSON-API only)""" if include is not None: self._validate_enum(include, ["file", "progress"]) data["include"] = include self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path), data=data, params=params, single_item=True)
[ "def", "create_quiz_report", "(", "self", ",", "quiz_id", ",", "course_id", ",", "quiz_report_report_type", ",", "include", "=", "None", ",", "quiz_report_includes_all_versions", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", ...
Create a quiz report. Create and return a new report for this quiz. If a previously generated report matches the arguments and is still current (i.e. there have been no new submissions), it will be returned. *Responses* * <code>400 Bad Request</code> if the specified report type is invalid * <code>409 Conflict</code> if a quiz report of the specified type is already being generated
[ "Create", "a", "quiz", "report", ".", "Create", "and", "return", "a", "new", "report", "for", "this", "quiz", ".", "If", "a", "previously", "generated", "report", "matches", "the", "arguments", "and", "is", "still", "current", "(", "i", ".", "e", ".", ...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/quiz_reports.py#L46-L91
bioasp/caspo
caspo/core/setup.py
Setup.clampings_iter
def clampings_iter(self, cues=None): """ Iterates over all possible clampings of this experimental setup Parameters ---------- cues : Optional[iterable] If given, restricts clampings over given species names Yields ------ caspo.core.clamping.Clamping The next clamping with respect to the experimental setup """ s = cues or list(self.stimuli + self.inhibitors) clampings = it.chain.from_iterable(it.combinations(s, r) for r in xrange(len(s) + 1)) literals_tpl = {} for stimulus in self.stimuli: literals_tpl[stimulus] = -1 for c in clampings: literals = literals_tpl.copy() for cues in c: if cues in self.stimuli: literals[cues] = 1 else: literals[cues] = -1 yield Clamping(literals.iteritems())
python
def clampings_iter(self, cues=None): """ Iterates over all possible clampings of this experimental setup Parameters ---------- cues : Optional[iterable] If given, restricts clampings over given species names Yields ------ caspo.core.clamping.Clamping The next clamping with respect to the experimental setup """ s = cues or list(self.stimuli + self.inhibitors) clampings = it.chain.from_iterable(it.combinations(s, r) for r in xrange(len(s) + 1)) literals_tpl = {} for stimulus in self.stimuli: literals_tpl[stimulus] = -1 for c in clampings: literals = literals_tpl.copy() for cues in c: if cues in self.stimuli: literals[cues] = 1 else: literals[cues] = -1 yield Clamping(literals.iteritems())
[ "def", "clampings_iter", "(", "self", ",", "cues", "=", "None", ")", ":", "s", "=", "cues", "or", "list", "(", "self", ".", "stimuli", "+", "self", ".", "inhibitors", ")", "clampings", "=", "it", ".", "chain", ".", "from_iterable", "(", "it", ".", ...
Iterates over all possible clampings of this experimental setup Parameters ---------- cues : Optional[iterable] If given, restricts clampings over given species names Yields ------ caspo.core.clamping.Clamping The next clamping with respect to the experimental setup
[ "Iterates", "over", "all", "possible", "clampings", "of", "this", "experimental", "setup" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/setup.py#L60-L90
bioasp/caspo
caspo/core/setup.py
Setup.to_funset
def to_funset(self): """ Converts the experimental setup to a set of `gringo.Fun`_ object instances Returns ------- set The set of `gringo.Fun`_ object instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun """ fs = set((gringo.Fun('stimulus', [str(var)]) for var in self.stimuli)) fs = fs.union((gringo.Fun('inhibitor', [str(var)]) for var in self.inhibitors)) fs = fs.union((gringo.Fun('readout', [str(var)]) for var in self.readouts)) return fs
python
def to_funset(self): """ Converts the experimental setup to a set of `gringo.Fun`_ object instances Returns ------- set The set of `gringo.Fun`_ object instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun """ fs = set((gringo.Fun('stimulus', [str(var)]) for var in self.stimuli)) fs = fs.union((gringo.Fun('inhibitor', [str(var)]) for var in self.inhibitors)) fs = fs.union((gringo.Fun('readout', [str(var)]) for var in self.readouts)) return fs
[ "def", "to_funset", "(", "self", ")", ":", "fs", "=", "set", "(", "(", "gringo", ".", "Fun", "(", "'stimulus'", ",", "[", "str", "(", "var", ")", "]", ")", "for", "var", "in", "self", ".", "stimuli", ")", ")", "fs", "=", "fs", ".", "union", "...
Converts the experimental setup to a set of `gringo.Fun`_ object instances Returns ------- set The set of `gringo.Fun`_ object instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
[ "Converts", "the", "experimental", "setup", "to", "a", "set", "of", "gringo", ".", "Fun", "_", "object", "instances" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/setup.py#L92-L108
bioasp/caspo
caspo/core/setup.py
Setup.from_json
def from_json(cls, filename): """ Creates an experimental setup from a JSON file Parameters ---------- filename : str Absolute path to JSON file Returns ------- caspo.core.setup.Setup Created object instance """ with open(filename) as fp: raw = json.load(fp) return cls(raw['stimuli'], raw['inhibitors'], raw['readouts'])
python
def from_json(cls, filename): """ Creates an experimental setup from a JSON file Parameters ---------- filename : str Absolute path to JSON file Returns ------- caspo.core.setup.Setup Created object instance """ with open(filename) as fp: raw = json.load(fp) return cls(raw['stimuli'], raw['inhibitors'], raw['readouts'])
[ "def", "from_json", "(", "cls", ",", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "fp", ":", "raw", "=", "json", ".", "load", "(", "fp", ")", "return", "cls", "(", "raw", "[", "'stimuli'", "]", ",", "raw", "[", "'inhibitors'", ...
Creates an experimental setup from a JSON file Parameters ---------- filename : str Absolute path to JSON file Returns ------- caspo.core.setup.Setup Created object instance
[ "Creates", "an", "experimental", "setup", "from", "a", "JSON", "file" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/setup.py#L111-L128
bioasp/caspo
caspo/core/setup.py
Setup.to_json
def to_json(self, filename): """ Writes the experimental setup to a JSON file Parameters ---------- filename : str Absolute path where to write the JSON file """ with open(filename, 'w') as fp: json.dump(dict(stimuli=self.stimuli, inhibitors=self.inhibitors, readouts=self.readouts), fp)
python
def to_json(self, filename): """ Writes the experimental setup to a JSON file Parameters ---------- filename : str Absolute path where to write the JSON file """ with open(filename, 'w') as fp: json.dump(dict(stimuli=self.stimuli, inhibitors=self.inhibitors, readouts=self.readouts), fp)
[ "def", "to_json", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fp", ":", "json", ".", "dump", "(", "dict", "(", "stimuli", "=", "self", ".", "stimuli", ",", "inhibitors", "=", "self", ".", "inhibi...
Writes the experimental setup to a JSON file Parameters ---------- filename : str Absolute path where to write the JSON file
[ "Writes", "the", "experimental", "setup", "to", "a", "JSON", "file" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/setup.py#L130-L140
bioasp/caspo
caspo/core/setup.py
Setup.filter
def filter(self, networks): """ Returns a new experimental setup restricted to species present in the given list of networks Parameters ---------- networks : :class:`caspo.core.logicalnetwork.LogicalNetworkList` List of logical networks Returns ------- caspo.core.setup.Setup The restricted experimental setup """ cues = self.stimuli + self.inhibitors active_cues = set() active_readouts = set() for clause, var in networks.mappings: active_cues = active_cues.union((l for (l, s) in clause if l in cues)) if var in self.readouts: active_readouts.add(var) return Setup(active_cues.intersection(self.stimuli), active_cues.intersection(self.inhibitors), active_readouts)
python
def filter(self, networks): """ Returns a new experimental setup restricted to species present in the given list of networks Parameters ---------- networks : :class:`caspo.core.logicalnetwork.LogicalNetworkList` List of logical networks Returns ------- caspo.core.setup.Setup The restricted experimental setup """ cues = self.stimuli + self.inhibitors active_cues = set() active_readouts = set() for clause, var in networks.mappings: active_cues = active_cues.union((l for (l, s) in clause if l in cues)) if var in self.readouts: active_readouts.add(var) return Setup(active_cues.intersection(self.stimuli), active_cues.intersection(self.inhibitors), active_readouts)
[ "def", "filter", "(", "self", ",", "networks", ")", ":", "cues", "=", "self", ".", "stimuli", "+", "self", ".", "inhibitors", "active_cues", "=", "set", "(", ")", "active_readouts", "=", "set", "(", ")", "for", "clause", ",", "var", "in", "networks", ...
Returns a new experimental setup restricted to species present in the given list of networks Parameters ---------- networks : :class:`caspo.core.logicalnetwork.LogicalNetworkList` List of logical networks Returns ------- caspo.core.setup.Setup The restricted experimental setup
[ "Returns", "a", "new", "experimental", "setup", "restricted", "to", "species", "present", "in", "the", "given", "list", "of", "networks" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/setup.py#L142-L164
bioasp/caspo
caspo/core/setup.py
Setup.cues
def cues(self, rename_inhibitors=False): """ Returns stimuli and inhibitors species of this experimental setup Parameters ---------- rename_inhibitors : boolean If True, rename inhibitors with an ending 'i' as in MIDAS files. Returns ------- list List of species names in order: first stimuli followed by inhibitors """ if rename_inhibitors: return self.stimuli + [i+'i' for i in self.inhibitors] else: return self.stimuli + self.inhibitors
python
def cues(self, rename_inhibitors=False): """ Returns stimuli and inhibitors species of this experimental setup Parameters ---------- rename_inhibitors : boolean If True, rename inhibitors with an ending 'i' as in MIDAS files. Returns ------- list List of species names in order: first stimuli followed by inhibitors """ if rename_inhibitors: return self.stimuli + [i+'i' for i in self.inhibitors] else: return self.stimuli + self.inhibitors
[ "def", "cues", "(", "self", ",", "rename_inhibitors", "=", "False", ")", ":", "if", "rename_inhibitors", ":", "return", "self", ".", "stimuli", "+", "[", "i", "+", "'i'", "for", "i", "in", "self", ".", "inhibitors", "]", "else", ":", "return", "self", ...
Returns stimuli and inhibitors species of this experimental setup Parameters ---------- rename_inhibitors : boolean If True, rename inhibitors with an ending 'i' as in MIDAS files. Returns ------- list List of species names in order: first stimuli followed by inhibitors
[ "Returns", "stimuli", "and", "inhibitors", "species", "of", "this", "experimental", "setup" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/setup.py#L166-L183
PGower/PyCanvas
pycanvas/apis/external_tools.py
ExternalToolsAPI.list_external_tools_courses
def list_external_tools_courses(self, course_id, include_parents=None, search_term=None, selectable=None): """ List external tools. Returns the paginated list of external tools for the current context. See the get request docs for a single tool for a list of properties on an external tool. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - search_term """The partial name of the tools to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - selectable """If true, then only tools that are meant to be selectable are returned""" if selectable is not None: params["selectable"] = selectable # OPTIONAL - include_parents """If true, then include tools installed in all accounts above the current context""" if include_parents is not None: params["include_parents"] = include_parents self.logger.debug("GET /api/v1/courses/{course_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/external_tools".format(**path), data=data, params=params, no_data=True)
python
def list_external_tools_courses(self, course_id, include_parents=None, search_term=None, selectable=None): """ List external tools. Returns the paginated list of external tools for the current context. See the get request docs for a single tool for a list of properties on an external tool. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - search_term """The partial name of the tools to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - selectable """If true, then only tools that are meant to be selectable are returned""" if selectable is not None: params["selectable"] = selectable # OPTIONAL - include_parents """If true, then include tools installed in all accounts above the current context""" if include_parents is not None: params["include_parents"] = include_parents self.logger.debug("GET /api/v1/courses/{course_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/external_tools".format(**path), data=data, params=params, no_data=True)
[ "def", "list_external_tools_courses", "(", "self", ",", "course_id", ",", "include_parents", "=", "None", ",", "search_term", "=", "None", ",", "selectable", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", ...
List external tools. Returns the paginated list of external tools for the current context. See the get request docs for a single tool for a list of properties on an external tool.
[ "List", "external", "tools", ".", "Returns", "the", "paginated", "list", "of", "external", "tools", "for", "the", "current", "context", ".", "See", "the", "get", "request", "docs", "for", "a", "single", "tool", "for", "a", "list", "of", "properties", "on",...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/external_tools.py#L19-L50
PGower/PyCanvas
pycanvas/apis/external_tools.py
ExternalToolsAPI.list_external_tools_accounts
def list_external_tools_accounts(self, account_id, include_parents=None, search_term=None, selectable=None): """ List external tools. Returns the paginated list of external tools for the current context. See the get request docs for a single tool for a list of properties on an external tool. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # OPTIONAL - search_term """The partial name of the tools to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - selectable """If true, then only tools that are meant to be selectable are returned""" if selectable is not None: params["selectable"] = selectable # OPTIONAL - include_parents """If true, then include tools installed in all accounts above the current context""" if include_parents is not None: params["include_parents"] = include_parents self.logger.debug("GET /api/v1/accounts/{account_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/external_tools".format(**path), data=data, params=params, no_data=True)
python
def list_external_tools_accounts(self, account_id, include_parents=None, search_term=None, selectable=None): """ List external tools. Returns the paginated list of external tools for the current context. See the get request docs for a single tool for a list of properties on an external tool. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # OPTIONAL - search_term """The partial name of the tools to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - selectable """If true, then only tools that are meant to be selectable are returned""" if selectable is not None: params["selectable"] = selectable # OPTIONAL - include_parents """If true, then include tools installed in all accounts above the current context""" if include_parents is not None: params["include_parents"] = include_parents self.logger.debug("GET /api/v1/accounts/{account_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/external_tools".format(**path), data=data, params=params, no_data=True)
[ "def", "list_external_tools_accounts", "(", "self", ",", "account_id", ",", "include_parents", "=", "None", ",", "search_term", "=", "None", ",", "selectable", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", ...
List external tools. Returns the paginated list of external tools for the current context. See the get request docs for a single tool for a list of properties on an external tool.
[ "List", "external", "tools", ".", "Returns", "the", "paginated", "list", "of", "external", "tools", "for", "the", "current", "context", ".", "See", "the", "get", "request", "docs", "for", "a", "single", "tool", "for", "a", "list", "of", "properties", "on",...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/external_tools.py#L52-L83
PGower/PyCanvas
pycanvas/apis/external_tools.py
ExternalToolsAPI.list_external_tools_groups
def list_external_tools_groups(self, group_id, include_parents=None, search_term=None, selectable=None): """ List external tools. Returns the paginated list of external tools for the current context. See the get request docs for a single tool for a list of properties on an external tool. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # OPTIONAL - search_term """The partial name of the tools to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - selectable """If true, then only tools that are meant to be selectable are returned""" if selectable is not None: params["selectable"] = selectable # OPTIONAL - include_parents """If true, then include tools installed in all accounts above the current context""" if include_parents is not None: params["include_parents"] = include_parents self.logger.debug("GET /api/v1/groups/{group_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/external_tools".format(**path), data=data, params=params, no_data=True)
python
def list_external_tools_groups(self, group_id, include_parents=None, search_term=None, selectable=None): """ List external tools. Returns the paginated list of external tools for the current context. See the get request docs for a single tool for a list of properties on an external tool. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # OPTIONAL - search_term """The partial name of the tools to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - selectable """If true, then only tools that are meant to be selectable are returned""" if selectable is not None: params["selectable"] = selectable # OPTIONAL - include_parents """If true, then include tools installed in all accounts above the current context""" if include_parents is not None: params["include_parents"] = include_parents self.logger.debug("GET /api/v1/groups/{group_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/external_tools".format(**path), data=data, params=params, no_data=True)
[ "def", "list_external_tools_groups", "(", "self", ",", "group_id", ",", "include_parents", "=", "None", ",", "search_term", "=", "None", ",", "selectable", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "#...
List external tools. Returns the paginated list of external tools for the current context. See the get request docs for a single tool for a list of properties on an external tool.
[ "List", "external", "tools", ".", "Returns", "the", "paginated", "list", "of", "external", "tools", "for", "the", "current", "context", ".", "See", "the", "get", "request", "docs", "for", "a", "single", "tool", "for", "a", "list", "of", "properties", "on",...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/external_tools.py#L85-L116
PGower/PyCanvas
pycanvas/apis/external_tools.py
ExternalToolsAPI.get_sessionless_launch_url_for_external_tool_courses
def get_sessionless_launch_url_for_external_tool_courses(self, course_id, assignment_id=None, id=None, launch_type=None, module_item_id=None, url=None): """ Get a sessionless launch url for an external tool. Returns a sessionless launch url for an external tool. NOTE: Either the id or url must be provided unless launch_type is assessment or module_item. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - id """The external id of the tool to launch.""" if id is not None: params["id"] = id # OPTIONAL - url """The LTI launch url for the external tool.""" if url is not None: params["url"] = url # OPTIONAL - assignment_id """The assignment id for an assignment launch. Required if launch_type is set to "assessment".""" if assignment_id is not None: params["assignment_id"] = assignment_id # OPTIONAL - module_item_id """The assignment id for a module item launch. Required if launch_type is set to "module_item".""" if module_item_id is not None: params["module_item_id"] = module_item_id # OPTIONAL - launch_type """The type of launch to perform on the external tool. Placement names (eg. "course_navigation") can also be specified to use the custom launch url for that placement; if done, the tool id must be provided.""" if launch_type is not None: self._validate_enum(launch_type, ["assessment", "module_item"]) params["launch_type"] = launch_type self.logger.debug("GET /api/v1/courses/{course_id}/external_tools/sessionless_launch with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/external_tools/sessionless_launch".format(**path), data=data, params=params, no_data=True)
python
def get_sessionless_launch_url_for_external_tool_courses(self, course_id, assignment_id=None, id=None, launch_type=None, module_item_id=None, url=None): """ Get a sessionless launch url for an external tool. Returns a sessionless launch url for an external tool. NOTE: Either the id or url must be provided unless launch_type is assessment or module_item. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - id """The external id of the tool to launch.""" if id is not None: params["id"] = id # OPTIONAL - url """The LTI launch url for the external tool.""" if url is not None: params["url"] = url # OPTIONAL - assignment_id """The assignment id for an assignment launch. Required if launch_type is set to "assessment".""" if assignment_id is not None: params["assignment_id"] = assignment_id # OPTIONAL - module_item_id """The assignment id for a module item launch. Required if launch_type is set to "module_item".""" if module_item_id is not None: params["module_item_id"] = module_item_id # OPTIONAL - launch_type """The type of launch to perform on the external tool. Placement names (eg. "course_navigation") can also be specified to use the custom launch url for that placement; if done, the tool id must be provided.""" if launch_type is not None: self._validate_enum(launch_type, ["assessment", "module_item"]) params["launch_type"] = launch_type self.logger.debug("GET /api/v1/courses/{course_id}/external_tools/sessionless_launch with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/external_tools/sessionless_launch".format(**path), data=data, params=params, no_data=True)
[ "def", "get_sessionless_launch_url_for_external_tool_courses", "(", "self", ",", "course_id", ",", "assignment_id", "=", "None", ",", "id", "=", "None", ",", "launch_type", "=", "None", ",", "module_item_id", "=", "None", ",", "url", "=", "None", ")", ":", "pa...
Get a sessionless launch url for an external tool. Returns a sessionless launch url for an external tool. NOTE: Either the id or url must be provided unless launch_type is assessment or module_item.
[ "Get", "a", "sessionless", "launch", "url", "for", "an", "external", "tool", ".", "Returns", "a", "sessionless", "launch", "url", "for", "an", "external", "tool", ".", "NOTE", ":", "Either", "the", "id", "or", "url", "must", "be", "provided", "unless", "...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/external_tools.py#L118-L163
PGower/PyCanvas
pycanvas/apis/external_tools.py
ExternalToolsAPI.get_single_external_tool_courses
def get_single_external_tool_courses(self, course_id, external_tool_id): """ Get a single external tool. Returns the specified external tool. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - external_tool_id """ID""" path["external_tool_id"] = external_tool_id self.logger.debug("GET /api/v1/courses/{course_id}/external_tools/{external_tool_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/external_tools/{external_tool_id}".format(**path), data=data, params=params, no_data=True)
python
def get_single_external_tool_courses(self, course_id, external_tool_id): """ Get a single external tool. Returns the specified external tool. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - external_tool_id """ID""" path["external_tool_id"] = external_tool_id self.logger.debug("GET /api/v1/courses/{course_id}/external_tools/{external_tool_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/external_tools/{external_tool_id}".format(**path), data=data, params=params, no_data=True)
[ "def", "get_single_external_tool_courses", "(", "self", ",", "course_id", ",", "external_tool_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", ...
Get a single external tool. Returns the specified external tool.
[ "Get", "a", "single", "external", "tool", ".", "Returns", "the", "specified", "external", "tool", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/external_tools.py#L212-L231
PGower/PyCanvas
pycanvas/apis/external_tools.py
ExternalToolsAPI.get_single_external_tool_accounts
def get_single_external_tool_accounts(self, account_id, external_tool_id): """ Get a single external tool. Returns the specified external tool. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - PATH - external_tool_id """ID""" path["external_tool_id"] = external_tool_id self.logger.debug("GET /api/v1/accounts/{account_id}/external_tools/{external_tool_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/external_tools/{external_tool_id}".format(**path), data=data, params=params, no_data=True)
python
def get_single_external_tool_accounts(self, account_id, external_tool_id): """ Get a single external tool. Returns the specified external tool. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - PATH - external_tool_id """ID""" path["external_tool_id"] = external_tool_id self.logger.debug("GET /api/v1/accounts/{account_id}/external_tools/{external_tool_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/external_tools/{external_tool_id}".format(**path), data=data, params=params, no_data=True)
[ "def", "get_single_external_tool_accounts", "(", "self", ",", "account_id", ",", "external_tool_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - account_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"account_id\"...
Get a single external tool. Returns the specified external tool.
[ "Get", "a", "single", "external", "tool", ".", "Returns", "the", "specified", "external", "tool", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/external_tools.py#L233-L252
PGower/PyCanvas
pycanvas/apis/external_tools.py
ExternalToolsAPI.create_external_tool_courses
def create_external_tool_courses(self, name, course_id, consumer_key, privacy_level, shared_secret, account_navigation_enabled=None, account_navigation_selection_height=None, account_navigation_selection_width=None, account_navigation_text=None, account_navigation_url=None, config_type=None, config_url=None, config_xml=None, course_home_sub_navigation_enabled=None, course_home_sub_navigation_icon_url=None, course_home_sub_navigation_text=None, course_home_sub_navigation_url=None, course_navigation_default=None, course_navigation_enabled=None, course_navigation_text=None, course_navigation_visibility=None, course_navigation_windowTarget=None, custom_fields_field_name=None, description=None, domain=None, editor_button_enabled=None, editor_button_icon_url=None, editor_button_message_type=None, editor_button_selection_height=None, editor_button_selection_width=None, editor_button_url=None, homework_submission_enabled=None, homework_submission_message_type=None, homework_submission_text=None, homework_submission_url=None, icon_url=None, link_selection_enabled=None, link_selection_message_type=None, link_selection_text=None, link_selection_url=None, migration_selection_enabled=None, migration_selection_message_type=None, migration_selection_url=None, not_selectable=None, oauth_compliant=None, resource_selection_enabled=None, resource_selection_icon_url=None, resource_selection_selection_height=None, resource_selection_selection_width=None, resource_selection_url=None, text=None, tool_configuration_enabled=None, tool_configuration_message_type=None, tool_configuration_url=None, url=None, user_navigation_enabled=None, user_navigation_text=None, user_navigation_url=None): """ Create an external tool. Create an external tool in the specified course/account. The created tool will be returned, see the "show" endpoint for an example. 
""" path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - name """The name of the tool""" data["name"] = name # REQUIRED - privacy_level """What information to send to the external tool.""" self._validate_enum(privacy_level, ["anonymous", "name_only", "public"]) data["privacy_level"] = privacy_level # REQUIRED - consumer_key """The consumer key for the external tool""" data["consumer_key"] = consumer_key # REQUIRED - shared_secret """The shared secret with the external tool""" data["shared_secret"] = shared_secret # OPTIONAL - description """A description of the tool""" if description is not None: data["description"] = description # OPTIONAL - url """The url to match links against. Either "url" or "domain" should be set, not both.""" if url is not None: data["url"] = url # OPTIONAL - domain """The domain to match links against. Either "url" or "domain" should be set, not both.""" if domain is not None: data["domain"] = domain # OPTIONAL - icon_url """The url of the icon to show for this tool""" if icon_url is not None: data["icon_url"] = icon_url # OPTIONAL - text """The default text to show for this tool""" if text is not None: data["text"] = text # OPTIONAL - custom_fields[field_name] """Custom fields that will be sent to the tool consumer; can be used multiple times""" if custom_fields_field_name is not None: data["custom_fields[field_name]"] = custom_fields_field_name # OPTIONAL - account_navigation[url] """The url of the external tool for account navigation""" if account_navigation_url is not None: data["account_navigation[url]"] = account_navigation_url # OPTIONAL - account_navigation[enabled] """Set this to enable this feature""" if account_navigation_enabled is not None: data["account_navigation[enabled]"] = account_navigation_enabled # OPTIONAL - account_navigation[text] """The text that will show on the left-tab in the account navigation""" if account_navigation_text is not None: 
data["account_navigation[text]"] = account_navigation_text # OPTIONAL - account_navigation[selection_width] """The width of the dialog the tool is launched in""" if account_navigation_selection_width is not None: data["account_navigation[selection_width]"] = account_navigation_selection_width # OPTIONAL - account_navigation[selection_height] """The height of the dialog the tool is launched in""" if account_navigation_selection_height is not None: data["account_navigation[selection_height]"] = account_navigation_selection_height # OPTIONAL - user_navigation[url] """The url of the external tool for user navigation""" if user_navigation_url is not None: data["user_navigation[url]"] = user_navigation_url # OPTIONAL - user_navigation[enabled] """Set this to enable this feature""" if user_navigation_enabled is not None: data["user_navigation[enabled]"] = user_navigation_enabled # OPTIONAL - user_navigation[text] """The text that will show on the left-tab in the user navigation""" if user_navigation_text is not None: data["user_navigation[text]"] = user_navigation_text # OPTIONAL - course_home_sub_navigation[url] """The url of the external tool for right-side course home navigation menu""" if course_home_sub_navigation_url is not None: data["course_home_sub_navigation[url]"] = course_home_sub_navigation_url # OPTIONAL - course_home_sub_navigation[enabled] """Set this to enable this feature""" if course_home_sub_navigation_enabled is not None: data["course_home_sub_navigation[enabled]"] = course_home_sub_navigation_enabled # OPTIONAL - course_home_sub_navigation[text] """The text that will show on the right-side course home navigation menu""" if course_home_sub_navigation_text is not None: data["course_home_sub_navigation[text]"] = course_home_sub_navigation_text # OPTIONAL - course_home_sub_navigation[icon_url] """The url of the icon to show in the right-side course home navigation menu""" if course_home_sub_navigation_icon_url is not None: 
data["course_home_sub_navigation[icon_url]"] = course_home_sub_navigation_icon_url # OPTIONAL - course_navigation[enabled] """Set this to enable this feature""" if course_navigation_enabled is not None: data["course_navigation[enabled]"] = course_navigation_enabled # OPTIONAL - course_navigation[text] """The text that will show on the left-tab in the course navigation""" if course_navigation_text is not None: data["course_navigation[text]"] = course_navigation_text # OPTIONAL - course_navigation[visibility] """Who will see the navigation tab. "admins" for course admins, "members" for students, null for everyone""" if course_navigation_visibility is not None: self._validate_enum(course_navigation_visibility, ["admins", "members"]) data["course_navigation[visibility]"] = course_navigation_visibility # OPTIONAL - course_navigation[windowTarget] """Determines how the navigation tab will be opened. "_blank" Launches the external tool in a new window or tab. "_self" (Default) Launches the external tool in an iframe inside of Canvas.""" if course_navigation_windowTarget is not None: self._validate_enum(course_navigation_windowTarget, ["_blank", "_self"]) data["course_navigation[windowTarget]"] = course_navigation_windowTarget # OPTIONAL - course_navigation[default] """Whether the navigation option will show in the course by default or whether the teacher will have to explicitly enable it""" if course_navigation_default is not None: data["course_navigation[default]"] = course_navigation_default # OPTIONAL - editor_button[url] """The url of the external tool""" if editor_button_url is not None: data["editor_button[url]"] = editor_button_url # OPTIONAL - editor_button[enabled] """Set this to enable this feature""" if editor_button_enabled is not None: data["editor_button[enabled]"] = editor_button_enabled # OPTIONAL - editor_button[icon_url] """The url of the icon to show in the WYSIWYG editor""" if editor_button_icon_url is not None: data["editor_button[icon_url]"] = 
editor_button_icon_url # OPTIONAL - editor_button[selection_width] """The width of the dialog the tool is launched in""" if editor_button_selection_width is not None: data["editor_button[selection_width]"] = editor_button_selection_width # OPTIONAL - editor_button[selection_height] """The height of the dialog the tool is launched in""" if editor_button_selection_height is not None: data["editor_button[selection_height]"] = editor_button_selection_height # OPTIONAL - editor_button[message_type] """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit""" if editor_button_message_type is not None: data["editor_button[message_type]"] = editor_button_message_type # OPTIONAL - homework_submission[url] """The url of the external tool""" if homework_submission_url is not None: data["homework_submission[url]"] = homework_submission_url # OPTIONAL - homework_submission[enabled] """Set this to enable this feature""" if homework_submission_enabled is not None: data["homework_submission[enabled]"] = homework_submission_enabled # OPTIONAL - homework_submission[text] """The text that will show on the homework submission tab""" if homework_submission_text is not None: data["homework_submission[text]"] = homework_submission_text # OPTIONAL - homework_submission[message_type] """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit""" if homework_submission_message_type is not None: data["homework_submission[message_type]"] = homework_submission_message_type # OPTIONAL - link_selection[url] """The url of the external tool""" if link_selection_url is not None: data["link_selection[url]"] = link_selection_url # OPTIONAL - link_selection[enabled] """Set this to enable this feature""" if link_selection_enabled is not None: data["link_selection[enabled]"] = link_selection_enabled # OPTIONAL - link_selection[text] """The text that will show for the link selection text""" if link_selection_text is not None: 
data["link_selection[text]"] = link_selection_text # OPTIONAL - link_selection[message_type] """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit""" if link_selection_message_type is not None: data["link_selection[message_type]"] = link_selection_message_type # OPTIONAL - migration_selection[url] """The url of the external tool""" if migration_selection_url is not None: data["migration_selection[url]"] = migration_selection_url # OPTIONAL - migration_selection[enabled] """Set this to enable this feature""" if migration_selection_enabled is not None: data["migration_selection[enabled]"] = migration_selection_enabled # OPTIONAL - migration_selection[message_type] """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit""" if migration_selection_message_type is not None: data["migration_selection[message_type]"] = migration_selection_message_type # OPTIONAL - tool_configuration[url] """The url of the external tool""" if tool_configuration_url is not None: data["tool_configuration[url]"] = tool_configuration_url # OPTIONAL - tool_configuration[enabled] """Set this to enable this feature""" if tool_configuration_enabled is not None: data["tool_configuration[enabled]"] = tool_configuration_enabled # OPTIONAL - tool_configuration[message_type] """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit""" if tool_configuration_message_type is not None: data["tool_configuration[message_type]"] = tool_configuration_message_type # OPTIONAL - resource_selection[url] """The url of the external tool""" if resource_selection_url is not None: data["resource_selection[url]"] = resource_selection_url # OPTIONAL - resource_selection[enabled] """Set this to enable this feature""" if resource_selection_enabled is not None: data["resource_selection[enabled]"] = resource_selection_enabled # OPTIONAL - resource_selection[icon_url] """The url of the icon to show in the 
module external tool list""" if resource_selection_icon_url is not None: data["resource_selection[icon_url]"] = resource_selection_icon_url # OPTIONAL - resource_selection[selection_width] """The width of the dialog the tool is launched in""" if resource_selection_selection_width is not None: data["resource_selection[selection_width]"] = resource_selection_selection_width # OPTIONAL - resource_selection[selection_height] """The height of the dialog the tool is launched in""" if resource_selection_selection_height is not None: data["resource_selection[selection_height]"] = resource_selection_selection_height # OPTIONAL - config_type """Configuration can be passed in as CC xml instead of using query parameters. If this value is "by_url" or "by_xml" then an xml configuration will be expected in either the "config_xml" or "config_url" parameter. Note that the name parameter overrides the tool name provided in the xml""" if config_type is not None: data["config_type"] = config_type # OPTIONAL - config_xml """XML tool configuration, as specified in the CC xml specification. This is required if "config_type" is set to "by_xml"""" if config_xml is not None: data["config_xml"] = config_xml # OPTIONAL - config_url """URL where the server can retrieve an XML tool configuration, as specified in the CC xml specification. 
This is required if "config_type" is set to "by_url"""" if config_url is not None: data["config_url"] = config_url # OPTIONAL - not_selectable """Default: false, if set to true the tool won't show up in the external tool selection UI in modules and assignments""" if not_selectable is not None: data["not_selectable"] = not_selectable # OPTIONAL - oauth_compliant """Default: false, if set to true LTI query params will not be copied to the post body.""" if oauth_compliant is not None: data["oauth_compliant"] = oauth_compliant self.logger.debug("POST /api/v1/courses/{course_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/external_tools".format(**path), data=data, params=params, no_data=True)
python
def create_external_tool_courses(self, name, course_id, consumer_key, privacy_level, shared_secret, account_navigation_enabled=None, account_navigation_selection_height=None, account_navigation_selection_width=None, account_navigation_text=None, account_navigation_url=None, config_type=None, config_url=None, config_xml=None, course_home_sub_navigation_enabled=None, course_home_sub_navigation_icon_url=None, course_home_sub_navigation_text=None, course_home_sub_navigation_url=None, course_navigation_default=None, course_navigation_enabled=None, course_navigation_text=None, course_navigation_visibility=None, course_navigation_windowTarget=None, custom_fields_field_name=None, description=None, domain=None, editor_button_enabled=None, editor_button_icon_url=None, editor_button_message_type=None, editor_button_selection_height=None, editor_button_selection_width=None, editor_button_url=None, homework_submission_enabled=None, homework_submission_message_type=None, homework_submission_text=None, homework_submission_url=None, icon_url=None, link_selection_enabled=None, link_selection_message_type=None, link_selection_text=None, link_selection_url=None, migration_selection_enabled=None, migration_selection_message_type=None, migration_selection_url=None, not_selectable=None, oauth_compliant=None, resource_selection_enabled=None, resource_selection_icon_url=None, resource_selection_selection_height=None, resource_selection_selection_width=None, resource_selection_url=None, text=None, tool_configuration_enabled=None, tool_configuration_message_type=None, tool_configuration_url=None, url=None, user_navigation_enabled=None, user_navigation_text=None, user_navigation_url=None): """ Create an external tool. Create an external tool in the specified course/account. The created tool will be returned, see the "show" endpoint for an example. 
""" path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - name """The name of the tool""" data["name"] = name # REQUIRED - privacy_level """What information to send to the external tool.""" self._validate_enum(privacy_level, ["anonymous", "name_only", "public"]) data["privacy_level"] = privacy_level # REQUIRED - consumer_key """The consumer key for the external tool""" data["consumer_key"] = consumer_key # REQUIRED - shared_secret """The shared secret with the external tool""" data["shared_secret"] = shared_secret # OPTIONAL - description """A description of the tool""" if description is not None: data["description"] = description # OPTIONAL - url """The url to match links against. Either "url" or "domain" should be set, not both.""" if url is not None: data["url"] = url # OPTIONAL - domain """The domain to match links against. Either "url" or "domain" should be set, not both.""" if domain is not None: data["domain"] = domain # OPTIONAL - icon_url """The url of the icon to show for this tool""" if icon_url is not None: data["icon_url"] = icon_url # OPTIONAL - text """The default text to show for this tool""" if text is not None: data["text"] = text # OPTIONAL - custom_fields[field_name] """Custom fields that will be sent to the tool consumer; can be used multiple times""" if custom_fields_field_name is not None: data["custom_fields[field_name]"] = custom_fields_field_name # OPTIONAL - account_navigation[url] """The url of the external tool for account navigation""" if account_navigation_url is not None: data["account_navigation[url]"] = account_navigation_url # OPTIONAL - account_navigation[enabled] """Set this to enable this feature""" if account_navigation_enabled is not None: data["account_navigation[enabled]"] = account_navigation_enabled # OPTIONAL - account_navigation[text] """The text that will show on the left-tab in the account navigation""" if account_navigation_text is not None: 
data["account_navigation[text]"] = account_navigation_text # OPTIONAL - account_navigation[selection_width] """The width of the dialog the tool is launched in""" if account_navigation_selection_width is not None: data["account_navigation[selection_width]"] = account_navigation_selection_width # OPTIONAL - account_navigation[selection_height] """The height of the dialog the tool is launched in""" if account_navigation_selection_height is not None: data["account_navigation[selection_height]"] = account_navigation_selection_height # OPTIONAL - user_navigation[url] """The url of the external tool for user navigation""" if user_navigation_url is not None: data["user_navigation[url]"] = user_navigation_url # OPTIONAL - user_navigation[enabled] """Set this to enable this feature""" if user_navigation_enabled is not None: data["user_navigation[enabled]"] = user_navigation_enabled # OPTIONAL - user_navigation[text] """The text that will show on the left-tab in the user navigation""" if user_navigation_text is not None: data["user_navigation[text]"] = user_navigation_text # OPTIONAL - course_home_sub_navigation[url] """The url of the external tool for right-side course home navigation menu""" if course_home_sub_navigation_url is not None: data["course_home_sub_navigation[url]"] = course_home_sub_navigation_url # OPTIONAL - course_home_sub_navigation[enabled] """Set this to enable this feature""" if course_home_sub_navigation_enabled is not None: data["course_home_sub_navigation[enabled]"] = course_home_sub_navigation_enabled # OPTIONAL - course_home_sub_navigation[text] """The text that will show on the right-side course home navigation menu""" if course_home_sub_navigation_text is not None: data["course_home_sub_navigation[text]"] = course_home_sub_navigation_text # OPTIONAL - course_home_sub_navigation[icon_url] """The url of the icon to show in the right-side course home navigation menu""" if course_home_sub_navigation_icon_url is not None: 
data["course_home_sub_navigation[icon_url]"] = course_home_sub_navigation_icon_url # OPTIONAL - course_navigation[enabled] """Set this to enable this feature""" if course_navigation_enabled is not None: data["course_navigation[enabled]"] = course_navigation_enabled # OPTIONAL - course_navigation[text] """The text that will show on the left-tab in the course navigation""" if course_navigation_text is not None: data["course_navigation[text]"] = course_navigation_text # OPTIONAL - course_navigation[visibility] """Who will see the navigation tab. "admins" for course admins, "members" for students, null for everyone""" if course_navigation_visibility is not None: self._validate_enum(course_navigation_visibility, ["admins", "members"]) data["course_navigation[visibility]"] = course_navigation_visibility # OPTIONAL - course_navigation[windowTarget] """Determines how the navigation tab will be opened. "_blank" Launches the external tool in a new window or tab. "_self" (Default) Launches the external tool in an iframe inside of Canvas.""" if course_navigation_windowTarget is not None: self._validate_enum(course_navigation_windowTarget, ["_blank", "_self"]) data["course_navigation[windowTarget]"] = course_navigation_windowTarget # OPTIONAL - course_navigation[default] """Whether the navigation option will show in the course by default or whether the teacher will have to explicitly enable it""" if course_navigation_default is not None: data["course_navigation[default]"] = course_navigation_default # OPTIONAL - editor_button[url] """The url of the external tool""" if editor_button_url is not None: data["editor_button[url]"] = editor_button_url # OPTIONAL - editor_button[enabled] """Set this to enable this feature""" if editor_button_enabled is not None: data["editor_button[enabled]"] = editor_button_enabled # OPTIONAL - editor_button[icon_url] """The url of the icon to show in the WYSIWYG editor""" if editor_button_icon_url is not None: data["editor_button[icon_url]"] = 
editor_button_icon_url # OPTIONAL - editor_button[selection_width] """The width of the dialog the tool is launched in""" if editor_button_selection_width is not None: data["editor_button[selection_width]"] = editor_button_selection_width # OPTIONAL - editor_button[selection_height] """The height of the dialog the tool is launched in""" if editor_button_selection_height is not None: data["editor_button[selection_height]"] = editor_button_selection_height # OPTIONAL - editor_button[message_type] """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit""" if editor_button_message_type is not None: data["editor_button[message_type]"] = editor_button_message_type # OPTIONAL - homework_submission[url] """The url of the external tool""" if homework_submission_url is not None: data["homework_submission[url]"] = homework_submission_url # OPTIONAL - homework_submission[enabled] """Set this to enable this feature""" if homework_submission_enabled is not None: data["homework_submission[enabled]"] = homework_submission_enabled # OPTIONAL - homework_submission[text] """The text that will show on the homework submission tab""" if homework_submission_text is not None: data["homework_submission[text]"] = homework_submission_text # OPTIONAL - homework_submission[message_type] """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit""" if homework_submission_message_type is not None: data["homework_submission[message_type]"] = homework_submission_message_type # OPTIONAL - link_selection[url] """The url of the external tool""" if link_selection_url is not None: data["link_selection[url]"] = link_selection_url # OPTIONAL - link_selection[enabled] """Set this to enable this feature""" if link_selection_enabled is not None: data["link_selection[enabled]"] = link_selection_enabled # OPTIONAL - link_selection[text] """The text that will show for the link selection text""" if link_selection_text is not None: 
data["link_selection[text]"] = link_selection_text # OPTIONAL - link_selection[message_type] """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit""" if link_selection_message_type is not None: data["link_selection[message_type]"] = link_selection_message_type # OPTIONAL - migration_selection[url] """The url of the external tool""" if migration_selection_url is not None: data["migration_selection[url]"] = migration_selection_url # OPTIONAL - migration_selection[enabled] """Set this to enable this feature""" if migration_selection_enabled is not None: data["migration_selection[enabled]"] = migration_selection_enabled # OPTIONAL - migration_selection[message_type] """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit""" if migration_selection_message_type is not None: data["migration_selection[message_type]"] = migration_selection_message_type # OPTIONAL - tool_configuration[url] """The url of the external tool""" if tool_configuration_url is not None: data["tool_configuration[url]"] = tool_configuration_url # OPTIONAL - tool_configuration[enabled] """Set this to enable this feature""" if tool_configuration_enabled is not None: data["tool_configuration[enabled]"] = tool_configuration_enabled # OPTIONAL - tool_configuration[message_type] """Set this to ContentItemSelectionRequest to tell the tool to use content-item; otherwise, omit""" if tool_configuration_message_type is not None: data["tool_configuration[message_type]"] = tool_configuration_message_type # OPTIONAL - resource_selection[url] """The url of the external tool""" if resource_selection_url is not None: data["resource_selection[url]"] = resource_selection_url # OPTIONAL - resource_selection[enabled] """Set this to enable this feature""" if resource_selection_enabled is not None: data["resource_selection[enabled]"] = resource_selection_enabled # OPTIONAL - resource_selection[icon_url] """The url of the icon to show in the 
module external tool list""" if resource_selection_icon_url is not None: data["resource_selection[icon_url]"] = resource_selection_icon_url # OPTIONAL - resource_selection[selection_width] """The width of the dialog the tool is launched in""" if resource_selection_selection_width is not None: data["resource_selection[selection_width]"] = resource_selection_selection_width # OPTIONAL - resource_selection[selection_height] """The height of the dialog the tool is launched in""" if resource_selection_selection_height is not None: data["resource_selection[selection_height]"] = resource_selection_selection_height # OPTIONAL - config_type """Configuration can be passed in as CC xml instead of using query parameters. If this value is "by_url" or "by_xml" then an xml configuration will be expected in either the "config_xml" or "config_url" parameter. Note that the name parameter overrides the tool name provided in the xml""" if config_type is not None: data["config_type"] = config_type # OPTIONAL - config_xml """XML tool configuration, as specified in the CC xml specification. This is required if "config_type" is set to "by_xml"""" if config_xml is not None: data["config_xml"] = config_xml # OPTIONAL - config_url """URL where the server can retrieve an XML tool configuration, as specified in the CC xml specification. 
This is required if "config_type" is set to "by_url"""" if config_url is not None: data["config_url"] = config_url # OPTIONAL - not_selectable """Default: false, if set to true the tool won't show up in the external tool selection UI in modules and assignments""" if not_selectable is not None: data["not_selectable"] = not_selectable # OPTIONAL - oauth_compliant """Default: false, if set to true LTI query params will not be copied to the post body.""" if oauth_compliant is not None: data["oauth_compliant"] = oauth_compliant self.logger.debug("POST /api/v1/courses/{course_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/external_tools".format(**path), data=data, params=params, no_data=True)
[ "def", "create_external_tool_courses", "(", "self", ",", "name", ",", "course_id", ",", "consumer_key", ",", "privacy_level", ",", "shared_secret", ",", "account_navigation_enabled", "=", "None", ",", "account_navigation_selection_height", "=", "None", ",", "account_nav...
Create an external tool. Create an external tool in the specified course/account. The created tool will be returned, see the "show" endpoint for an example.
[ "Create", "an", "external", "tool", ".", "Create", "an", "external", "tool", "in", "the", "specified", "course", "/", "account", ".", "The", "created", "tool", "will", "be", "returned", "see", "the", "show", "endpoint", "for", "an", "example", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/external_tools.py#L254-L575
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.insert
def insert(self, key, value): """Insert a key-value pair in the list. The pair is inserted at the correct location so that the list remains sorted on *key*. If a pair with the same key is already in the list, then the pair is appended after all other pairs with that key. """ self._find_lte(key) node = self._create_node(key, value) self._insert(node)
python
def insert(self, key, value): """Insert a key-value pair in the list. The pair is inserted at the correct location so that the list remains sorted on *key*. If a pair with the same key is already in the list, then the pair is appended after all other pairs with that key. """ self._find_lte(key) node = self._create_node(key, value) self._insert(node)
[ "def", "insert", "(", "self", ",", "key", ",", "value", ")", ":", "self", ".", "_find_lte", "(", "key", ")", "node", "=", "self", ".", "_create_node", "(", "key", ",", "value", ")", "self", ".", "_insert", "(", "node", ")" ]
Insert a key-value pair in the list. The pair is inserted at the correct location so that the list remains sorted on *key*. If a pair with the same key is already in the list, then the pair is appended after all other pairs with that key.
[ "Insert", "a", "key", "-", "value", "pair", "in", "the", "list", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L268-L277
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.replace
def replace(self, key, value): """Replace the value of the first key-value pair with key *key*. If the key was not found, the pair is inserted. """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: node = self._create_node(key, value) self._insert(node) else: node[1] = value
python
def replace(self, key, value): """Replace the value of the first key-value pair with key *key*. If the key was not found, the pair is inserted. """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: node = self._create_node(key, value) self._insert(node) else: node[1] = value
[ "def", "replace", "(", "self", ",", "key", ",", "value", ")", ":", "self", ".", "_find_lt", "(", "key", ")", "node", "=", "self", ".", "_path", "[", "0", "]", "[", "2", "]", "if", "node", "is", "self", ".", "_tail", "or", "key", "<", "node", ...
Replace the value of the first key-value pair with key *key*. If the key was not found, the pair is inserted.
[ "Replace", "the", "value", "of", "the", "first", "key", "-", "value", "pair", "with", "key", "*", "key", "*", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L279-L290
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.clear
def clear(self): """Remove all key-value pairs.""" for i in range(self.maxlevel): self._head[2+i] = self._tail self._tail[-1] = 0 self._level = 1
python
def clear(self): """Remove all key-value pairs.""" for i in range(self.maxlevel): self._head[2+i] = self._tail self._tail[-1] = 0 self._level = 1
[ "def", "clear", "(", "self", ")", ":", "for", "i", "in", "range", "(", "self", ".", "maxlevel", ")", ":", "self", ".", "_head", "[", "2", "+", "i", "]", "=", "self", ".", "_tail", "self", ".", "_tail", "[", "-", "1", "]", "=", "0", "self", ...
Remove all key-value pairs.
[ "Remove", "all", "key", "-", "value", "pairs", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L292-L297
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.items
def items(self, start=None, stop=None): """Return an iterator yielding pairs. If *start* is specified, iteration starts at the first pair with a key that is larger than or equal to *start*. If not specified, iteration starts at the first pair in the list. If *stop* is specified, iteration stops at the last pair that is smaller than *stop*. If not specified, iteration end with the last pair in the list. """ if start is None: node = self._head[2] else: self._find_lt(start) node = self._path[0][2] while node is not self._tail and (stop is None or node[0] < stop): yield (node[0], node[1]) node = node[2]
python
def items(self, start=None, stop=None): """Return an iterator yielding pairs. If *start* is specified, iteration starts at the first pair with a key that is larger than or equal to *start*. If not specified, iteration starts at the first pair in the list. If *stop* is specified, iteration stops at the last pair that is smaller than *stop*. If not specified, iteration end with the last pair in the list. """ if start is None: node = self._head[2] else: self._find_lt(start) node = self._path[0][2] while node is not self._tail and (stop is None or node[0] < stop): yield (node[0], node[1]) node = node[2]
[ "def", "items", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "if", "start", "is", "None", ":", "node", "=", "self", ".", "_head", "[", "2", "]", "else", ":", "self", ".", "_find_lt", "(", "start", ")", "node", "="...
Return an iterator yielding pairs. If *start* is specified, iteration starts at the first pair with a key that is larger than or equal to *start*. If not specified, iteration starts at the first pair in the list. If *stop* is specified, iteration stops at the last pair that is smaller than *stop*. If not specified, iteration end with the last pair in the list.
[ "Return", "an", "iterator", "yielding", "pairs", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L315-L333
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.keys
def keys(self, start=None, stop=None): """Like :meth:`items` but returns only the keys.""" return (item[0] for item in self.items(start, stop))
python
def keys(self, start=None, stop=None): """Like :meth:`items` but returns only the keys.""" return (item[0] for item in self.items(start, stop))
[ "def", "keys", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "return", "(", "item", "[", "0", "]", "for", "item", "in", "self", ".", "items", "(", "start", ",", "stop", ")", ")" ]
Like :meth:`items` but returns only the keys.
[ "Like", ":", "meth", ":", "items", "but", "returns", "only", "the", "keys", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L337-L339
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.values
def values(self, start=None, stop=None): """Like :meth:`items` but returns only the values.""" return (item[1] for item in self.items(start, stop))
python
def values(self, start=None, stop=None): """Like :meth:`items` but returns only the values.""" return (item[1] for item in self.items(start, stop))
[ "def", "values", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "return", "(", "item", "[", "1", "]", "for", "item", "in", "self", ".", "items", "(", "start", ",", "stop", ")", ")" ]
Like :meth:`items` but returns only the values.
[ "Like", ":", "meth", ":", "items", "but", "returns", "only", "the", "values", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L341-L343
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.popitem
def popitem(self): """Removes the first key-value pair and return it. This method raises a ``KeyError`` if the list is empty. """ node = self._head[2] if node is self._tail: raise KeyError('list is empty') self._find_lt(node[0]) self._remove(node) return (node[0], node[1])
python
def popitem(self): """Removes the first key-value pair and return it. This method raises a ``KeyError`` if the list is empty. """ node = self._head[2] if node is self._tail: raise KeyError('list is empty') self._find_lt(node[0]) self._remove(node) return (node[0], node[1])
[ "def", "popitem", "(", "self", ")", ":", "node", "=", "self", ".", "_head", "[", "2", "]", "if", "node", "is", "self", ".", "_tail", ":", "raise", "KeyError", "(", "'list is empty'", ")", "self", ".", "_find_lt", "(", "node", "[", "0", "]", ")", ...
Removes the first key-value pair and return it. This method raises a ``KeyError`` if the list is empty.
[ "Removes", "the", "first", "key", "-", "value", "pair", "and", "return", "it", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L345-L355
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.search
def search(self, key, default=None): """Find the first key-value pair with key *key* and return its value. If the key was not found, return *default*. If no default was provided, return ``None``. This method never raises a ``KeyError``. """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: return default return node[1]
python
def search(self, key, default=None): """Find the first key-value pair with key *key* and return its value. If the key was not found, return *default*. If no default was provided, return ``None``. This method never raises a ``KeyError``. """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: return default return node[1]
[ "def", "search", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "self", ".", "_find_lt", "(", "key", ")", "node", "=", "self", ".", "_path", "[", "0", "]", "[", "2", "]", "if", "node", "is", "self", ".", "_tail", "or", "key", ...
Find the first key-value pair with key *key* and return its value. If the key was not found, return *default*. If no default was provided, return ``None``. This method never raises a ``KeyError``.
[ "Find", "the", "first", "key", "-", "value", "pair", "with", "key", "*", "key", "*", "and", "return", "its", "value", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L359-L369
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.remove
def remove(self, key): """Remove the first key-value pair with key *key*. If the key was not found, a ``KeyError`` is raised. """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: raise KeyError('{!r} is not in list'.format(key)) self._remove(node)
python
def remove(self, key): """Remove the first key-value pair with key *key*. If the key was not found, a ``KeyError`` is raised. """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: raise KeyError('{!r} is not in list'.format(key)) self._remove(node)
[ "def", "remove", "(", "self", ",", "key", ")", ":", "self", ".", "_find_lt", "(", "key", ")", "node", "=", "self", ".", "_path", "[", "0", "]", "[", "2", "]", "if", "node", "is", "self", ".", "_tail", "or", "key", "<", "node", "[", "0", "]", ...
Remove the first key-value pair with key *key*. If the key was not found, a ``KeyError`` is raised.
[ "Remove", "the", "first", "key", "-", "value", "pair", "with", "key", "*", "key", "*", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L371-L380
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.pop
def pop(self, key, default=UNSET): """Remove the first key-value pair with key *key*. If a pair was removed, return its value. Otherwise if *default* was provided, return *default*. Otherwise a ``KeyError`` is raised. """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: if default is self.UNSET: raise KeyError('key {!r} not in list') return default self._remove(node) return node[1]
python
def pop(self, key, default=UNSET): """Remove the first key-value pair with key *key*. If a pair was removed, return its value. Otherwise if *default* was provided, return *default*. Otherwise a ``KeyError`` is raised. """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: if default is self.UNSET: raise KeyError('key {!r} not in list') return default self._remove(node) return node[1]
[ "def", "pop", "(", "self", ",", "key", ",", "default", "=", "UNSET", ")", ":", "self", ".", "_find_lt", "(", "key", ")", "node", "=", "self", ".", "_path", "[", "0", "]", "[", "2", "]", "if", "node", "is", "self", ".", "_tail", "or", "key", "...
Remove the first key-value pair with key *key*. If a pair was removed, return its value. Otherwise if *default* was provided, return *default*. Otherwise a ``KeyError`` is raised.
[ "Remove", "the", "first", "key", "-", "value", "pair", "with", "key", "*", "key", "*", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L382-L395
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.index
def index(self, key, default=UNSET): """Find the first key-value pair with key *key* and return its position. If the key is not found, return *default*. If default was not provided, raise a ``KeyError`` """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: if default is self.UNSET: raise KeyError('key {!r} not in list'.format(key)) return default return self._distance[0]
python
def index(self, key, default=UNSET): """Find the first key-value pair with key *key* and return its position. If the key is not found, return *default*. If default was not provided, raise a ``KeyError`` """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: if default is self.UNSET: raise KeyError('key {!r} not in list'.format(key)) return default return self._distance[0]
[ "def", "index", "(", "self", ",", "key", ",", "default", "=", "UNSET", ")", ":", "self", ".", "_find_lt", "(", "key", ")", "node", "=", "self", ".", "_path", "[", "0", "]", "[", "2", "]", "if", "node", "is", "self", ".", "_tail", "or", "key", ...
Find the first key-value pair with key *key* and return its position. If the key is not found, return *default*. If default was not provided, raise a ``KeyError``
[ "Find", "the", "first", "key", "-", "value", "pair", "with", "key", "*", "key", "*", "and", "return", "its", "position", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L403-L415
geertj/pyskiplist
pyskiplist/skiplist.py
SkipList.count
def count(self, key): """Return the number of pairs with key *key*.""" count = 0 pos = self.index(key, -1) if pos == -1: return count count += 1 for i in range(pos+1, len(self)): if self[i][0] != key: break count += 1 return count
python
def count(self, key): """Return the number of pairs with key *key*.""" count = 0 pos = self.index(key, -1) if pos == -1: return count count += 1 for i in range(pos+1, len(self)): if self[i][0] != key: break count += 1 return count
[ "def", "count", "(", "self", ",", "key", ")", ":", "count", "=", "0", "pos", "=", "self", ".", "index", "(", "key", ",", "-", "1", ")", "if", "pos", "==", "-", "1", ":", "return", "count", "count", "+=", "1", "for", "i", "in", "range", "(", ...
Return the number of pairs with key *key*.
[ "Return", "the", "number", "of", "pairs", "with", "key", "*", "key", "*", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L417-L428
PGower/PyCanvas
pycanvas/apis/tabs.py
TabsAPI.update_tab_for_course
def update_tab_for_course(self, tab_id, course_id, hidden=None, position=None):
    """
    Update a tab for a course.

    Home and Settings tabs are not manageable, and can't be hidden or moved.

    :param tab_id: ID of the tab (required path parameter).
    :param course_id: ID of the course (required path parameter).
    :param hidden: Optional; whether the tab should be hidden.
    :param position: Optional; the new position of the tab, 1-based.
    :return: a tab object
    """
    # Required path parameters for the endpoint URL.
    path = {"course_id": course_id, "tab_id": tab_id}
    params = {}

    # Only include optional form fields that were actually supplied.
    data = {}
    if position is not None:
        data["position"] = position
    if hidden is not None:
        data["hidden"] = hidden

    self.logger.debug("PUT /api/v1/courses/{course_id}/tabs/{tab_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/tabs/{tab_id}".format(**path), data=data, params=params, single_item=True)
[ "def", "update_tab_for_course", "(", "self", ",", "tab_id", ",", "course_id", ",", "hidden", "=", "None", ",", "position", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\...
Update a tab for a course. Home and Settings tabs are not manageable, and can't be hidden or moved Returns a tab object
[ "Update", "a", "tab", "for", "a", "course", ".", "Home", "and", "Settings", "tabs", "are", "not", "manageable", "and", "can", "t", "be", "hidden", "or", "moved", "Returns", "a", "tab", "object" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/tabs.py#L65-L96
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select.select_all
def select_all(self, table, limit=MAX_ROWS_PER_QUERY, execute=True):
    """Query all rows and columns from a table."""
    # Batch the query when the table holds more rows than a single query may return.
    total = self.count_rows(table)
    if total <= limit:
        return self.select(table, '*', execute=execute)
    return self._select_batched(table, '*', total, limit, execute=execute)
[ "def", "select_all", "(", "self", ",", "table", ",", "limit", "=", "MAX_ROWS_PER_QUERY", ",", "execute", "=", "True", ")", ":", "# Determine if a row per query limit should be set", "num_rows", "=", "self", ".", "count_rows", "(", "table", ")", "if", "num_rows", ...
Query all rows and columns from a table.
[ "Query", "all", "rows", "and", "columns", "from", "a", "table", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L12-L19
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select.select_distinct
def select_distinct(self, table, cols='*', execute=True):
    """Query only the distinct values of the given columns from a table."""
    # Delegate to select() with the DISTINCT query keyword.
    return self.select(table, cols, execute, select_type='SELECT DISTINCT')
[ "def", "select_distinct", "(", "self", ",", "table", ",", "cols", "=", "'*'", ",", "execute", "=", "True", ")", ":", "return", "self", ".", "select", "(", "table", ",", "cols", ",", "execute", ",", "select_type", "=", "'SELECT DISTINCT'", ")" ]
Query distinct values from a table.
[ "Query", "distinct", "values", "from", "a", "table", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L21-L23
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select.select
def select(self, table, cols, execute=True, select_type='SELECT', return_type=list):
    """Query every row and only certain columns from a table.

    :param table: Name of the table
    :param cols: Iterable of column names, or '*' for all columns
    :param execute: When False, return the SQL statement instead of running it
    :param select_type: Query keyword; must be one of SELECT_QUERY_TYPES
    :param return_type: Container type for the fetched rows
    :return: Queried rows, or the SQL statement when execute is False
    :raises ValueError: If select_type is not a supported query type
    """
    # Validate the query type explicitly: the original `assert` would be
    # stripped under `python -O`, silently skipping validation.
    select_type = select_type.upper()
    if select_type not in SELECT_QUERY_TYPES:
        raise ValueError('Invalid select_type {0!r}; expected one of {1}'.format(select_type, SELECT_QUERY_TYPES))

    # Concatenate statement
    statement = '{0} {1} FROM {2}'.format(select_type, join_cols(cols), wrap(table))
    if not execute:
        # Return the statement without executing it
        return statement

    # Retrieve values and pack them into the requested container type
    values = self.fetch(statement)
    return self._return_rows(table, cols, values, return_type)
[ "def", "select", "(", "self", ",", "table", ",", "cols", ",", "execute", "=", "True", ",", "select_type", "=", "'SELECT'", ",", "return_type", "=", "list", ")", ":", "# Validate query type", "select_type", "=", "select_type", ".", "upper", "(", ")", "asser...
Query every row and only certain columns from a table.
[ "Query", "every", "row", "and", "only", "certain", "columns", "from", "a", "table", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L25-L39
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select.select_join
def select_join(self, table1, table2, cols, table1_col, table2_col=None, join_type=None):
    """
    Join all rows and columns from two tables where a common value is shared.

    :param table1: Name of table #1
    :param table2: Name of table #2
    :param cols: Columns to retrieve:
        String: a single column from table #1
        Flat list: column names, assumed to come from table #1
        List of tuples: each tuple is (table_name, column_name)
    :param table1_col: Column from table #1 to use as key
    :param table2_col: Column from table #2 to use as key (defaults to table1_col)
    :param join_type: Type of join query (defaults to LEFT JOIN)
    :return: Queried rows
    :raises ValueError: If the join type is not supported
    """
    # BUG FIX: accept a bare column-name string as documented; without this,
    # `cols[0]` would grab a character and the string would be iterated
    # char-by-char, producing one bogus column per character.
    if isinstance(cols, str):
        cols = [cols]

    # A list of (table, column) tuples is qualified as given; plain names
    # are assumed to belong to table #1.
    if isinstance(cols[0], tuple):
        cols = join_cols(['{0}.{1}'.format(tbl, col) for tbl, col in cols])
    else:
        cols = join_cols(['{0}.{1}'.format(table1, col) for col in cols])

    # Normalize join_type (e.g. 'left', 'LEFT OUTER JOIN') to '<TYPE> JOIN' and validate.
    join_type = join_type.lower().split(' ', 1)[0].upper() + ' JOIN' if join_type else 'LEFT JOIN'
    if join_type not in JOIN_QUERY_TYPES:
        raise ValueError('Invalid join_type {0!r}; expected one of {1}'.format(join_type, JOIN_QUERY_TYPES))
    table2_col = table2_col if table2_col else table1_col

    # Concatenate and execute the statement
    statement = '''
    SELECT {columns}
    FROM {table1} {join_type} {table2}
        ON {table1}.{table1_col} = {table2}.{table2_col}
    '''.format(table1=wrap(table1), table2=wrap(table2), columns=cols, table1_col=table1_col,
               table2_col=table2_col, join_type=join_type)
    return self.fetch(statement)
[ "def", "select_join", "(", "self", ",", "table1", ",", "table2", ",", "cols", ",", "table1_col", ",", "table2_col", "=", "None", ",", "join_type", "=", "None", ")", ":", "# Check if cols is a list of tuples", "if", "isinstance", "(", "cols", "[", "0", "]", ...
Left join all rows and columns from two tables where a common value is shared. :param table1: Name of table #1 :param table2: Name of table #2 :param cols: List of columns or column tuples String or flat list: Assumes column(s) are from table #1 if not specified List of tuples: Each tuple in list of columns represents (table_name, column_name) :param table1_col: Column from table #1 to use as key :param table2_col: Column from table #2 to use as key :param join_type: Type of join query :return: Queried rows
[ "Left", "join", "all", "rows", "and", "columns", "from", "two", "tables", "where", "a", "common", "value", "is", "shared", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L41-L73
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select.select_limit
def select_limit(self, table, cols='*', offset=0, limit=MAX_ROWS_PER_QUERY):
    """Run a SELECT query constrained by an offset and a row limit."""
    statement = self._select_limit_statement(table, cols, offset, limit)
    return self.fetch(statement)
[ "def", "select_limit", "(", "self", ",", "table", ",", "cols", "=", "'*'", ",", "offset", "=", "0", ",", "limit", "=", "MAX_ROWS_PER_QUERY", ")", ":", "return", "self", ".", "fetch", "(", "self", ".", "_select_limit_statement", "(", "table", ",", "cols",...
Run a select query with an offset and limit parameter.
[ "Run", "a", "select", "query", "with", "an", "offset", "and", "limit", "parameter", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L75-L77
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select.select_where
def select_where(self, table, cols, where, return_type=list):
    """
    Query certain rows from a table where a particular value is found.

    cols can be an iterable (list, set, tuple) or a string naming a single
    column.  where is either a two part (column, value) tuple — the operator
    defaults to equals (=) — or a three part (column, operator, value) tuple.
    Several clauses may be passed as a list/set; they are joined with AND.

    :param table: Name of table
    :param cols: Columns to fetch
    :param where: WHERE clause tuple, or a list/set of such tuples
    :param return_type: Type to return values in
    :return: Queried rows
    """
    # A collection of clause tuples is combined into one AND-ed condition.
    if isinstance(where, (list, set)):
        where_statement = ' AND '.join(self._where_clause(clause) for clause in where)
    else:
        where_statement = self._where_clause(where)

    # Concatenate the full statement, execute it, and pack the rows.
    statement = "SELECT {0} FROM {1} WHERE {2}".format(join_cols(cols), wrap(table), where_statement)
    return self._return_rows(table, cols, self.fetch(statement), return_type)
[ "def", "select_where", "(", "self", ",", "table", ",", "cols", ",", "where", ",", "return_type", "=", "list", ")", ":", "# Unpack WHERE clause dictionary into tuple", "if", "isinstance", "(", "where", ",", "(", "list", ",", "set", ")", ")", ":", "# Multiple ...
Query certain rows from a table where a particular value is found. cols parameter can be passed as a iterable (list, set, tuple) or a string if only querying a single column. where parameter can be passed as a two or three part tuple. If only two parts are passed the assumed operator is equals(=). :param table: Name of table :param cols: List, tuple or set of columns or string with single column name :param where: WHERE clause, accepts either a two or three part tuple two-part: (where_column, where_value) three-part: (where_column, comparison_operator, where_value) :param return_type: Type, type to return values in :return: Queried rows
[ "Query", "certain", "rows", "from", "a", "table", "where", "a", "particular", "value", "is", "found", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L79-L106
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select.select_where_between
def select_where_between(self, table, cols, where_col, between):
    """
    Query rows from a table where a column's value lies between two values.

    :param table: Name of the table
    :param cols: Columns to fetch (iterable or single column name)
    :param where_col: Column to compare values against
    :param between: (min, max) tuple for the BETWEEN comparison
    :return: Queried rows
    """
    # Unpack the comparison bounds.
    low, high = between

    # Concatenate the full statement and execute it.
    statement = "SELECT {0} FROM {1} WHERE {2} BETWEEN {3} AND {4}".format(
        join_cols(cols), wrap(table), where_col, low, high)
    return self.fetch(statement)
[ "def", "select_where_between", "(", "self", ",", "table", ",", "cols", ",", "where_col", ",", "between", ")", ":", "# Unpack WHERE clause dictionary into tuple", "min_val", ",", "max_val", "=", "between", "# Concatenate full statement and execute", "statement", "=", "\"...
Query rows from a table where a columns value is found between two values. :param table: Name of the table :param cols: List, tuple or set of columns or string with single column name :param where_col: Column to check values against :param between: Tuple with min and max values for comparison :return: Queried rows
[ "Query", "rows", "from", "a", "table", "where", "a", "columns", "value", "is", "found", "between", "two", "values", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L108-L124
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select.select_where_like
def select_where_like(self, table, cols, where_col, start=None, end=None, anywhere=None,
                      index=(None, None), length=None):
    """
    Query rows from a table where a specific pattern is found in a column.

    MySQL wildcard reminder: '%' matches zero or more characters and '_'
    matches exactly one character.

    :param table: Name of the table
    :param cols: Columns to fetch
    :param where_col: Column to match the pattern against
    :param start: Value to be found at the start
    :param end: Value to be found at the end
    :param anywhere: Value to be found anywhere
    :param index: (position, character) pair to be found at a certain index
    :param length: Minimum character length
    :return: Queried rows
    """
    # Build the LIKE pattern from the (mutually exclusive) keyword arguments.
    pattern = self._like_pattern(start, end, anywhere, index, length)

    # Concatenate the full statement and execute it.
    statement = "SELECT {0} FROM {1} WHERE {2} LIKE '{3}'".format(
        join_cols(cols), wrap(table), where_col, pattern)
    return self.fetch(statement)
[ "def", "select_where_like", "(", "self", ",", "table", ",", "cols", ",", "where_col", ",", "start", "=", "None", ",", "end", "=", "None", ",", "anywhere", "=", "None", ",", "index", "=", "(", "None", ",", "None", ")", ",", "length", "=", "None", ")...
Query rows from a table where a specific pattern is found in a column. MySQL syntax assumptions: (%) The percent sign represents zero, one, or multiple characters. (_) The underscore represents a single character. :param table: Name of the table :param cols: List, tuple or set of columns or string with single column name :param where_col: Column to check pattern against :param start: Value to be found at the start :param end: Value to be found at the end :param anywhere: Value to be found anywhere :param index: Value to be found at a certain index :param length: Minimum character length :return: Queried rows
[ "Query", "rows", "from", "a", "table", "where", "a", "specific", "pattern", "is", "found", "in", "a", "column", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L126-L150
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select._where_clause
def _where_clause(where):
    """
    Unpack a where clause tuple and concatenate a MySQL WHERE statement.

    :param where: (column, value) or (column, operator, value) tuple;
        with two parts the operator defaults to '='.
    :return: WHERE clause fragment, e.g. first_name='John'
    """
    assert isinstance(where, tuple)
    if len(where) == 3:
        col, op, val = where
    else:
        col, val = where
        op = '='
    assert op in SELECT_WHERE_OPERATORS

    # Single-quote the comparison value.
    return "{0}{1}'{2}'".format(col, op, val)
[ "def", "_where_clause", "(", "where", ")", ":", "assert", "isinstance", "(", "where", ",", "tuple", ")", "if", "len", "(", "where", ")", "==", "3", ":", "where_col", ",", "operator", ",", "where_val", "=", "where", "else", ":", "where_col", ",", "where...
Unpack a where clause tuple and concatenate a MySQL WHERE statement. :param where: 2 or 3 part tuple containing a where_column and a where_value (optional operator) :return: WHERE clause statement
[ "Unpack", "a", "where", "clause", "tuple", "and", "concatenate", "a", "MySQL", "WHERE", "statement", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L153-L169
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select._return_rows
def _return_rows(self, table, cols, values, return_type):
    """Return fetched rows packed into the desired container type.

    :param table: Table name (used to resolve '*' into real column names)
    :param cols: Columns that were selected, or '*'
    :param values: Row values returned by the fetch
    :param return_type: dict, tuple, or anything else to return unchanged
    :return: Rows as column-keyed dict(s), as tuples, or as fetched
    """
    if return_type is dict:
        # Resolve '*' to the table's actual column names for the dict keys.
        # BUG FIX: use `==`, not `is` — identity of string literals is a
        # CPython implementation detail (and emits a SyntaxWarning on 3.8+).
        cols = self.get_columns(table) if cols == '*' else cols
        if len(values) > 0 and isinstance(values[0], (set, list, tuple)):
            # Many rows: one dict per row
            return [dict(zip(cols, row)) for row in values]
        else:
            # Single flat row: one dict
            return dict(zip(cols, values))
    elif return_type is tuple:
        return [tuple(row) for row in values]
    else:
        return values
[ "def", "_return_rows", "(", "self", ",", "table", ",", "cols", ",", "values", ",", "return_type", ")", ":", "if", "return_type", "is", "dict", ":", "# Pack each row into a dictionary", "cols", "=", "self", ".", "get_columns", "(", "table", ")", "if", "cols",...
Return fetched rows in the desired type.
[ "Return", "fetched", "rows", "in", "the", "desired", "type", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L171-L183
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select._select_batched
def _select_batched(self, table, cols, num_rows, limit, queries_per_batch=3, execute=True):
    """Run select queries in small batches and return the joined results.

    Splitting one huge SELECT into LIMIT/OFFSET chunks (with periodic
    reconnects) avoids connection timeouts on large tables.

    :param table: Name of the table
    :param cols: Columns to fetch
    :param num_rows: Total number of rows to retrieve
    :param limit: Maximum rows per individual query
    :param queries_per_batch: Queries to run between reconnects
    :param execute: When False, return the statements instead of running them
    :return: Fetched rows, or the list of SQL statements when execute is False
    """
    # Build one LIMIT/OFFSET statement per chunk.
    commands, offset = [], 0
    while num_rows > 0:
        # The final chunk may hold fewer rows than the limit.
        _limit = min(limit, num_rows)
        # BUG FIX: pass the clamped _limit (the original passed `limit`,
        # over-requesting rows on the final, partial chunk).
        commands.append(self._select_limit_statement(table, cols=cols, offset=offset, limit=_limit))
        offset += _limit
        num_rows -= _limit

    if not execute:
        # Return the statements without executing them.
        return commands

    # Execute the statements, reconnecting every `queries_per_batch` queries
    # to keep the connection from timing out.
    rows = []
    til_reconnect = queries_per_batch
    for command in commands:
        if til_reconnect == 0:
            self.disconnect()
            self.reconnect()
            til_reconnect = queries_per_batch
        rows.extend(self.fetch(command, False))
        til_reconnect -= 1
    del commands
    return rows
[ "def", "_select_batched", "(", "self", ",", "table", ",", "cols", ",", "num_rows", ",", "limit", ",", "queries_per_batch", "=", "3", ",", "execute", "=", "True", ")", ":", "# Execute select queries in small batches to avoid connection timeout", "commands", ",", "off...
Run select queries in small batches and return joined resutls.
[ "Run", "select", "queries", "in", "small", "batches", "and", "return", "joined", "resutls", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L185-L213
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select._select_limit_statement
def _select_limit_statement(table, cols='*', offset=0, limit=MAX_ROWS_PER_QUERY):
    """Build a SELECT statement with a LIMIT offset, row-count clause."""
    template = 'SELECT {0} FROM {1} LIMIT {2}, {3}'
    return template.format(join_cols(cols), wrap(table), offset, limit)
[ "def", "_select_limit_statement", "(", "table", ",", "cols", "=", "'*'", ",", "offset", "=", "0", ",", "limit", "=", "MAX_ROWS_PER_QUERY", ")", ":", "return", "'SELECT {0} FROM {1} LIMIT {2}, {3}'", ".", "format", "(", "join_cols", "(", "cols", ")", ",", "wrap...
Concatenate a select with offset and limit statement.
[ "Concatenate", "a", "select", "with", "offset", "and", "limit", "statement", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L216-L218
mrstephenneal/mysql-toolkit
mysql/toolkit/components/manipulate/select.py
Select._like_pattern
def _like_pattern(start, end, anywhere, index, length):
    """
    Create a LIKE pattern to use as a search parameter for a WHERE clause.

    Exactly one of the supported parameter combinations must be truthy;
    any other combination yields None.

    :param start: Value to be found at the start
    :param end: Value to be found at the end
    :param anywhere: Value to be found anywhere
    :param index: (position, character) pair to be found at a certain index
    :param length: Minimum character length
    :return: LIKE pattern string, or None if no supported combination matched
    """
    # Unpack the index pair, then null out the tuple itself: a (None, None)
    # tuple is truthy, so the exclusivity checks below must see None instead.
    index_num, index_char = index
    index = None

    if all([start, end, anywhere]) and not any([index, length]):
        # Start, end and anywhere
        return '{start}%{anywhere}%{end}'.format(start=start, end=end, anywhere=anywhere)
    elif all([start, end]) and not any([anywhere, index, length]):
        # Start and end
        return '{start}%{end}'.format(start=start, end=end)
    elif all([start, anywhere]) and not any([end, index, length]):
        # Start and anywhere
        return '{start}%{anywhere}%'.format(start=start, anywhere=anywhere)
    elif all([end, anywhere]) and not any([start, index, length]):
        # End and anywhere
        return '%{anywhere}%{end}'.format(end=end, anywhere=anywhere)
    elif start and not any([end, anywhere, index, length]):
        # Start only
        return '{start}%'.format(start=start)
    elif end and not any([start, anywhere, index, length]):
        # End only
        return '%{end}'.format(end=end)
    elif anywhere and not any([start, end, index, length]):
        # Anywhere only
        return '%{anywhere}%'.format(anywhere=anywhere)
    elif index_num and index_char and not any([start, end, anywhere, length]):
        # Character at a given index
        return '{index_num}{index_char}%'.format(index_num='_' * (index_num + 1), index_char=index_char)
    elif length and not any([start, end, anywhere, index]):
        # Minimum length
        return '{length}'.format(length='_%' * length)
    else:
        return None
[ "def", "_like_pattern", "(", "start", ",", "end", ",", "anywhere", ",", "index", ",", "length", ")", ":", "# Unpack index tuple", "index_num", ",", "index_char", "=", "index", "index", "=", "None", "# Start, end, anywhere", "if", "all", "(", "i", "for", "i",...
Create a LIKE pattern to use as a search parameter for a WHERE clause. :param start: Value to be found at the start :param end: Value to be found at the end :param anywhere: Value to be found anywhere :param index: Value to be found at a certain index :param length: Minimum character length :return: WHERE pattern
[ "Create", "a", "LIKE", "pattern", "to", "use", "as", "a", "search", "parameter", "for", "a", "WHERE", "clause", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/manipulate/select.py#L221-L273