repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
novopl/peltak
src/peltak/extra/gitflow/logic/task.py
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/gitflow/logic/task.py#L117-L133
def merged(): # type: () -> None """ Cleanup a remotely merged branch. """ base_branch = common.get_base_branch() branch = git.current_branch(refresh=True) common.assert_branch_type('task') # Pull feature branch with the merged task common.git_checkout(base_branch) common.git_pull(base_branch) # Cleanup common.git_branch_delete(branch.name) common.git_prune() common.git_checkout(base_branch)
[ "def", "merged", "(", ")", ":", "# type: () -> None", "base_branch", "=", "common", ".", "get_base_branch", "(", ")", "branch", "=", "git", ".", "current_branch", "(", "refresh", "=", "True", ")", "common", ".", "assert_branch_type", "(", "'task'", ")", "# P...
Cleanup a remotely merged branch.
[ "Cleanup", "a", "remotely", "merged", "branch", "." ]
python
train
saltstack/salt
salt/modules/boto_vpc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_vpc.py#L2008-L2045
def delete_network_acl_entry(network_acl_id=None, rule_number=None, egress=None, network_acl_name=None, region=None, key=None, keyid=None, profile=None): ''' Deletes a network acl entry. CLI Example: .. code-block:: bash salt myminion boto_vpc.delete_network_acl_entry 'acl-5fb85d36' '32767' ''' if not _exactly_one((network_acl_name, network_acl_id)): raise SaltInvocationError('One (but not both) of network_acl_id or ' 'network_acl_name must be provided.') for v in ('rule_number', 'egress'): if locals()[v] is None: raise SaltInvocationError('{0} is required.'.format(v)) if network_acl_name: network_acl_id = _get_resource_id('network_acl', network_acl_name, region=region, key=key, keyid=keyid, profile=profile) if not network_acl_id: return {'deleted': False, 'error': {'message': 'Network ACL {0} does not exist.'.format(network_acl_name or network_acl_id)}} try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_network_acl_entry(network_acl_id, rule_number, egress=egress) if deleted: log.info('Network ACL entry was deleted') else: log.warning('Network ACL was not deleted') return {'deleted': deleted} except BotoServerError as e: return {'deleted': False, 'error': __utils__['boto.get_error'](e)}
[ "def", "delete_network_acl_entry", "(", "network_acl_id", "=", "None", ",", "rule_number", "=", "None", ",", "egress", "=", "None", ",", "network_acl_name", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", ...
Deletes a network acl entry. CLI Example: .. code-block:: bash salt myminion boto_vpc.delete_network_acl_entry 'acl-5fb85d36' '32767'
[ "Deletes", "a", "network", "acl", "entry", "." ]
python
train
clld/clldutils
src/clldutils/sfm.py
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/sfm.py#L71-L76
def get(self, key, default=None): """Retrieve the first value for a marker or None.""" for k, v in self: if k == key: return v return default
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "for", "k", ",", "v", "in", "self", ":", "if", "k", "==", "key", ":", "return", "v", "return", "default" ]
Retrieve the first value for a marker or None.
[ "Retrieve", "the", "first", "value", "for", "a", "marker", "or", "None", "." ]
python
train
sanger-pathogens/circlator
circlator/merge.py
https://github.com/sanger-pathogens/circlator/blob/a4befb8c9dbbcd4b3ad1899a95aa3e689d58b638/circlator/merge.py#L740-L756
def _get_spades_circular_nodes(self, fastg): '''Returns set of names of nodes in SPAdes fastg file that are circular. Names will match those in spades fasta file''' seq_reader = pyfastaq.sequences.file_reader(fastg) names = set([x.id.rstrip(';') for x in seq_reader if ':' in x.id]) found_fwd = set() found_rev = set() for name in names: l = name.split(':') if len(l) != 2: continue if l[0] == l[1]: if l[0][-1] == "'": found_rev.add(l[0][:-1]) else: found_fwd.add(l[0]) return found_fwd.intersection(found_rev)
[ "def", "_get_spades_circular_nodes", "(", "self", ",", "fastg", ")", ":", "seq_reader", "=", "pyfastaq", ".", "sequences", ".", "file_reader", "(", "fastg", ")", "names", "=", "set", "(", "[", "x", ".", "id", ".", "rstrip", "(", "';'", ")", "for", "x",...
Returns set of names of nodes in SPAdes fastg file that are circular. Names will match those in spades fasta file
[ "Returns", "set", "of", "names", "of", "nodes", "in", "SPAdes", "fastg", "file", "that", "are", "circular", ".", "Names", "will", "match", "those", "in", "spades", "fasta", "file" ]
python
train
titilambert/pyebox
pyebox/client.py
https://github.com/titilambert/pyebox/blob/f35fb75ab5f0df38e1d16a0420e4c13b4908c63d/pyebox/client.py#L53-L71
async def _get_login_page(self): """Go to the login page.""" try: async with async_timeout.timeout(10): raw_res = await self._session.get(HOME_URL, allow_redirects=False, timeout=self._timeout) except OSError: raise PyEboxError("Can not connect to login page") # Get token content = await raw_res.text() soup = BeautifulSoup(content, 'html.parser') token_node = soup.find('input', {'name': '_csrf_security_token'}) if token_node is None: raise PyEboxError("No token input found") token = token_node.attrs.get('value') if token is None: raise PyEboxError("No token found") return token
[ "async", "def", "_get_login_page", "(", "self", ")", ":", "try", ":", "async", "with", "async_timeout", ".", "timeout", "(", "10", ")", ":", "raw_res", "=", "await", "self", ".", "_session", ".", "get", "(", "HOME_URL", ",", "allow_redirects", "=", "Fals...
Go to the login page.
[ "Go", "to", "the", "login", "page", "." ]
python
train
ploneintranet/ploneintranet.workspace
src/ploneintranet/workspace/browser/views.py
https://github.com/ploneintranet/ploneintranet.workspace/blob/a4fc7a5c61f9c6d4d4ad25478ff5250f342ffbba/src/ploneintranet/workspace/browser/views.py#L47-L56
def role_settings(self): """ Filter out unwanted to show groups """ result = super(SharingView, self).role_settings() uid = self.context.UID() filter_func = lambda x: not any(( x["id"].endswith(uid), x["id"] == "AuthenticatedUsers", x["id"] == INTRANET_USERS_GROUP_ID, )) return filter(filter_func, result)
[ "def", "role_settings", "(", "self", ")", ":", "result", "=", "super", "(", "SharingView", ",", "self", ")", ".", "role_settings", "(", ")", "uid", "=", "self", ".", "context", ".", "UID", "(", ")", "filter_func", "=", "lambda", "x", ":", "not", "any...
Filter out unwanted to show groups
[ "Filter", "out", "unwanted", "to", "show", "groups" ]
python
train
PmagPy/PmagPy
pmagpy/validate_upload3.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/validate_upload3.py#L578-L585
def split_func(string): """ Take a string like 'requiredIf("arg_name")' return the function name and the argument: (requiredIf, arg_name) """ ind = string.index("(") return string[:ind], string[ind+1:-1].strip('"')
[ "def", "split_func", "(", "string", ")", ":", "ind", "=", "string", ".", "index", "(", "\"(\"", ")", "return", "string", "[", ":", "ind", "]", ",", "string", "[", "ind", "+", "1", ":", "-", "1", "]", ".", "strip", "(", "'\"'", ")" ]
Take a string like 'requiredIf("arg_name")' return the function name and the argument: (requiredIf, arg_name)
[ "Take", "a", "string", "like", "requiredIf", "(", "arg_name", ")", "return", "the", "function", "name", "and", "the", "argument", ":", "(", "requiredIf", "arg_name", ")" ]
python
train
zrong/rookout
rookout/conf.py
https://github.com/zrong/rookout/blob/f94871a564274625768f2da63507d120232ca717/rookout/conf.py#L68-L81
def dump(self, human=False): """将自身内容打印成字符串 :param bool human: 若值为 True ,则打印成易读格式。 """ txt = str(self) if human: txt = txt.replace(", '", ",\n'") txt = txt.replace("{", "{\n") txt = txt.replace("}", "\n}") txt = txt.replace("[", "[\n") txt = txt.replace("]", "\n]") return txt
[ "def", "dump", "(", "self", ",", "human", "=", "False", ")", ":", "txt", "=", "str", "(", "self", ")", "if", "human", ":", "txt", "=", "txt", ".", "replace", "(", "\", '\"", ",", "\",\\n'\"", ")", "txt", "=", "txt", ".", "replace", "(", "\"{\"", ...
将自身内容打印成字符串 :param bool human: 若值为 True ,则打印成易读格式。
[ "将自身内容打印成字符串" ]
python
train
glormph/msstitch
src/app/readers/openms.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/openms.py#L31-L42
def get_feature_info(feature): """Returns a dict with feature information""" dimensions = feature.findall('position') for dim in dimensions: if dim.attrib['dim'] == '0': rt = dim.text elif dim.attrib['dim'] == '1': mz = dim.text return {'rt': float(rt), 'mz': float(mz), 'charge': int(feature.find('charge').text), 'intensity': float(feature.find('intensity').text), }
[ "def", "get_feature_info", "(", "feature", ")", ":", "dimensions", "=", "feature", ".", "findall", "(", "'position'", ")", "for", "dim", "in", "dimensions", ":", "if", "dim", ".", "attrib", "[", "'dim'", "]", "==", "'0'", ":", "rt", "=", "dim", ".", ...
Returns a dict with feature information
[ "Returns", "a", "dict", "with", "feature", "information" ]
python
train
fhcrc/taxtastic
taxtastic/taxonomy.py
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L195-L209
def _get_merged(self, tax_id): """Returns tax_id into which `tax_id` has been merged or `tax_id` of not obsolete. """ cmd = """ SELECT COALESCE( (SELECT new_tax_id FROM {merged} WHERE old_tax_id = {x}), {x}) """.format(x=self.placeholder, merged=self.merged) with self.engine.connect() as con: result = con.execute(cmd, (tax_id, tax_id)) return result.fetchone()[0]
[ "def", "_get_merged", "(", "self", ",", "tax_id", ")", ":", "cmd", "=", "\"\"\"\n SELECT COALESCE(\n (SELECT new_tax_id FROM {merged}\n WHERE old_tax_id = {x}), {x})\n \"\"\"", ".", "format", "(", "x", "=", "self", ".", "placeholder", ",", "merged...
Returns tax_id into which `tax_id` has been merged or `tax_id` of not obsolete.
[ "Returns", "tax_id", "into", "which", "tax_id", "has", "been", "merged", "or", "tax_id", "of", "not", "obsolete", "." ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/compiler_frontend.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/compiler_frontend.py#L190-L209
def _get_inline_fragment(ast): """Return the inline fragment at the current AST node, or None if no fragment exists.""" if not ast.selection_set: # There is nothing selected here, so no fragment. return None fragments = [ ast_node for ast_node in ast.selection_set.selections if isinstance(ast_node, InlineFragment) ] if not fragments: return None if len(fragments) > 1: raise GraphQLCompilationError(u'Cannot compile GraphQL with more than one fragment in ' u'a given selection set.') return fragments[0]
[ "def", "_get_inline_fragment", "(", "ast", ")", ":", "if", "not", "ast", ".", "selection_set", ":", "# There is nothing selected here, so no fragment.", "return", "None", "fragments", "=", "[", "ast_node", "for", "ast_node", "in", "ast", ".", "selection_set", ".", ...
Return the inline fragment at the current AST node, or None if no fragment exists.
[ "Return", "the", "inline", "fragment", "at", "the", "current", "AST", "node", "or", "None", "if", "no", "fragment", "exists", "." ]
python
train
lpantano/seqcluster
seqcluster/prepare_data.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/prepare_data.py#L45-L77
def _read_fasta_files(f, args): """ read fasta files of each sample and generate a seq_obj with the information of each unique sequence in each sample :param f: file containing the path for each fasta file and the name of the sample. Two column format with `tab` as field separator :returns: * :code:`seq_l`: is a list of seq_obj objects, containing the information of each sequence * :code:`sample_l`: is a list with the name of the samples (column two of the config file) """ seq_l = {} sample_l = [] idx = 1 for line1 in f: line1 = line1.strip() cols = line1.split("\t") with open(cols[0], 'r') as fasta: sample_l.append(cols[1]) for line in fasta: if line.startswith(">"): idx += 1 counts = int(re.search("x([0-9]+)", line.strip()).group(1)) else: seq = line.strip() seq = seq[0:int(args.maxl)] if len(seq) > int(args.maxl) else seq if counts > int(args.minc) and len(seq) > int(args.minl): if seq not in seq_l: seq_l[seq] = sequence_unique(idx, seq) seq_l[seq].add_exp(cols[1], counts) return seq_l, sample_l
[ "def", "_read_fasta_files", "(", "f", ",", "args", ")", ":", "seq_l", "=", "{", "}", "sample_l", "=", "[", "]", "idx", "=", "1", "for", "line1", "in", "f", ":", "line1", "=", "line1", ".", "strip", "(", ")", "cols", "=", "line1", ".", "split", ...
read fasta files of each sample and generate a seq_obj with the information of each unique sequence in each sample :param f: file containing the path for each fasta file and the name of the sample. Two column format with `tab` as field separator :returns: * :code:`seq_l`: is a list of seq_obj objects, containing the information of each sequence * :code:`sample_l`: is a list with the name of the samples (column two of the config file)
[ "read", "fasta", "files", "of", "each", "sample", "and", "generate", "a", "seq_obj", "with", "the", "information", "of", "each", "unique", "sequence", "in", "each", "sample" ]
python
train
not-na/peng3d
peng3d/model.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/model.py#L1039-L1086
def setAnimation(self,obj,animation,transition=None,force=False): """ Sets the animation to be used by the object. See :py:meth:`Actor.setAnimation()` for more information. """ self.ensureModelData(obj) data = obj._modeldata # Validity check if animation not in self.modeldata["animations"]: raise ValueError("There is no animation of name '%s' for model '%s'"%(animation,self.modelname)) if data.get("_anidata",{}).get("anitype",None)==animation and not force: return # animation is already running # Cache the obj to improve readability anim = self.modeldata["animations"][animation] # Set to default if not set if transition is None: transition = anim.default_jt # Notify the animation to allow it to initialize itself anim.startAnimation(data,transition) # initialize animation data if "_anidata" not in data: data["_anidata"]={} adata = data["_anidata"] adata["anitype"]=animation if "_schedfunc" in adata: # unschedule the old animation, if any # prevents clashing and crashes pyglet.clock.unschedule(adata["_schedfunc"]) # Schedule the animation function def schedfunc(*args): # This function is defined locally to create a closure # The closure stores the local variables, e.g. anim and data even after the parent function has finished # Note that this may also prevent the garbage collection of any objects defined in the parent scope anim.tickEntity(data) # register the function to pyglet pyglet.clock.schedule_interval(schedfunc,1./(anim.kps if anim.atype=="keyframes" else 60)) # save it for later for de-initialization adata["_schedfunc"] = schedfunc
[ "def", "setAnimation", "(", "self", ",", "obj", ",", "animation", ",", "transition", "=", "None", ",", "force", "=", "False", ")", ":", "self", ".", "ensureModelData", "(", "obj", ")", "data", "=", "obj", ".", "_modeldata", "# Validity check", "if", "ani...
Sets the animation to be used by the object. See :py:meth:`Actor.setAnimation()` for more information.
[ "Sets", "the", "animation", "to", "be", "used", "by", "the", "object", ".", "See", ":", "py", ":", "meth", ":", "Actor", ".", "setAnimation", "()", "for", "more", "information", "." ]
python
test
gitenberg-dev/gitberg
gitenberg/workflow.py
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/workflow.py#L15-L41
def upload_all_books(book_id_start, book_id_end, rdf_library=None): """ Uses the fetch, make, push subcommands to mirror Project Gutenberg to a github3 api """ # TODO refactor appname into variable logger.info( "starting a gitberg mass upload: {0} -> {1}".format( book_id_start, book_id_end ) ) for book_id in range(int(book_id_start), int(book_id_end) + 1): cache = {} errors = 0 try: if int(book_id) in missing_pgid: print(u'missing\t{}'.format(book_id)) continue upload_book(book_id, rdf_library=rdf_library, cache=cache) except Exception as e: print(u'error\t{}'.format(book_id)) logger.error(u"Error processing: {}\r{}".format(book_id, e)) errors += 1 if errors > 10: print('error limit reached!') break
[ "def", "upload_all_books", "(", "book_id_start", ",", "book_id_end", ",", "rdf_library", "=", "None", ")", ":", "# TODO refactor appname into variable", "logger", ".", "info", "(", "\"starting a gitberg mass upload: {0} -> {1}\"", ".", "format", "(", "book_id_start", ",",...
Uses the fetch, make, push subcommands to mirror Project Gutenberg to a github3 api
[ "Uses", "the", "fetch", "make", "push", "subcommands", "to", "mirror", "Project", "Gutenberg", "to", "a", "github3", "api" ]
python
train
KelSolaar/Manager
manager/components_manager.py
https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L473-L484
def version(self, value): """ Setter for **self.__version** attribute. :param value: Attribute value. :type value: unicode """ if value is not None: assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format( "version", value) self.__version = value
[ "def", "version", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "unicode", ",", "\"'{0}' attribute: '{1}' type is not 'unicode'!\"", ".", "format", "(", "\"version\"", ",", "value", "...
Setter for **self.__version** attribute. :param value: Attribute value. :type value: unicode
[ "Setter", "for", "**", "self", ".", "__version", "**", "attribute", "." ]
python
train
log2timeline/dfvfs
dfvfs/file_io/encoded_stream_io.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encoded_stream_io.py#L52-L65
def _Close(self): """Closes the file-like object. If the file-like object was passed in the init function the encoded stream file-like object does not control the file-like object and should not actually close it. """ if not self._file_object_set_in_init: self._file_object.close() self._file_object = None self._decoder = None self._decoded_data = b'' self._encoded_data = b''
[ "def", "_Close", "(", "self", ")", ":", "if", "not", "self", ".", "_file_object_set_in_init", ":", "self", ".", "_file_object", ".", "close", "(", ")", "self", ".", "_file_object", "=", "None", "self", ".", "_decoder", "=", "None", "self", ".", "_decoded...
Closes the file-like object. If the file-like object was passed in the init function the encoded stream file-like object does not control the file-like object and should not actually close it.
[ "Closes", "the", "file", "-", "like", "object", "." ]
python
train
GPflow/GPflow
gpflow/expectations.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/expectations.py#L675-L683
def _expectation(p, mean, none1, none2, none3, nghp=None): """ Compute the expectation: <m(X)>_p(X) - m(x) :: Linear, Identity or Constant mean function :return: NxQ """ return mean(p.mu)
[ "def", "_expectation", "(", "p", ",", "mean", ",", "none1", ",", "none2", ",", "none3", ",", "nghp", "=", "None", ")", ":", "return", "mean", "(", "p", ".", "mu", ")" ]
Compute the expectation: <m(X)>_p(X) - m(x) :: Linear, Identity or Constant mean function :return: NxQ
[ "Compute", "the", "expectation", ":", "<m", "(", "X", ")", ">", "_p", "(", "X", ")", "-", "m", "(", "x", ")", "::", "Linear", "Identity", "or", "Constant", "mean", "function" ]
python
train
heronotears/lazyxml
lazyxml/builder.py
https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/builder.py#L145-L154
def safedata(self, data, cdata=True): r"""Convert xml special chars to entities. :param data: the data will be converted safe. :param cdata: whether to use cdata. Default:``True``. If not, use :func:`cgi.escape` to convert data. :type cdata: bool :rtype: str """ safe = ('<![CDATA[%s]]>' % data) if cdata else cgi.escape(str(data), True) return safe
[ "def", "safedata", "(", "self", ",", "data", ",", "cdata", "=", "True", ")", ":", "safe", "=", "(", "'<![CDATA[%s]]>'", "%", "data", ")", "if", "cdata", "else", "cgi", ".", "escape", "(", "str", "(", "data", ")", ",", "True", ")", "return", "safe" ...
r"""Convert xml special chars to entities. :param data: the data will be converted safe. :param cdata: whether to use cdata. Default:``True``. If not, use :func:`cgi.escape` to convert data. :type cdata: bool :rtype: str
[ "r", "Convert", "xml", "special", "chars", "to", "entities", "." ]
python
train
DeepHorizons/iarm
iarm/arm_instructions/_meta.py
https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm/arm_instructions/_meta.py#L306-L327
def set_APSR_flag_to_value(self, flag, value): """ Set or clear flag in ASPR :param flag: The flag to set :param value: If value evaulates to true, it is set, cleared otherwise :return: """ if flag == 'N': bit = 31 elif flag == 'Z': bit = 30 elif flag == 'C': bit = 29 elif flag == 'V': bit = 28 else: raise AttributeError("Flag {} does not exist in the APSR".format(flag)) if value: self.register['APSR'] |= (1 << bit) else: self.register['APSR'] -= (1 << bit) if (self.register['APSR'] & (1 << bit)) else 0
[ "def", "set_APSR_flag_to_value", "(", "self", ",", "flag", ",", "value", ")", ":", "if", "flag", "==", "'N'", ":", "bit", "=", "31", "elif", "flag", "==", "'Z'", ":", "bit", "=", "30", "elif", "flag", "==", "'C'", ":", "bit", "=", "29", "elif", "...
Set or clear flag in ASPR :param flag: The flag to set :param value: If value evaulates to true, it is set, cleared otherwise :return:
[ "Set", "or", "clear", "flag", "in", "ASPR", ":", "param", "flag", ":", "The", "flag", "to", "set", ":", "param", "value", ":", "If", "value", "evaulates", "to", "true", "it", "is", "set", "cleared", "otherwise", ":", "return", ":" ]
python
train
ponty/EasyProcess
easyprocess/__init__.py
https://github.com/ponty/EasyProcess/blob/81c2923339e09a86b6a2b8c12dc960f1bc67db9c/easyprocess/__init__.py#L201-L235
def start(self): """start command in background and does not wait for it. :rtype: self """ if self.is_started: raise EasyProcessError(self, 'process was started twice!') if self.use_temp_files: self._stdout_file = tempfile.TemporaryFile(prefix='stdout_') self._stderr_file = tempfile.TemporaryFile(prefix='stderr_') stdout = self._stdout_file stderr = self._stderr_file else: stdout = subprocess.PIPE stderr = subprocess.PIPE cmd = list(map(uniencode, self.cmd)) try: self.popen = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, cwd=self.cwd, env=self.env, ) except OSError as oserror: log.debug('OSError exception: %s', oserror) self.oserror = oserror raise EasyProcessError(self, 'start error') self.is_started = True log.debug('process was started (pid=%s)', self.pid) return self
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "is_started", ":", "raise", "EasyProcessError", "(", "self", ",", "'process was started twice!'", ")", "if", "self", ".", "use_temp_files", ":", "self", ".", "_stdout_file", "=", "tempfile", ".", "Tem...
start command in background and does not wait for it. :rtype: self
[ "start", "command", "in", "background", "and", "does", "not", "wait", "for", "it", "." ]
python
train
boriel/zxbasic
zxbpplex.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbpplex.py#L173-L177
def t_comment_endBlock(self, t): r"'/" self.__COMMENT_LEVEL -= 1 if not self.__COMMENT_LEVEL: t.lexer.begin('INITIAL')
[ "def", "t_comment_endBlock", "(", "self", ",", "t", ")", ":", "self", ".", "__COMMENT_LEVEL", "-=", "1", "if", "not", "self", ".", "__COMMENT_LEVEL", ":", "t", ".", "lexer", ".", "begin", "(", "'INITIAL'", ")" ]
r"'/
[ "r", "/" ]
python
train
GeorgeArgyros/symautomata
symautomata/pythondfa.py
https://github.com/GeorgeArgyros/symautomata/blob/f5d66533573b27e155bec3f36b8c00b8e3937cb3/symautomata/pythondfa.py#L160-L165
def define(self): """If DFA is empty, create a sink state""" if len(self.states) == 0: for char in self.alphabet: self.add_arc(0, 0, char) self[0].final = False
[ "def", "define", "(", "self", ")", ":", "if", "len", "(", "self", ".", "states", ")", "==", "0", ":", "for", "char", "in", "self", ".", "alphabet", ":", "self", ".", "add_arc", "(", "0", ",", "0", ",", "char", ")", "self", "[", "0", "]", ".",...
If DFA is empty, create a sink state
[ "If", "DFA", "is", "empty", "create", "a", "sink", "state" ]
python
train
icgood/pymap
pymap/mime/parsed.py
https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/mime/parsed.py#L168-L173
def content_location(self) -> Optional[UnstructuredHeader]: """The ``Content-Location`` header.""" try: return cast(UnstructuredHeader, self[b'content-location'][0]) except (KeyError, IndexError): return None
[ "def", "content_location", "(", "self", ")", "->", "Optional", "[", "UnstructuredHeader", "]", ":", "try", ":", "return", "cast", "(", "UnstructuredHeader", ",", "self", "[", "b'content-location'", "]", "[", "0", "]", ")", "except", "(", "KeyError", ",", "...
The ``Content-Location`` header.
[ "The", "Content", "-", "Location", "header", "." ]
python
train
alexandrovteam/pyimzML
pyimzml/ImzMLWriter.py
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLWriter.py#L249-L269
def _get_previous_mz(self, mzs): '''given an mz array, return the mz_data (disk location) if the mz array was not previously written, write to disk first''' mzs = tuple(mzs) # must be hashable if mzs in self.lru_cache: return self.lru_cache[mzs] # mz not recognized ... check hash mz_hash = "%s-%s-%s" % (hash(mzs), sum(mzs), len(mzs)) if mz_hash in self.hashes: for mz_data in self.hashes[mz_hash]: test_mz = self._read_mz(*mz_data) if mzs == test_mz: self.lru_cache[test_mz] = mz_data return mz_data # hash not recognized # must be a new mz array ... write it, add it to lru_cache and hashes mz_data = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression) self.hashes[mz_hash].append(mz_data) self.lru_cache[mzs] = mz_data return mz_data
[ "def", "_get_previous_mz", "(", "self", ",", "mzs", ")", ":", "mzs", "=", "tuple", "(", "mzs", ")", "# must be hashable", "if", "mzs", "in", "self", ".", "lru_cache", ":", "return", "self", ".", "lru_cache", "[", "mzs", "]", "# mz not recognized ... check ha...
given an mz array, return the mz_data (disk location) if the mz array was not previously written, write to disk first
[ "given", "an", "mz", "array", "return", "the", "mz_data", "(", "disk", "location", ")", "if", "the", "mz", "array", "was", "not", "previously", "written", "write", "to", "disk", "first" ]
python
train
Shinichi-Nakagawa/pitchpx
pitchpx/game/game.py
https://github.com/Shinichi-Nakagawa/pitchpx/blob/5747402a0b3416f5e910b479e100df858f0b6440/pitchpx/game/game.py#L152-L170
def _get_game_type_des(cls, game_type): """ get game type description :param game_type: game type :return: game type description """ if game_type == 'S': return 'Spring Training' elif game_type == 'R': return 'Regular Season' elif game_type == 'F': return 'Wild-card Game' elif game_type == 'D': return 'Divisional Series' elif game_type == 'L': return 'LCS' elif game_type == 'W': return 'World Series' return MlbamConst.UNKNOWN_FULL
[ "def", "_get_game_type_des", "(", "cls", ",", "game_type", ")", ":", "if", "game_type", "==", "'S'", ":", "return", "'Spring Training'", "elif", "game_type", "==", "'R'", ":", "return", "'Regular Season'", "elif", "game_type", "==", "'F'", ":", "return", "'Wil...
get game type description :param game_type: game type :return: game type description
[ "get", "game", "type", "description", ":", "param", "game_type", ":", "game", "type", ":", "return", ":", "game", "type", "description" ]
python
train
thieman/dagobah
dagobah/core/core.py
https://github.com/thieman/dagobah/blob/e624180c2291034960302c9e0b818b65b5a7ee11/dagobah/core/core.py#L108-L133
def _add_job_from_spec(self, job_json, use_job_id=True): """ Add a single job to the Dagobah from a spec. """ job_id = (job_json['job_id'] if use_job_id else self.backend.get_new_job_id()) self.add_job(str(job_json['name']), job_id) job = self.get_job(job_json['name']) if job_json.get('cron_schedule', None): job.schedule(job_json['cron_schedule']) for task in job_json.get('tasks', []): self.add_task_to_job(job, str(task['command']), str(task['name']), soft_timeout=task.get('soft_timeout', 0), hard_timeout=task.get('hard_timeout', 0), hostname=task.get('hostname', None)) dependencies = job_json.get('dependencies', {}) for from_node, to_nodes in dependencies.iteritems(): for to_node in to_nodes: job.add_dependency(from_node, to_node) if job_json.get('notes', None): job.update_job_notes(job_json['notes'])
[ "def", "_add_job_from_spec", "(", "self", ",", "job_json", ",", "use_job_id", "=", "True", ")", ":", "job_id", "=", "(", "job_json", "[", "'job_id'", "]", "if", "use_job_id", "else", "self", ".", "backend", ".", "get_new_job_id", "(", ")", ")", "self", "...
Add a single job to the Dagobah from a spec.
[ "Add", "a", "single", "job", "to", "the", "Dagobah", "from", "a", "spec", "." ]
python
train
basho/riak-python-client
riak/datatypes/hll.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/datatypes/hll.py#L62-L72
def add(self, element): """ Adds an element to the HyperLogLog. Datatype cardinality will be updated when the object is saved. :param element: the element to add :type element: str """ if not isinstance(element, six.string_types): raise TypeError("Hll elements can only be strings") self._adds.add(element)
[ "def", "add", "(", "self", ",", "element", ")", ":", "if", "not", "isinstance", "(", "element", ",", "six", ".", "string_types", ")", ":", "raise", "TypeError", "(", "\"Hll elements can only be strings\"", ")", "self", ".", "_adds", ".", "add", "(", "eleme...
Adds an element to the HyperLogLog. Datatype cardinality will be updated when the object is saved. :param element: the element to add :type element: str
[ "Adds", "an", "element", "to", "the", "HyperLogLog", ".", "Datatype", "cardinality", "will", "be", "updated", "when", "the", "object", "is", "saved", "." ]
python
train
nathancahill/mimicdb
mimicdb/s3/bucket.py
https://github.com/nathancahill/mimicdb/blob/9d0e8ebcba31d937f73752f9b88e5a4fec860765/mimicdb/s3/bucket.py#L150-L163
def sync(self): """Sync a bucket. Force all API calls to S3 and populate the database with the current state of S3. """ for key in mimicdb.backend.smembers(tpl.bucket % self.name): mimicdb.backend.delete(tpl.key % (self.name, key)) mimicdb.backend.delete(tpl.bucket % self.name) mimicdb.backend.sadd(tpl.connection, self.name) for key in self.list(force=True): mimicdb.backend.sadd(tpl.bucket % self.name, key.name) mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip('"')))
[ "def", "sync", "(", "self", ")", ":", "for", "key", "in", "mimicdb", ".", "backend", ".", "smembers", "(", "tpl", ".", "bucket", "%", "self", ".", "name", ")", ":", "mimicdb", ".", "backend", ".", "delete", "(", "tpl", ".", "key", "%", "(", "self...
Sync a bucket. Force all API calls to S3 and populate the database with the current state of S3.
[ "Sync", "a", "bucket", "." ]
python
valid
DataBiosphere/toil
src/toil/jobStores/aws/utils.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/aws/utils.py#L311-L328
def _put_attributes_using_post(self, domain_or_name, item_name, attributes, replace=True, expected_value=None): """ Monkey-patched version of SDBConnection.put_attributes that uses POST instead of GET The GET version is subject to the URL length limit which kicks in before the 256 x 1024 limit for attribute values. Using POST prevents that. https://github.com/BD2KGenomics/toil/issues/502 """ domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName': domain_name, 'ItemName': item_name} self._build_name_value_list(params, attributes, replace) if expected_value: self._build_expected_value(params, expected_value) # The addition of the verb keyword argument is the only difference to put_attributes (Hannes) return self.get_status('PutAttributes', params, verb='POST')
[ "def", "_put_attributes_using_post", "(", "self", ",", "domain_or_name", ",", "item_name", ",", "attributes", ",", "replace", "=", "True", ",", "expected_value", "=", "None", ")", ":", "domain", ",", "domain_name", "=", "self", ".", "get_domain_and_name", "(", ...
Monkey-patched version of SDBConnection.put_attributes that uses POST instead of GET The GET version is subject to the URL length limit which kicks in before the 256 x 1024 limit for attribute values. Using POST prevents that. https://github.com/BD2KGenomics/toil/issues/502
[ "Monkey", "-", "patched", "version", "of", "SDBConnection", ".", "put_attributes", "that", "uses", "POST", "instead", "of", "GET" ]
python
train
Stewori/pytypes
pytypes/__init__.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/__init__.py#L409-L422
def enable_global_auto_override_decorator(flag = True, retrospective = True): """Enables or disables global auto_override mode via decorators. See flag global_auto_override_decorator. In contrast to setting the flag directly, this function provides a retrospective option. If retrospective is true, this will also affect already imported modules, not only future imports. """ global global_auto_override_decorator global_auto_override_decorator = flag if import_hook_enabled: _install_import_hook() if global_auto_override_decorator and retrospective: _catch_up_global_auto_override_decorator() return global_auto_override_decorator
[ "def", "enable_global_auto_override_decorator", "(", "flag", "=", "True", ",", "retrospective", "=", "True", ")", ":", "global", "global_auto_override_decorator", "global_auto_override_decorator", "=", "flag", "if", "import_hook_enabled", ":", "_install_import_hook", "(", ...
Enables or disables global auto_override mode via decorators. See flag global_auto_override_decorator. In contrast to setting the flag directly, this function provides a retrospective option. If retrospective is true, this will also affect already imported modules, not only future imports.
[ "Enables", "or", "disables", "global", "auto_override", "mode", "via", "decorators", ".", "See", "flag", "global_auto_override_decorator", ".", "In", "contrast", "to", "setting", "the", "flag", "directly", "this", "function", "provides", "a", "retrospective", "optio...
python
train
cebel/pyuniprot
src/pyuniprot/manager/query.py
https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L973-L979
def subcellular_locations(self): """Distinct subcellular locations (``location`` in :class:`.models.SubcellularLocation`) :return: all distinct subcellular locations :rtype: list[str] """ return [x[0] for x in self.session.query(models.SubcellularLocation.location).all()]
[ "def", "subcellular_locations", "(", "self", ")", ":", "return", "[", "x", "[", "0", "]", "for", "x", "in", "self", ".", "session", ".", "query", "(", "models", ".", "SubcellularLocation", ".", "location", ")", ".", "all", "(", ")", "]" ]
Distinct subcellular locations (``location`` in :class:`.models.SubcellularLocation`) :return: all distinct subcellular locations :rtype: list[str]
[ "Distinct", "subcellular", "locations", "(", "location", "in", ":", "class", ":", ".", "models", ".", "SubcellularLocation", ")" ]
python
train
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L77-L128
def _args_to_cromwell(args): """Convert input arguments into cromwell inputs for config and command line. """ default_config = {"slurm": {"timelimit": "1-00:00", "account": ""}, "sge": {"memtype": "mem_free", "pename": "smp"}, "lsf": {"walltime": "24:00", "account": ""}, "htcondor": {}, "torque": {"walltime": "24:00:00", "account": ""}, "pbspro": {"walltime": "24:00:00", "account": "", "cpu_and_mem": "-l select=1:ncpus=${cpu}:mem=${memory_mb}mb"}} prefixes = {("account", "slurm"): "-A ", ("account", "pbspro"): "-A "} custom = {("noselect", "pbspro"): ("cpu_and_mem", "-l ncpus=${cpu} -l mem=${memory_mb}mb")} cl = [] config = {} # HPC scheduling if args.scheduler: if args.scheduler not in default_config: raise ValueError("Scheduler not yet supported by Cromwell: %s" % args.scheduler) if not args.queue and args.scheduler not in ["htcondor"]: raise ValueError("Need to set queue (-q) for running with an HPC scheduler") config = default_config[args.scheduler] cl.append("-Dbackend.default=%s" % args.scheduler.upper()) config["queue"] = args.queue for rs in args.resources: for r in rs.split(";"): parts = r.split("=") if len(parts) == 2: key, val = parts config[key] = prefixes.get((key, args.scheduler), "") + val elif len(parts) == 1 and (parts[0], args.scheduler) in custom: key, val = custom[(parts[0], args.scheduler)] config[key] = val cloud_type = None if args.cloud_project: if args.cloud_root and args.cloud_root.startswith("gs:"): cloud_type = "PAPI" cloud_root = args.cloud_root cloud_region = None elif ((args.cloud_root and args.cloud_root.startswith("s3:")) or (args.cloud_project and args.cloud_project.startswith("arn:"))): cloud_type = "AWSBATCH" cloud_root = args.cloud_root if not cloud_root.startswith("s3://"): cloud_root = "s3://%s" % cloud_root # split region from input Amazon Resource Name, ie arn:aws:batch:us-east-1: cloud_region = args.cloud_project.split(":")[3] else: raise ValueError("Unexpected inputs for Cromwell Cloud support: %s %s" % 
(args.cloud_project, args.cloud_root)) config = {"cloud_project": args.cloud_project, "cloud_root": cloud_root, "cloud_region": cloud_region} cl.append("-Dbackend.default=%s" % cloud_type) return cl, config, args.scheduler, cloud_type
[ "def", "_args_to_cromwell", "(", "args", ")", ":", "default_config", "=", "{", "\"slurm\"", ":", "{", "\"timelimit\"", ":", "\"1-00:00\"", ",", "\"account\"", ":", "\"\"", "}", ",", "\"sge\"", ":", "{", "\"memtype\"", ":", "\"mem_free\"", ",", "\"pename\"", ...
Convert input arguments into cromwell inputs for config and command line.
[ "Convert", "input", "arguments", "into", "cromwell", "inputs", "for", "config", "and", "command", "line", "." ]
python
train
saltstack/salt
salt/states/cabal.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cabal.py#L58-L169
def installed(name, pkgs=None, user=None, install_global=False, env=None): ''' Verify that the given package is installed and is at the correct version (if specified). .. code-block:: yaml ShellCheck-0.3.5: cabal: - installed: name The package to install user The user to run cabal install with install_global Install package globally instead of locally env A list of environment variables to be set prior to execution. The format is the same as the :py:func:`cmd.run <salt.states.cmd.run>`. state function. ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} try: call = __salt__['cabal.update'](user=user, env=env) except (CommandNotFoundError, CommandExecutionError) as err: ret['result'] = False ret['comment'] = 'Could not run cabal update {0}'.format(err) return ret if pkgs is not None: pkg_list = pkgs else: pkg_list = [name] try: installed_pkgs = __salt__['cabal.list']( user=user, installed=True, env=env) except (CommandNotFoundError, CommandExecutionError) as err: ret['result'] = False ret['comment'] = 'Error looking up \'{0}\': {1}'.format(name, err) return ret pkgs_satisfied = [] pkgs_to_install = [] for pkg in pkg_list: pkg_name, _, pkg_ver = _parse_pkg_string(pkg) if pkg_name not in installed_pkgs: pkgs_to_install.append(pkg) else: if pkg_ver: # version is specified if installed_pkgs[pkg_name] != pkg_ver: pkgs_to_install.append(pkg) else: pkgs_satisfied.append(pkg) else: pkgs_satisfied.append(pkg) if __opts__['test']: ret['result'] = None comment_msg = [] if pkgs_to_install: comment_msg.append( 'Packages(s) \'{0}\' are set to be installed'.format( ', '.join(pkgs_to_install))) if pkgs_satisfied: comment_msg.append( 'Packages(s) \'{0}\' satisfied by {1}'.format( ', '.join(pkg_list), ', '.join(pkgs_satisfied))) ret['comment'] = '. 
'.join(comment_msg) return ret if not pkgs_to_install: ret['result'] = True ret['comment'] = ('Packages(s) \'{0}\' satisfied by {1}'.format( ', '.join(pkg_list), ', '.join(pkgs_satisfied))) return ret try: call = __salt__['cabal.install'](pkgs=pkg_list, user=user, install_global=install_global, env=env) except (CommandNotFoundError, CommandExecutionError) as err: ret['result'] = False ret['comment'] = 'Error installing \'{0}\': {1}'.format( ', '.join(pkg_list), err) return ret if call and isinstance(call, dict): ret['result'] = True ret['changes'] = {'old': [], 'new': pkgs_to_install} ret['comment'] = 'Packages(s) \'{0}\' successfully installed'.format( ', '.join(pkgs_to_install)) else: ret['result'] = False ret['comment'] = 'Could not install packages(s) \'{0}\''.format( ', '.join(pkg_list)) return ret
[ "def", "installed", "(", "name", ",", "pkgs", "=", "None", ",", "user", "=", "None", ",", "install_global", "=", "False", ",", "env", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "None", ",", "'comment'", ":"...
Verify that the given package is installed and is at the correct version (if specified). .. code-block:: yaml ShellCheck-0.3.5: cabal: - installed: name The package to install user The user to run cabal install with install_global Install package globally instead of locally env A list of environment variables to be set prior to execution. The format is the same as the :py:func:`cmd.run <salt.states.cmd.run>`. state function.
[ "Verify", "that", "the", "given", "package", "is", "installed", "and", "is", "at", "the", "correct", "version", "(", "if", "specified", ")", "." ]
python
train
bram85/topydo
topydo/ui/columns/CommandLineWidget.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/ui/columns/CommandLineWidget.py#L86-L94
def insert_completion(self, p_insert): """ Inserts currently chosen completion (p_insert parameter) into proper place in edit_text and adjusts cursor position accordingly. """ start, end = self._surrounding_text final_text = start + p_insert + end self.set_edit_text(final_text) self.set_edit_pos(len(start) + len(p_insert))
[ "def", "insert_completion", "(", "self", ",", "p_insert", ")", ":", "start", ",", "end", "=", "self", ".", "_surrounding_text", "final_text", "=", "start", "+", "p_insert", "+", "end", "self", ".", "set_edit_text", "(", "final_text", ")", "self", ".", "set...
Inserts currently chosen completion (p_insert parameter) into proper place in edit_text and adjusts cursor position accordingly.
[ "Inserts", "currently", "chosen", "completion", "(", "p_insert", "parameter", ")", "into", "proper", "place", "in", "edit_text", "and", "adjusts", "cursor", "position", "accordingly", "." ]
python
train
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L1599-L1609
def exp10(x, context=None): """ Return ten raised to the power x. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_exp10, (BigFloat._implicit_convert(x),), context, )
[ "def", "exp10", "(", "x", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_exp10", ",", "(", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ",", ")", ",", "context", ",", ...
Return ten raised to the power x.
[ "Return", "ten", "raised", "to", "the", "power", "x", "." ]
python
train
swift-nav/libsbp
generator/sbpg/targets/java.py
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/java.py#L158-L183
def render_source(output_dir, package_spec, jenv=JENV): """ Render and output """ path, module_name = package_spec.filepath java_template = jenv.get_template(TEMPLATE_NAME) module_path = "com." + package_spec.identifier yaml_filepath = "/".join(package_spec.filepath) + ".yaml" includes = [".".join(i.split(".")[:-1]) for i in package_spec.includes] includes = [i for i in includes if i != "types"] for msg in package_spec.definitions: msg_name = classnameify(msg.identifier) if msg.sbp_id else msg.identifier l = "/".join(package_spec.filepath) destination_filename = "%s/com/%s/%s.java" % (output_dir, l , msg_name) # Create the output directory if it doesn't exist if not os.path.exists(os.path.dirname(destination_filename)): os.mkdir(os.path.dirname(destination_filename)) with open(destination_filename, 'w+') as f: print(destination_filename) f.write(java_template.render(m=msg, filepath=yaml_filepath, module_path=module_path, include=includes, description=package_spec.description))
[ "def", "render_source", "(", "output_dir", ",", "package_spec", ",", "jenv", "=", "JENV", ")", ":", "path", ",", "module_name", "=", "package_spec", ".", "filepath", "java_template", "=", "jenv", ".", "get_template", "(", "TEMPLATE_NAME", ")", "module_path", "...
Render and output
[ "Render", "and", "output" ]
python
train
spyder-ide/spyder
spyder/plugins/help/utils/sphinxify.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/help/utils/sphinxify.py#L242-L264
def generate_configuration(directory): """ Generates a Sphinx configuration in `directory`. Parameters ---------- directory : str Base directory to use """ # conf.py file for Sphinx conf = osp.join(get_module_source_path('spyder.plugins.help.utils'), 'conf.py') # Docstring layout page (in Jinja): layout = osp.join(osp.join(CONFDIR_PATH, 'templates'), 'layout.html') os.makedirs(osp.join(directory, 'templates')) os.makedirs(osp.join(directory, 'static')) shutil.copy(conf, directory) shutil.copy(layout, osp.join(directory, 'templates')) open(osp.join(directory, '__init__.py'), 'w').write('') open(osp.join(directory, 'static', 'empty'), 'w').write('')
[ "def", "generate_configuration", "(", "directory", ")", ":", "# conf.py file for Sphinx", "conf", "=", "osp", ".", "join", "(", "get_module_source_path", "(", "'spyder.plugins.help.utils'", ")", ",", "'conf.py'", ")", "# Docstring layout page (in Jinja):", "layout", "=", ...
Generates a Sphinx configuration in `directory`. Parameters ---------- directory : str Base directory to use
[ "Generates", "a", "Sphinx", "configuration", "in", "directory", "." ]
python
train
gasparka/pyhacores
pyhacores/cordic/cordic_core.py
https://github.com/gasparka/pyhacores/blob/16c186fbbf90385f2ba3498395123e79b6fcf340/pyhacores/cordic/cordic_core.py#L63-L86
def main(self, x, y, phase): """ Runs one step of pipelined CORDIC Returned phase is in 1 to -1 range """ self.initial_step(phase, x, y) # pipelined CORDIC for i in range(self.ITERATIONS - 1): if self.MODE == CordicMode.ROTATION: direction = self.phase[i] > 0 elif self.MODE == CordicMode.VECTORING: direction = self.y[i] < 0 if direction: self.x[i + 1] = self.x[i] - (self.y[i] >> i) self.y[i + 1] = self.y[i] + (self.x[i] >> i) self.phase[i + 1] = self.phase[i] - self.PHASE_LUT[i] else: self.x[i + 1] = self.x[i] + (self.y[i] >> i) self.y[i + 1] = self.y[i] - (self.x[i] >> i) self.phase[i + 1] = self.phase[i] + self.PHASE_LUT[i] return self.x[-1], self.y[-1], self.phase[-1]
[ "def", "main", "(", "self", ",", "x", ",", "y", ",", "phase", ")", ":", "self", ".", "initial_step", "(", "phase", ",", "x", ",", "y", ")", "# pipelined CORDIC", "for", "i", "in", "range", "(", "self", ".", "ITERATIONS", "-", "1", ")", ":", "if",...
Runs one step of pipelined CORDIC Returned phase is in 1 to -1 range
[ "Runs", "one", "step", "of", "pipelined", "CORDIC", "Returned", "phase", "is", "in", "1", "to", "-", "1", "range" ]
python
train
yfpeng/bioc
bioc/biocxml/encoder.py
https://github.com/yfpeng/bioc/blob/47ddaa010960d9ba673aefe068e7bbaf39f0fff4/bioc/biocxml/encoder.py#L50-L56
def encode_relation(relation: BioCRelation): """Encode a single relation.""" tree = etree.Element('relation', {'id': relation.id}) encode_infons(tree, relation.infons) for node in relation.nodes: tree.append(encode_node(node)) return tree
[ "def", "encode_relation", "(", "relation", ":", "BioCRelation", ")", ":", "tree", "=", "etree", ".", "Element", "(", "'relation'", ",", "{", "'id'", ":", "relation", ".", "id", "}", ")", "encode_infons", "(", "tree", ",", "relation", ".", "infons", ")", ...
Encode a single relation.
[ "Encode", "a", "single", "relation", "." ]
python
train
Tenchi2xh/Almonds
almonds/graphics/drawing.py
https://github.com/Tenchi2xh/Almonds/blob/6b27024729f055f2cb5e14ae3ca3cb428ae054bc/almonds/graphics/drawing.py#L132-L160
def draw_box(cb, x0, y0, w, h, fg=colors.default_fg, bg=colors.default_bg, h_seps=[], v_seps=[]): """ Draws a box in the given terminal. :type cb: cursebox.CurseBox """ w -= 1 h -= 1 corners = [(x0, y0), (x0 + w, y0), (x0, y0 + h), (x0 + w, y0 + h)] fg = fg() bg = bg() for i, c in enumerate(corners): cb.put(c[0], c[1], BOX_CORNERS[i], fg, bg) for s in h_seps + [0, h]: cb.put(x0 + 1, y0 + s, symbols["BOX_HORIZONTAL"] * (w - 1), fg, bg) for y in range(1, h): for s in v_seps + [0, w]: cb.put(x0 + s, y0 + y, symbols["BOX_VERTICAL"], fg, bg) for s in h_seps: cb.put(x0, y0 + s, symbols["BOX_X_LEFT"], fg, bg) cb.put(x0 + w, y0 + s, symbols["BOX_X_RIGHT"], fg, bg) for s in v_seps: cb.put(x0 + s, y0, symbols["BOX_X_TOP"], fg, bg) cb.put(x0 + s, y0 + h, symbols["BOX_X_BOTTOM"], fg, bg)
[ "def", "draw_box", "(", "cb", ",", "x0", ",", "y0", ",", "w", ",", "h", ",", "fg", "=", "colors", ".", "default_fg", ",", "bg", "=", "colors", ".", "default_bg", ",", "h_seps", "=", "[", "]", ",", "v_seps", "=", "[", "]", ")", ":", "w", "-=",...
Draws a box in the given terminal. :type cb: cursebox.CurseBox
[ "Draws", "a", "box", "in", "the", "given", "terminal", ".", ":", "type", "cb", ":", "cursebox", ".", "CurseBox" ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L1329-L1333
def group_show(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/groups#show-group" api_path = "/api/v2/groups/{id}.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
[ "def", "group_show", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/groups/{id}.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", ...
https://developer.zendesk.com/rest_api/docs/core/groups#show-group
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "groups#show", "-", "group" ]
python
train
has2k1/plotnine
plotnine/themes/theme.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/theme.py#L195-L210
def add_theme(self, other, inplace=False): """Add themes together. Subclasses should not override this method. This will be called when adding two instances of class 'theme' together. A complete theme will annihilate any previous themes. Partial themes can be added together and can be added to a complete theme. """ if other.complete: return other theme_copy = self if inplace else deepcopy(self) theme_copy.themeables.update(deepcopy(other.themeables)) return theme_copy
[ "def", "add_theme", "(", "self", ",", "other", ",", "inplace", "=", "False", ")", ":", "if", "other", ".", "complete", ":", "return", "other", "theme_copy", "=", "self", "if", "inplace", "else", "deepcopy", "(", "self", ")", "theme_copy", ".", "themeable...
Add themes together. Subclasses should not override this method. This will be called when adding two instances of class 'theme' together. A complete theme will annihilate any previous themes. Partial themes can be added together and can be added to a complete theme.
[ "Add", "themes", "together", "." ]
python
train
sirrice/pygg
pygg/pygg.py
https://github.com/sirrice/pygg/blob/b36e19b3827e0a7d661de660b04d55a73f35896b/pygg/pygg.py#L318-L359
def gg_ipython(plot, data, width=IPYTHON_IMAGE_SIZE, height=None, *args, **kwargs): """Render pygg in an IPython notebook Allows one to say things like: import pygg p = pygg.ggplot('diamonds', pygg.aes(x='carat', y='price', color='clarity')) p += pygg.geom_point(alpha=0.5, size = 2) p += pygg.scale_x_log10(limits=[1, 2]) pygg.gg_ipython(p, data=None, quiet=True) directly in an IPython notebook and see the resulting ggplot2 image displayed inline. This function is print a warning if the IPython library cannot be imported. The ggplot2 image is rendered as a PNG and not as a vectorized graphics object right now. Note that by default gg_ipython sets the output height and width to IPYTHON_IMAGE_SIZE pixels as this is a reasonable default size for a browser-based notebook. Height is by default None, indicating that height should be set to the same value as width. It is possible to adjust the aspect ratio of the output by providing non-None values for both width and height """ try: import IPython.display tmp_image_filename = tempfile.NamedTemporaryFile(suffix='.jpg').name # Quiet by default kwargs['quiet'] = kwargs.get('quiet', True) if width is None: raise ValueError("Width cannot be None") height = height or width w_in, h_in = size_r_img_inches(width, height) ggsave(name=tmp_image_filename, plot=plot, data=data, dpi=600, width=w_in, height=h_in, units=esc('in'), *args, **kwargs) return IPython.display.Image(filename=tmp_image_filename, width=width, height=height) except ImportError: print "Could't load IPython library; integration is disabled"
[ "def", "gg_ipython", "(", "plot", ",", "data", ",", "width", "=", "IPYTHON_IMAGE_SIZE", ",", "height", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "import", "IPython", ".", "display", "tmp_image_filename", "=", "tempfile...
Render pygg in an IPython notebook Allows one to say things like: import pygg p = pygg.ggplot('diamonds', pygg.aes(x='carat', y='price', color='clarity')) p += pygg.geom_point(alpha=0.5, size = 2) p += pygg.scale_x_log10(limits=[1, 2]) pygg.gg_ipython(p, data=None, quiet=True) directly in an IPython notebook and see the resulting ggplot2 image displayed inline. This function is print a warning if the IPython library cannot be imported. The ggplot2 image is rendered as a PNG and not as a vectorized graphics object right now. Note that by default gg_ipython sets the output height and width to IPYTHON_IMAGE_SIZE pixels as this is a reasonable default size for a browser-based notebook. Height is by default None, indicating that height should be set to the same value as width. It is possible to adjust the aspect ratio of the output by providing non-None values for both width and height
[ "Render", "pygg", "in", "an", "IPython", "notebook" ]
python
train
chaoss/grimoirelab-elk
grimoire_elk/enriched/ceres_base.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/ceres_base.py#L161-L175
def read_item(self, from_date=None): """Read items and return them one by one. :param from_date: start date for incremental reading. :return: next single item when any available. :raises ValueError: `metadata__timestamp` field not found in index :raises NotFoundError: index not found in ElasticSearch """ search_query = self._build_search_query(from_date) for hit in helpers.scan(self._es_conn, search_query, scroll='300m', index=self._es_index, preserve_order=True): yield hit
[ "def", "read_item", "(", "self", ",", "from_date", "=", "None", ")", ":", "search_query", "=", "self", ".", "_build_search_query", "(", "from_date", ")", "for", "hit", "in", "helpers", ".", "scan", "(", "self", ".", "_es_conn", ",", "search_query", ",", ...
Read items and return them one by one. :param from_date: start date for incremental reading. :return: next single item when any available. :raises ValueError: `metadata__timestamp` field not found in index :raises NotFoundError: index not found in ElasticSearch
[ "Read", "items", "and", "return", "them", "one", "by", "one", "." ]
python
train
spyder-ide/spyder
spyder/plugins/ipythonconsole/widgets/help.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/widgets/help.py#L40-L69
def get_signature(self, content): """Get signature from inspect reply content""" data = content.get('data', {}) text = data.get('text/plain', '') if text: text = ANSI_OR_SPECIAL_PATTERN.sub('', text) self._control.current_prompt_pos = self._prompt_pos line = self._control.get_current_line_to_cursor() name = line[:-1].split('(')[-1] # Take last token after a ( name = name.split('.')[-1] # Then take last token after a . # Clean name from invalid chars try: name = self.clean_invalid_var_chars(name).split('_')[-1] except: pass argspec = getargspecfromtext(text) if argspec: # This covers cases like np.abs, whose docstring is # the same as np.absolute and because of that a proper # signature can't be obtained correctly signature = name + argspec else: signature = getsignaturefromtext(text, name) # Remove docstring for uniformity with editor signature = signature.split('Docstring:')[0] return signature else: return ''
[ "def", "get_signature", "(", "self", ",", "content", ")", ":", "data", "=", "content", ".", "get", "(", "'data'", ",", "{", "}", ")", "text", "=", "data", ".", "get", "(", "'text/plain'", ",", "''", ")", "if", "text", ":", "text", "=", "ANSI_OR_SPE...
Get signature from inspect reply content
[ "Get", "signature", "from", "inspect", "reply", "content" ]
python
train
PyCQA/astroid
astroid/rebuilder.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/rebuilder.py#L861-L868
def visit_tuple(self, node, parent): """visit a Tuple node by returning a fresh instance of it""" context = self._get_context(node) newnode = nodes.Tuple( ctx=context, lineno=node.lineno, col_offset=node.col_offset, parent=parent ) newnode.postinit([self.visit(child, newnode) for child in node.elts]) return newnode
[ "def", "visit_tuple", "(", "self", ",", "node", ",", "parent", ")", ":", "context", "=", "self", ".", "_get_context", "(", "node", ")", "newnode", "=", "nodes", ".", "Tuple", "(", "ctx", "=", "context", ",", "lineno", "=", "node", ".", "lineno", ",",...
visit a Tuple node by returning a fresh instance of it
[ "visit", "a", "Tuple", "node", "by", "returning", "a", "fresh", "instance", "of", "it" ]
python
train
pantsbuild/pex
pex/common.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/common.py#L239-L254
def clone(self, into=None): """Clone this chroot. :keyword into: (optional) An optional destination directory to clone the Chroot into. If not specified, a temporary directory will be created. .. versionchanged:: 0.8 The temporary directory created when ``into`` is not specified is now garbage collected on interpreter exit. """ into = into or safe_mkdtemp() new_chroot = Chroot(into) for label, fileset in self.filesets.items(): for fn in fileset: new_chroot.link(os.path.join(self.chroot, fn), fn, label=label) return new_chroot
[ "def", "clone", "(", "self", ",", "into", "=", "None", ")", ":", "into", "=", "into", "or", "safe_mkdtemp", "(", ")", "new_chroot", "=", "Chroot", "(", "into", ")", "for", "label", ",", "fileset", "in", "self", ".", "filesets", ".", "items", "(", "...
Clone this chroot. :keyword into: (optional) An optional destination directory to clone the Chroot into. If not specified, a temporary directory will be created. .. versionchanged:: 0.8 The temporary directory created when ``into`` is not specified is now garbage collected on interpreter exit.
[ "Clone", "this", "chroot", "." ]
python
train
Shoobx/xmldiff
xmldiff/_diff_match_patch_py3.py
https://github.com/Shoobx/xmldiff/blob/ec7835bce9ba69ff4ce03ab6c11397183b6f8411/xmldiff/_diff_match_patch_py3.py#L1407-L1518
def patch_make(self, a, b=None, c=None): """Compute a list of patches to turn text1 into text2. Use diffs if provided, otherwise compute it ourselves. There are four ways to call this function, depending on what data is available to the caller: Method 1: a = text1, b = text2 Method 2: a = diffs Method 3 (optimal): a = text1, b = diffs Method 4 (deprecated, use method 3): a = text1, b = text2, c = diffs Args: a: text1 (methods 1,3,4) or Array of diff tuples for text1 to text2 (method 2). b: text2 (methods 1,4) or Array of diff tuples for text1 to text2 (method 3) or undefined (method 2). c: Array of diff tuples for text1 to text2 (method 4) or undefined (methods 1,2,3). Returns: Array of Patch objects. """ text1 = None diffs = None if isinstance(a, str) and isinstance(b, str) and c is None: # Method 1: text1, text2 # Compute diffs from text1 and text2. text1 = a diffs = self.diff_main(text1, b, True) if len(diffs) > 2: self.diff_cleanupSemantic(diffs) self.diff_cleanupEfficiency(diffs) elif isinstance(a, list) and b is None and c is None: # Method 2: diffs # Compute text1 from diffs. diffs = a text1 = self.diff_text1(diffs) elif isinstance(a, str) and isinstance(b, list) and c is None: # Method 3: text1, diffs text1 = a diffs = b elif (isinstance(a, str) and isinstance(b, str) and isinstance(c, list)): # Method 4: text1, text2, diffs # text2 is not used. text1 = a diffs = c else: raise ValueError("Unknown call format to patch_make.") if not diffs: return [] # Get rid of the None case. patches = [] patch = patch_obj() char_count1 = 0 # Number of characters into the text1 string. char_count2 = 0 # Number of characters into the text2 string. prepatch_text = text1 # Recreate the patches to determine context info. postpatch_text = text1 for x in range(len(diffs)): (diff_type, diff_text) = diffs[x] if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL: # A new patch starts here. 
patch.start1 = char_count1 patch.start2 = char_count2 if diff_type == self.DIFF_INSERT: # Insertion patch.diffs.append(diffs[x]) patch.length2 += len(diff_text) postpatch_text = (postpatch_text[:char_count2] + diff_text + postpatch_text[char_count2:]) elif diff_type == self.DIFF_DELETE: # Deletion. patch.length1 += len(diff_text) patch.diffs.append(diffs[x]) postpatch_text = (postpatch_text[:char_count2] + postpatch_text[char_count2 + len(diff_text):]) elif (diff_type == self.DIFF_EQUAL and len(diff_text) <= 2 * self.Patch_Margin and len(patch.diffs) != 0 and len(diffs) != x + 1): # Small equality inside a patch. patch.diffs.append(diffs[x]) patch.length1 += len(diff_text) patch.length2 += len(diff_text) if (diff_type == self.DIFF_EQUAL and len(diff_text) >= 2 * self.Patch_Margin): # Time for a new patch. if len(patch.diffs) != 0: self.patch_addContext(patch, prepatch_text) patches.append(patch) patch = patch_obj() # Unlike Unidiff, our patch lists have a rolling context. # https://github.com/google/diff-match-patch/wiki/Unidiff # Update prepatch text & pos to reflect the application of the # just completed patch. prepatch_text = postpatch_text char_count1 = char_count2 # Update the current character count. if diff_type != self.DIFF_INSERT: char_count1 += len(diff_text) if diff_type != self.DIFF_DELETE: char_count2 += len(diff_text) # Pick up the leftover patch if not empty. if len(patch.diffs) != 0: self.patch_addContext(patch, prepatch_text) patches.append(patch) return patches
[ "def", "patch_make", "(", "self", ",", "a", ",", "b", "=", "None", ",", "c", "=", "None", ")", ":", "text1", "=", "None", "diffs", "=", "None", "if", "isinstance", "(", "a", ",", "str", ")", "and", "isinstance", "(", "b", ",", "str", ")", "and"...
Compute a list of patches to turn text1 into text2. Use diffs if provided, otherwise compute it ourselves. There are four ways to call this function, depending on what data is available to the caller: Method 1: a = text1, b = text2 Method 2: a = diffs Method 3 (optimal): a = text1, b = diffs Method 4 (deprecated, use method 3): a = text1, b = text2, c = diffs Args: a: text1 (methods 1,3,4) or Array of diff tuples for text1 to text2 (method 2). b: text2 (methods 1,4) or Array of diff tuples for text1 to text2 (method 3) or undefined (method 2). c: Array of diff tuples for text1 to text2 (method 4) or undefined (methods 1,2,3). Returns: Array of Patch objects.
[ "Compute", "a", "list", "of", "patches", "to", "turn", "text1", "into", "text2", ".", "Use", "diffs", "if", "provided", "otherwise", "compute", "it", "ourselves", ".", "There", "are", "four", "ways", "to", "call", "this", "function", "depending", "on", "wh...
python
train
mk-fg/python-onedrive
onedrive/api_v5.py
https://github.com/mk-fg/python-onedrive/blob/74d3f6605b0e8a9031a2aab8092f551293ffb533/onedrive/api_v5.py#L260-L269
def auth_user_process_url(self, url): 'Process tokens and errors from redirect_uri.' url = urlparse.urlparse(url) url_qs = dict(it.chain.from_iterable( urlparse.parse_qsl(v) for v in [url.query, url.fragment] )) if url_qs.get('error'): raise APIAuthError( '{} :: {}'.format(url_qs['error'], url_qs.get('error_description')) ) self.auth_code = url_qs['code'] return self.auth_code
[ "def", "auth_user_process_url", "(", "self", ",", "url", ")", ":", "url", "=", "urlparse", ".", "urlparse", "(", "url", ")", "url_qs", "=", "dict", "(", "it", ".", "chain", ".", "from_iterable", "(", "urlparse", ".", "parse_qsl", "(", "v", ")", "for", ...
Process tokens and errors from redirect_uri.
[ "Process", "tokens", "and", "errors", "from", "redirect_uri", "." ]
python
test
ellethee/argparseinator
argparseinator/__init__.py
https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L697-L718
def cmd_auth(auth_phrase=None): """ set authorization for command or subcommand. """ def decorate(func): """ decorates the funcion """ # get the Singleton ap_ = ArgParseInator(skip_init=True) # set the authorization name auth_name = id(func) if auth_phrase is None: # if we don't have a specific auth_phrase we set the # **authorization needed** to True ap_.auths[auth_name] = True else: # else if we have a specific auth_phrase we set it for the # command authorization ap_.auths[auth_name] = str(auth_phrase) return func return decorate
[ "def", "cmd_auth", "(", "auth_phrase", "=", "None", ")", ":", "def", "decorate", "(", "func", ")", ":", "\"\"\"\n decorates the funcion\n \"\"\"", "# get the Singleton", "ap_", "=", "ArgParseInator", "(", "skip_init", "=", "True", ")", "# set the authori...
set authorization for command or subcommand.
[ "set", "authorization", "for", "command", "or", "subcommand", "." ]
python
train
mabuchilab/QNET
src/qnet/algebra/core/hilbert_space_algebra.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/hilbert_space_algebra.py#L770-L781
def remove(self, other): """Remove a particular factor from a tensor product space.""" if other is FullSpace: return TrivialSpace if other is TrivialSpace: return self if isinstance(other, ProductSpace): oops = set(other.operands) else: oops = {other} return ProductSpace.create( *sorted(set(self.operands).difference(oops)))
[ "def", "remove", "(", "self", ",", "other", ")", ":", "if", "other", "is", "FullSpace", ":", "return", "TrivialSpace", "if", "other", "is", "TrivialSpace", ":", "return", "self", "if", "isinstance", "(", "other", ",", "ProductSpace", ")", ":", "oops", "=...
Remove a particular factor from a tensor product space.
[ "Remove", "a", "particular", "factor", "from", "a", "tensor", "product", "space", "." ]
python
train
AtteqCom/zsl
src/zsl/resource/model_resource.py
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/resource/model_resource.py#L402-L409
def _update_one(self, ctx): """ Update row """ assert isinstance(ctx, ResourceQueryContext) fields = ctx.data row_id = ctx.get_row_id() return self._update_one_simple(row_id, fields, ctx)
[ "def", "_update_one", "(", "self", ",", "ctx", ")", ":", "assert", "isinstance", "(", "ctx", ",", "ResourceQueryContext", ")", "fields", "=", "ctx", ".", "data", "row_id", "=", "ctx", ".", "get_row_id", "(", ")", "return", "self", ".", "_update_one_simple"...
Update row
[ "Update", "row" ]
python
train
mushkevych/scheduler
synergy/db/manager/db_manager.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/db/manager/db_manager.py#L44-L55
def update_db(): """ writes to managed_process table records from the context.process_context """ logger = get_logger(PROCESS_SCHEDULER) managed_process_dao = ManagedProcessDao(logger) managed_process_dao.clear() for process_name, process_entry in context.process_context.items(): if not isinstance(process_entry, ManagedProcessEntry): continue managed_process_dao.update(process_entry) logger.info('Updated DB with process entry {0} from the context.'.format(process_entry.key))
[ "def", "update_db", "(", ")", ":", "logger", "=", "get_logger", "(", "PROCESS_SCHEDULER", ")", "managed_process_dao", "=", "ManagedProcessDao", "(", "logger", ")", "managed_process_dao", ".", "clear", "(", ")", "for", "process_name", ",", "process_entry", "in", ...
writes to managed_process table records from the context.process_context
[ "writes", "to", "managed_process", "table", "records", "from", "the", "context", ".", "process_context" ]
python
train
postlund/pyatv
pyatv/mrp/messages.py
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/mrp/messages.py#L127-L132
def command(cmd): """Playback command request.""" message = create(protobuf.SEND_COMMAND_MESSAGE) send_command = message.inner() send_command.command = cmd return message
[ "def", "command", "(", "cmd", ")", ":", "message", "=", "create", "(", "protobuf", ".", "SEND_COMMAND_MESSAGE", ")", "send_command", "=", "message", ".", "inner", "(", ")", "send_command", ".", "command", "=", "cmd", "return", "message" ]
Playback command request.
[ "Playback", "command", "request", "." ]
python
train
markchil/gptools
gptools/utils.py
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1397-L1427
def Kn2Der(nu, y, n=0): r"""Find the derivatives of :math:`K_\nu(y^{1/2})`. Parameters ---------- nu : float The order of the modified Bessel function of the second kind. y : array of float The values to evaluate at. n : nonnegative int, optional The order of derivative to take. """ n = int(n) y = scipy.asarray(y, dtype=float) sqrty = scipy.sqrt(y) if n == 0: K = scipy.special.kv(nu, sqrty) else: K = scipy.zeros_like(y) x = scipy.asarray( [ fixed_poch(1.5 - j, j) * y**(0.5 - j) for j in scipy.arange(1.0, n + 1.0, dtype=float) ] ).T for k in scipy.arange(1.0, n + 1.0, dtype=float): K += ( scipy.special.kvp(nu, sqrty, n=int(k)) * incomplete_bell_poly(n, int(k), x) ) return K
[ "def", "Kn2Der", "(", "nu", ",", "y", ",", "n", "=", "0", ")", ":", "n", "=", "int", "(", "n", ")", "y", "=", "scipy", ".", "asarray", "(", "y", ",", "dtype", "=", "float", ")", "sqrty", "=", "scipy", ".", "sqrt", "(", "y", ")", "if", "n"...
r"""Find the derivatives of :math:`K_\nu(y^{1/2})`. Parameters ---------- nu : float The order of the modified Bessel function of the second kind. y : array of float The values to evaluate at. n : nonnegative int, optional The order of derivative to take.
[ "r", "Find", "the", "derivatives", "of", ":", "math", ":", "K_", "\\", "nu", "(", "y^", "{", "1", "/", "2", "}", ")", ".", "Parameters", "----------", "nu", ":", "float", "The", "order", "of", "the", "modified", "Bessel", "function", "of", "the", "...
python
train
thiagopbueno/tf-rddlsim
tfrddlsim/viz/generic_visualizer.py
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/viz/generic_visualizer.py#L50-L65
def _render_trajectories(self, trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array]) -> None: '''Prints the first batch of simulated `trajectories`. Args: trajectories: NonFluents, states, actions, interms and rewards. ''' if self._verbose: non_fluents, initial_state, states, actions, interms, rewards = trajectories shape = states[0][1].shape batch_size, horizon, = shape[0], shape[1] states = [(s[0], s[1][0]) for s in states] interms = [(f[0], f[1][0]) for f in interms] actions = [(a[0], a[1][0]) for a in actions] rewards = np.reshape(rewards, [batch_size, horizon])[0] self._render_batch(non_fluents, states, actions, interms, rewards)
[ "def", "_render_trajectories", "(", "self", ",", "trajectories", ":", "Tuple", "[", "NonFluents", ",", "Fluents", ",", "Fluents", ",", "Fluents", ",", "np", ".", "array", "]", ")", "->", "None", ":", "if", "self", ".", "_verbose", ":", "non_fluents", ","...
Prints the first batch of simulated `trajectories`. Args: trajectories: NonFluents, states, actions, interms and rewards.
[ "Prints", "the", "first", "batch", "of", "simulated", "trajectories", "." ]
python
train
gusdan/geoindex
geoindex/geo_grid_index.py
https://github.com/gusdan/geoindex/blob/d1b3b5a52271200713a64041576caa1f2d588f55/geoindex/geo_grid_index.py#L51-L75
def get_nearest_points_dirty(self, center_point, radius, unit='km'): """ return approx list of point from circle with given center and radius it uses geohash and return with some error (see GEO_HASH_ERRORS) :param center_point: center of search circle :param radius: radius of search circle :return: list of GeoPoints from given area """ if unit == 'mi': radius = utils.mi_to_km(radius) grid_size = GEO_HASH_GRID_SIZE[self.precision] if radius > grid_size / 2: # radius is too big for current grid, we cannot use 9 neighbors # to cover all possible points suggested_precision = 0 for precision, max_size in GEO_HASH_GRID_SIZE.items(): if radius > max_size / 2: suggested_precision = precision - 1 break raise ValueError( 'Too large radius, please rebuild GeoHashGrid with ' 'precision={0}'.format(suggested_precision) ) me_and_neighbors = geohash.expand(self.get_point_hash(center_point)) return chain(*(self.data.get(key, []) for key in me_and_neighbors))
[ "def", "get_nearest_points_dirty", "(", "self", ",", "center_point", ",", "radius", ",", "unit", "=", "'km'", ")", ":", "if", "unit", "==", "'mi'", ":", "radius", "=", "utils", ".", "mi_to_km", "(", "radius", ")", "grid_size", "=", "GEO_HASH_GRID_SIZE", "[...
return approx list of point from circle with given center and radius it uses geohash and return with some error (see GEO_HASH_ERRORS) :param center_point: center of search circle :param radius: radius of search circle :return: list of GeoPoints from given area
[ "return", "approx", "list", "of", "point", "from", "circle", "with", "given", "center", "and", "radius", "it", "uses", "geohash", "and", "return", "with", "some", "error", "(", "see", "GEO_HASH_ERRORS", ")", ":", "param", "center_point", ":", "center", "of",...
python
train
census-instrumentation/opencensus-python
contrib/opencensus-ext-grpc/opencensus/ext/grpc/utils.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-grpc/opencensus/ext/grpc/utils.py#L25-L40
def wrap_iter_with_message_events( request_or_response_iter, span, message_event_type ): """Wraps a request or response iterator to add message events to the span for each proto message sent or received """ for message_id, message in enumerate(request_or_response_iter, start=1): add_message_event( proto_message=message, span=span, message_event_type=message_event_type, message_id=message_id ) yield message
[ "def", "wrap_iter_with_message_events", "(", "request_or_response_iter", ",", "span", ",", "message_event_type", ")", ":", "for", "message_id", ",", "message", "in", "enumerate", "(", "request_or_response_iter", ",", "start", "=", "1", ")", ":", "add_message_event", ...
Wraps a request or response iterator to add message events to the span for each proto message sent or received
[ "Wraps", "a", "request", "or", "response", "iterator", "to", "add", "message", "events", "to", "the", "span", "for", "each", "proto", "message", "sent", "or", "received" ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/tasks.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L2596-L2605
def from_input(cls, input, workdir=None, manager=None): """ Create an instance of `AbinitTask` from an ABINIT input. Args: ainput: `AbinitInput` object. workdir: Path to the working directory. manager: :class:`TaskManager` object. """ return cls(input, workdir=workdir, manager=manager)
[ "def", "from_input", "(", "cls", ",", "input", ",", "workdir", "=", "None", ",", "manager", "=", "None", ")", ":", "return", "cls", "(", "input", ",", "workdir", "=", "workdir", ",", "manager", "=", "manager", ")" ]
Create an instance of `AbinitTask` from an ABINIT input. Args: ainput: `AbinitInput` object. workdir: Path to the working directory. manager: :class:`TaskManager` object.
[ "Create", "an", "instance", "of", "AbinitTask", "from", "an", "ABINIT", "input", "." ]
python
train
mitsei/dlkit
dlkit/json_/repository/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L1831-L1867
def delete_asset_content(self, asset_content_id): """Deletes content from an ``Asset``. arg: asset_content_id (osid.id.Id): the ``Id`` of the ``AssetContent`` raise: NotFound - ``asset_content_id`` is not found raise: NullArgument - ``asset_content_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.repository.AssetAdminSession.delete_asset_content_template from dlkit.abstract_osid.id.primitives import Id as ABCId from .objects import AssetContent collection = JSONClientValidated('repository', collection='Asset', runtime=self._runtime) if not isinstance(asset_content_id, ABCId): raise errors.InvalidArgument('the argument is not a valid OSID Id') asset = collection.find_one({'assetContents._id': ObjectId(asset_content_id.get_identifier())}) index = 0 found = False for i in asset['assetContents']: if i['_id'] == ObjectId(asset_content_id.get_identifier()): asset_content_map = asset['assetContents'].pop(index) index += 1 found = True if not found: raise errors.OperationFailed() AssetContent( osid_object_map=asset_content_map, runtime=self._runtime, proxy=self._proxy)._delete() collection.save(asset)
[ "def", "delete_asset_content", "(", "self", ",", "asset_content_id", ")", ":", "# Implemented from template for", "# osid.repository.AssetAdminSession.delete_asset_content_template", "from", "dlkit", ".", "abstract_osid", ".", "id", ".", "primitives", "import", "Id", "as", ...
Deletes content from an ``Asset``. arg: asset_content_id (osid.id.Id): the ``Id`` of the ``AssetContent`` raise: NotFound - ``asset_content_id`` is not found raise: NullArgument - ``asset_content_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Deletes", "content", "from", "an", "Asset", "." ]
python
train
rspivak/crammit
src/crammit/__init__.py
https://github.com/rspivak/crammit/blob/ebd0f8a9b5267e6e1483f8886329ac262ab272d6/src/crammit/__init__.py#L75-L107
def _get_bundles_by_type(self, type): """Get a dictionary of bundles for requested type. Args: type: 'javascript' or 'css' """ bundles = {} bundle_definitions = self.config.get(type) if bundle_definitions is None: return bundles # bundle name: common for bundle_name, paths in bundle_definitions.items(): bundle_files = [] # path: static/js/vendor/*.js for path in paths: # pattern: /tmp/static/js/vendor/*.js pattern = abspath = os.path.join(self.basedir, path) # assetdir: /tmp/static/js/vendor # assetdir contents: # - /tmp/static/js/vendor/t1.js # - /tmp/static/js/vendor/t2.js # - /tmp/static/js/vendor/index.html assetdir = os.path.dirname(abspath) # expanded_fnames after filtering using the pattern: # - /tmp/static/js/vendor/t1.js # - /tmp/static/js/vendor/t2.js fnames = [os.path.join(assetdir, fname) for fname in os.listdir(assetdir)] expanded_fnames = fnmatch.filter(fnames, pattern) bundle_files.extend(sorted(expanded_fnames)) bundles[bundle_name] = bundle_files return bundles
[ "def", "_get_bundles_by_type", "(", "self", ",", "type", ")", ":", "bundles", "=", "{", "}", "bundle_definitions", "=", "self", ".", "config", ".", "get", "(", "type", ")", "if", "bundle_definitions", "is", "None", ":", "return", "bundles", "# bundle name: c...
Get a dictionary of bundles for requested type. Args: type: 'javascript' or 'css'
[ "Get", "a", "dictionary", "of", "bundles", "for", "requested", "type", "." ]
python
train
NeuroML/pyNeuroML
examples/component_evaluation.py
https://github.com/NeuroML/pyNeuroML/blob/aeba2e3040b360bb26556f643cccbfb3dac3b8fb/examples/component_evaluation.py#L5-L33
def main(args=None): """Main""" vs = [(v-100)*0.001 for v in range(200)] for f in ['IM.channel.nml','Kd.channel.nml']: nml_doc = pynml.read_neuroml2_file(f) for ct in nml_doc.ComponentType: ys = [] for v in vs: req_variables = {'v':'%sV'%v,'vShift':'10mV'} vals = pynml.evaluate_component(ct,req_variables=req_variables) print vals if 'x' in vals: ys.append(vals['x']) if 't' in vals: ys.append(vals['t']) if 'r' in vals: ys.append(vals['r']) ax = pynml.generate_plot([vs],[ys], "Some traces from %s in %s"%(ct.name,f), show_plot_already=False ) print vals plt.show()
[ "def", "main", "(", "args", "=", "None", ")", ":", "vs", "=", "[", "(", "v", "-", "100", ")", "*", "0.001", "for", "v", "in", "range", "(", "200", ")", "]", "for", "f", "in", "[", "'IM.channel.nml'", ",", "'Kd.channel.nml'", "]", ":", "nml_doc", ...
Main
[ "Main" ]
python
train
mlperf/training
reinforcement/tensorflow/minigo/sgf_wrapper.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/sgf_wrapper.py#L142-L170
def replay_sgf(sgf_contents): """Wrapper for sgf files, returning go.PositionWithContext instances. It does NOT return the very final position, as there is no follow up. To get the final position, call pwc.position.play_move(pwc.next_move) on the last PositionWithContext returned. Example usage: with open(filename) as f: for position_w_context in replay_sgf(f.read()): print(position_w_context.position) """ root_node = get_sgf_root_node(sgf_contents) props = root_node.properties assert int(sgf_prop(props.get('GM', ['1']))) == 1, "Not a Go SGF!" komi = 0 if props.get('KM') is not None: komi = float(sgf_prop(props.get('KM'))) result = utils.parse_game_result(sgf_prop(props.get('RE', ''))) pos = Position(komi=komi) current_node = root_node while pos is not None and current_node.next is not None: pos = handle_node(pos, current_node) maybe_correct_next(pos, current_node.next) next_move = get_next_move(current_node) yield PositionWithContext(pos, next_move, result) current_node = current_node.next
[ "def", "replay_sgf", "(", "sgf_contents", ")", ":", "root_node", "=", "get_sgf_root_node", "(", "sgf_contents", ")", "props", "=", "root_node", ".", "properties", "assert", "int", "(", "sgf_prop", "(", "props", ".", "get", "(", "'GM'", ",", "[", "'1'", "]"...
Wrapper for sgf files, returning go.PositionWithContext instances. It does NOT return the very final position, as there is no follow up. To get the final position, call pwc.position.play_move(pwc.next_move) on the last PositionWithContext returned. Example usage: with open(filename) as f: for position_w_context in replay_sgf(f.read()): print(position_w_context.position)
[ "Wrapper", "for", "sgf", "files", "returning", "go", ".", "PositionWithContext", "instances", "." ]
python
train
spyder-ide/conda-manager
conda_manager/api/conda_api.py
https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/conda_api.py#L655-L661
def remove_environment(self, name=None, path=None, **kwargs): """ Remove an environment entirely. See ``remove``. """ return self.remove(name=name, path=path, all=True, **kwargs)
[ "def", "remove_environment", "(", "self", ",", "name", "=", "None", ",", "path", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "remove", "(", "name", "=", "name", ",", "path", "=", "path", ",", "all", "=", "True", ",", "*...
Remove an environment entirely. See ``remove``.
[ "Remove", "an", "environment", "entirely", "." ]
python
train
hit9/rux
rux/parser.py
https://github.com/hit9/rux/blob/d7f60722658a3b83ac6d7bb3ca2790ac9c926b59/rux/parser.py#L44-L57
def block_code(self, text, lang): """text: unicode text to render""" if not lang: return self._code_no_lexer(text) try: lexer = get_lexer_by_name(lang, stripall=True) except ClassNotFound: # lexer not found, use plain text return self._code_no_lexer(text) formatter = HtmlFormatter() return highlight(text, lexer, formatter)
[ "def", "block_code", "(", "self", ",", "text", ",", "lang", ")", ":", "if", "not", "lang", ":", "return", "self", ".", "_code_no_lexer", "(", "text", ")", "try", ":", "lexer", "=", "get_lexer_by_name", "(", "lang", ",", "stripall", "=", "True", ")", ...
text: unicode text to render
[ "text", ":", "unicode", "text", "to", "render" ]
python
valid
adobe-apiplatform/umapi-client.py
umapi_client/api.py
https://github.com/adobe-apiplatform/umapi-client.py/blob/1c446d79643cc8615adaa23e12dce3ac5782cf76/umapi_client/api.py#L83-L100
def insert(self, **kwargs): """ Insert commands at the beginning of the sequence. This is provided because certain commands have to come first (such as user creation), but may be need to beadded after other commands have already been specified. Later calls to insert put their commands before those in the earlier calls. Also, since the order of iterated kwargs is not guaranteed (in Python 2.x), you should really only call insert with one keyword at a time. See the doc of append for more details. :param kwargs: the key/value pair to append first :return: the action, so you can append Action(...).insert(...).append(...) """ for k, v in six.iteritems(kwargs): self.commands.insert(0, {k: v}) return self
[ "def", "insert", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "kwargs", ")", ":", "self", ".", "commands", ".", "insert", "(", "0", ",", "{", "k", ":", "v", "}", ")", "return", "sel...
Insert commands at the beginning of the sequence. This is provided because certain commands have to come first (such as user creation), but may be need to beadded after other commands have already been specified. Later calls to insert put their commands before those in the earlier calls. Also, since the order of iterated kwargs is not guaranteed (in Python 2.x), you should really only call insert with one keyword at a time. See the doc of append for more details. :param kwargs: the key/value pair to append first :return: the action, so you can append Action(...).insert(...).append(...)
[ "Insert", "commands", "at", "the", "beginning", "of", "the", "sequence", "." ]
python
train
SwissDataScienceCenter/renku-python
renku/cli/_options.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/_options.py#L67-L78
def install_completion(ctx, attr, value): # pragma: no cover """Install completion for the current shell.""" import click_completion.core if not value or ctx.resilient_parsing: return value shell, path = click_completion.core.install() click.secho( '{0} completion installed in {1}'.format(shell, path), fg='green' ) ctx.exit()
[ "def", "install_completion", "(", "ctx", ",", "attr", ",", "value", ")", ":", "# pragma: no cover", "import", "click_completion", ".", "core", "if", "not", "value", "or", "ctx", ".", "resilient_parsing", ":", "return", "value", "shell", ",", "path", "=", "cl...
Install completion for the current shell.
[ "Install", "completion", "for", "the", "current", "shell", "." ]
python
train
DAI-Lab/Copulas
copulas/bivariate/clayton.py
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/bivariate/clayton.py#L37-L63
def cumulative_distribution(self, X): """Computes the cumulative distribution function for the copula, :math:`C(u, v)` Args: X: `np.ndarray` Returns: np.array: cumulative probability """ self.check_fit() U, V = self.split_matrix(X) if (V == 0).all() or (U == 0).all(): return np.zeros(V.shape[0]) else: cdfs = [ np.power( np.power(U[i], -self.theta) + np.power(V[i], -self.theta) - 1, -1.0 / self.theta ) if (U[i] > 0 and V[i] > 0) else 0 for i in range(len(U)) ] return np.array([max(x, 0) for x in cdfs])
[ "def", "cumulative_distribution", "(", "self", ",", "X", ")", ":", "self", ".", "check_fit", "(", ")", "U", ",", "V", "=", "self", ".", "split_matrix", "(", "X", ")", "if", "(", "V", "==", "0", ")", ".", "all", "(", ")", "or", "(", "U", "==", ...
Computes the cumulative distribution function for the copula, :math:`C(u, v)` Args: X: `np.ndarray` Returns: np.array: cumulative probability
[ "Computes", "the", "cumulative", "distribution", "function", "for", "the", "copula", ":", "math", ":", "C", "(", "u", "v", ")" ]
python
train
tensorflow/probability
tensorflow_probability/python/math/linalg.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/linalg.py#L448-L543
def lu_solve(lower_upper, perm, rhs,
             validate_args=False,
             name=None):
  """Solves systems of linear eqns `A X = RHS`, given LU factorizations.

  Note: this function does not verify the implied matrix is actually invertible
  nor is this condition checked even when `validate_args=True`.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if
      `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linag.lu`, i.e., if
      `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`.
    rhs: Matrix-shaped float `Tensor` representing targets for which to solve;
      `A X = RHS`. To handle vector cases, use:
      `lu_solve(..., rhs[..., tf.newaxis])[..., 0]`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness. Note: this function does not verify the implied matrix
      is actually invertible, even when `validate_args=True`.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., "lu_solve").

  Returns:
    x: The `X` in `A @ X = RHS`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[1., 2],
        [3, 4]],
       [[7, 8],
        [3, 4]]]
  inv_x = tfp.math.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))
  tf.assert_near(tf.matrix_inverse(x), inv_x)
  # ==> True
  ```

  """
  with tf.compat.v1.name_scope(name, 'lu_solve', [lower_upper, perm, rhs]):
    lower_upper = tf.convert_to_tensor(
        value=lower_upper, dtype_hint=tf.float32, name='lower_upper')
    perm = tf.convert_to_tensor(value=perm, dtype_hint=tf.int32, name='perm')
    rhs = tf.convert_to_tensor(
        value=rhs, dtype_hint=lower_upper.dtype, name='rhs')

    assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)
    if assertions:
      # Tie the (optional) validation ops into the graph so they run before
      # any computation that consumes the inputs.
      with tf.control_dependencies(assertions):
        lower_upper = tf.identity(lower_upper)
        perm = tf.identity(perm)
        rhs = tf.identity(rhs)

    # First apply the row permutation P to rhs (solving P L U x = rhs).
    if rhs.shape.ndims == 2 and perm.shape.ndims == 1:
      # Both rhs and perm have scalar batch_shape.
      permuted_rhs = tf.gather(rhs, perm, axis=-2)
    else:
      # Either rhs or perm have non-scalar batch_shape or we can't determine
      # this information statically.
      rhs_shape = tf.shape(input=rhs)
      broadcast_batch_shape = tf.broadcast_dynamic_shape(
          rhs_shape[:-2],
          tf.shape(input=perm)[:-1])
      d, m = rhs_shape[-2], rhs_shape[-1]
      rhs_broadcast_shape = tf.concat([broadcast_batch_shape, [d, m]], axis=0)

      # Tile out rhs.
      broadcast_rhs = tf.broadcast_to(rhs, rhs_broadcast_shape)
      broadcast_rhs = tf.reshape(broadcast_rhs, [-1, d, m])

      # Tile out perm and add batch indices.
      broadcast_perm = tf.broadcast_to(perm, rhs_broadcast_shape[:-1])
      broadcast_perm = tf.reshape(broadcast_perm, [-1, d])
      broadcast_batch_size = tf.reduce_prod(input_tensor=broadcast_batch_shape)
      broadcast_batch_indices = tf.broadcast_to(
          tf.range(broadcast_batch_size)[:, tf.newaxis],
          [broadcast_batch_size, d])
      broadcast_perm = tf.stack([broadcast_batch_indices, broadcast_perm],
                                axis=-1)

      # Batched gather of the permuted rows, then restore the batch shape.
      permuted_rhs = tf.gather_nd(broadcast_rhs, broadcast_perm)
      permuted_rhs = tf.reshape(permuted_rhs, rhs_broadcast_shape)

    # Recover unit-diagonal L from the packed L + U - eye representation.
    lower = tf.linalg.set_diag(
        tf.linalg.band_part(lower_upper, num_lower=-1, num_upper=0),
        tf.ones(tf.shape(input=lower_upper)[:-1], dtype=lower_upper.dtype))
    # Forward-substitute L y = P rhs, then back-substitute U x = y.
    return linear_operator_util.matrix_triangular_solve_with_broadcast(
        lower_upper,  # Only upper is accessed.
        linear_operator_util.matrix_triangular_solve_with_broadcast(
            lower, permuted_rhs),
        lower=False)
[ "def", "lu_solve", "(", "lower_upper", ",", "perm", ",", "rhs", ",", "validate_args", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'lu_solve'", ",", "[", "lower_upper", ...
Solves systems of linear eqns `A X = RHS`, given LU factorizations. Note: this function does not verify the implied matrix is actually invertible nor is this condition checked even when `validate_args=True`. Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. rhs: Matrix-shaped float `Tensor` representing targets for which to solve; `A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[..., tf.newaxis])[..., 0]`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Note: this function does not verify the implied matrix is actually invertible, even when `validate_args=True`. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., "lu_solve"). Returns: x: The `X` in `A @ X = RHS`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[1., 2], [3, 4]], [[7, 8], [3, 4]]] inv_x = tfp.math.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2)) tf.assert_near(tf.matrix_inverse(x), inv_x) # ==> True ```
[ "Solves", "systems", "of", "linear", "eqns", "A", "X", "=", "RHS", "given", "LU", "factorizations", "." ]
python
test
Shizmob/pydle
pydle/features/rfc1459/client.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/rfc1459/client.py#L330-L337
async def cycle(self, channel):
    """Leave and immediately re-join the given channel, keeping its password."""
    if not self.in_channel(channel):
        raise NotInChannel(channel)

    saved_password = self.channels[channel]['password']
    await self.part(channel)
    await self.join(channel, saved_password)
[ "async", "def", "cycle", "(", "self", ",", "channel", ")", ":", "if", "not", "self", ".", "in_channel", "(", "channel", ")", ":", "raise", "NotInChannel", "(", "channel", ")", "password", "=", "self", ".", "channels", "[", "channel", "]", "[", "'passwo...
Rejoin channel.
[ "Rejoin", "channel", "." ]
python
train
log2timeline/dfvfs
dfvfs/vfs/encoded_stream_file_system.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/encoded_stream_file_system.py#L35-L58
def _Open(self, path_spec, mode='rb'):
  """Opens the file system defined by path specification.

  Args:
    path_spec (PathSpec): a path specification.
    mode (Optional[str]): file access mode. The default is 'rb' which
        represents read-only binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  # An encoded stream is always layered on top of another path
  # specification, so a parent is mandatory.
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  # The codec to apply must be carried by the path specification itself.
  encoding_method = getattr(path_spec, 'encoding_method', None)
  if not encoding_method:
    raise errors.PathSpecError(
        'Unsupported path specification without encoding method.')

  # Only the method name is cached here; no I/O is performed in this
  # method — NOTE(review): presumably decoding happens when file entries
  # are resolved, confirm against callers.
  self._encoding_method = encoding_method
[ "def", "_Open", "(", "self", ",", "path_spec", ",", "mode", "=", "'rb'", ")", ":", "if", "not", "path_spec", ".", "HasParent", "(", ")", ":", "raise", "errors", ".", "PathSpecError", "(", "'Unsupported path specification without parent.'", ")", "encoding_method"...
Opens the file system defined by path specification. Args: path_spec (PathSpec): a path specification. mode (Optional[str]): file access mode. The default is 'rb' which represents read-only binary. Raises: AccessError: if the access to open the file was denied. IOError: if the file system could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
[ "Opens", "the", "file", "system", "defined", "by", "path", "specification", "." ]
python
train
mitsei/dlkit
dlkit/json_/learning/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/managers.py#L2192-L2209
def get_proficiency_admin_session(self, proxy):
    """Gets the ``OsidSession`` associated with the proficiency administration service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.learning.ProficiencyAdminSession) - a
            ``ProficiencyAdminSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_proficiency_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_proficiency_admin()`` is ``true``.*

    """
    # Honor the capability check mandated by the OSID contract before
    # handing out a session.
    if not self.supports_proficiency_admin():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    # Build the session against this manager's runtime configuration.
    return sessions.ProficiencyAdminSession(proxy=proxy, runtime=self._runtime)
[ "def", "get_proficiency_admin_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_proficiency_admin", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "Profic...
Gets the ``OsidSession`` associated with the proficiency administration service. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ProficiencyAdminSession) - a ``ProficiencyAdminSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_proficiency_admin()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_proficiency_admin()`` is ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "proficiency", "administration", "service", "." ]
python
train
pyGrowler/Growler
growler/core/application.py
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/application.py#L246-L294
def use(self, middleware=None, path='/', method_mask=HTTPMethod.ALL): """ Use the middleware (a callable with parameters res, req, next) upon requests match the provided path. A None path matches every request. Returns the middleware so this method may be used as a decorator. Args: middleware (callable): A function with signature '(req, res)' to be called with every request which matches path. path (str or regex): Object used to test the requests path. If it matches, either by equality or a successful regex match, the middleware is called with the req/res pair. method_mask (Optional[HTTPMethod]): Filters requests by HTTP method. The HTTPMethod enum behaves as a bitmask, so multiple methods may be joined by `+` or `\|`, removed with `-`, or toggled with `^` (e.g. `HTTPMethod.GET + HTTPMethod.POST`, `HTTPMethod.ALL - HTTPMethod.DELETE`). Returns: Returns the provided middleware; a requirement for this method to be used as a decorator. """ # catch decorator pattern if middleware is None: return lambda mw: self.use(mw, path, method_mask) if hasattr(middleware, '__growler_router'): router = getattr(middleware, '__growler_router') if isinstance(router, (types.MethodType,)): router = router() self.add_router(path, router) elif isinstance(type(middleware), RouterMeta): router = middleware._RouterMeta__growler_router() self.add_router(path, router) elif hasattr(middleware, '__iter__'): for mw in middleware: self.use(mw, path, method_mask) else: log.info("{} Using {} on path {}", id(self), middleware, path) self.middleware.add(path=path, func=middleware, method_mask=method_mask) return middleware
[ "def", "use", "(", "self", ",", "middleware", "=", "None", ",", "path", "=", "'/'", ",", "method_mask", "=", "HTTPMethod", ".", "ALL", ")", ":", "# catch decorator pattern", "if", "middleware", "is", "None", ":", "return", "lambda", "mw", ":", "self", "....
Use the middleware (a callable with parameters res, req, next) upon requests match the provided path. A None path matches every request. Returns the middleware so this method may be used as a decorator. Args: middleware (callable): A function with signature '(req, res)' to be called with every request which matches path. path (str or regex): Object used to test the requests path. If it matches, either by equality or a successful regex match, the middleware is called with the req/res pair. method_mask (Optional[HTTPMethod]): Filters requests by HTTP method. The HTTPMethod enum behaves as a bitmask, so multiple methods may be joined by `+` or `\|`, removed with `-`, or toggled with `^` (e.g. `HTTPMethod.GET + HTTPMethod.POST`, `HTTPMethod.ALL - HTTPMethod.DELETE`). Returns: Returns the provided middleware; a requirement for this method to be used as a decorator.
[ "Use", "the", "middleware", "(", "a", "callable", "with", "parameters", "res", "req", "next", ")", "upon", "requests", "match", "the", "provided", "path", ".", "A", "None", "path", "matches", "every", "request", ".", "Returns", "the", "middleware", "so", "...
python
train
openstack/proliantutils
proliantutils/ilo/ris.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L911-L923
def set_http_boot_url(self, url):
    """Set url to the UefiShellStartupUrl to the system in uefi boot mode.

    :param url: URL for http boot
    :raises: IloError, on an error from iLO.
    :raises: IloCommandNotSupportedInBiosError, if the system is
             in the bios boot mode.
    """
    # The setting only exists under UEFI; reject outright in BIOS mode.
    if self._is_boot_mode_uefi() is not True:
        msg = 'set_http_boot_url is not supported in the BIOS boot mode'
        raise exception.IloCommandNotSupportedInBiosError(msg)

    self._change_bios_setting({'UefiShellStartupUrl': url})
[ "def", "set_http_boot_url", "(", "self", ",", "url", ")", ":", "if", "(", "self", ".", "_is_boot_mode_uefi", "(", ")", "is", "True", ")", ":", "self", ".", "_change_bios_setting", "(", "{", "'UefiShellStartupUrl'", ":", "url", "}", ")", "else", ":", "msg...
Set url to the UefiShellStartupUrl to the system in uefi boot mode. :param url: URL for http boot :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedInBiosError, if the system is in the bios boot mode.
[ "Set", "url", "to", "the", "UefiShellStartupUrl", "to", "the", "system", "in", "uefi", "boot", "mode", "." ]
python
train
danpoland/pyramid-restful-framework
pyramid_restful/pagination/utilities.py
https://github.com/danpoland/pyramid-restful-framework/blob/4d8c9db44b1869c3d1fdd59ca304c3166473fcbb/pyramid_restful/pagination/utilities.py#L17-L27
def remove_query_param(url, key):
    """
    Strip `key` (and all its values) from the query string of `url` and
    return the rebuilt URL; remaining parameters are emitted sorted by name.
    """
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    params = urlparse.parse_qs(query)
    params.pop(key, None)
    new_query = urlparse.urlencode(sorted(params.items()), doseq=True)
    return urlparse.urlunsplit((scheme, netloc, path, new_query, fragment))
[ "def", "remove_query_param", "(", "url", ",", "key", ")", ":", "(", "scheme", ",", "netloc", ",", "path", ",", "query", ",", "fragment", ")", "=", "urlparse", ".", "urlsplit", "(", "url", ")", "query_dict", "=", "urlparse", ".", "parse_qs", "(", "query...
Given a URL and a key/val pair, remove an item in the query parameters of the URL, and return the new URL.
[ "Given", "a", "URL", "and", "a", "key", "/", "val", "pair", "remove", "an", "item", "in", "the", "query", "parameters", "of", "the", "URL", "and", "return", "the", "new", "URL", "." ]
python
train
slarse/clanimtk
clanimtk/decorator.py
https://github.com/slarse/clanimtk/blob/cb93d2e914c3ecc4e0007745ff4d546318cf3902/clanimtk/decorator.py#L18-L33
def animation(frame_function: types.FrameFunction) -> types.Animation:
    """Turn a FrameFunction into an Animation.

    Args:
        frame_function: A function that returns a FrameGenerator.
    Returns:
        an Animation decorator function.
    """
    anim = core.Animation(frame_function)

    @functools.wraps(frame_function)
    def wrapped(*args, **kwargs):
        return anim(*args, **kwargs)

    return wrapped
[ "def", "animation", "(", "frame_function", ":", "types", ".", "FrameFunction", ")", "->", "types", ".", "Animation", ":", "animation_", "=", "core", ".", "Animation", "(", "frame_function", ")", "@", "functools", ".", "wraps", "(", "frame_function", ")", "de...
Turn a FrameFunction into an Animation. Args: frame_function: A function that returns a FrameGenerator. Returns: an Animation decorator function.
[ "Turn", "a", "FrameFunction", "into", "an", "Animation", "." ]
python
train
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/directory.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/directory.py#L2171-L2205
def add_nic(self, nic, sync=True):
    """Attach a NIC to this OS instance.

    :param nic: the nic to add on this OS instance
    :param sync: If sync=True(default) synchronize with Ariane server.
    If sync=False, add the nic object on list to be added
    on next save().
    :return:
    """
    LOGGER.debug("OSInstance.add_nic")
    if not sync:
        # Defer the link; it will be pushed on the next save().
        self.nic_2_add.append(nic)
        return

    # Persist the NIC first so it has a server-side id.
    if nic.id is None:
        nic.save()

    if self.id is None or nic.id is None:
        LOGGER.warning(
            'OSInstance.add_nic - Problem while updating OS instance ' +
            self.name + '. Reason: NIC ' + nic.name + ' id is None'
        )
        return

    params = {
        'id': self.id,
        'nicID': nic.id
    }
    args = {'http_operation': 'GET', 'operation_path': 'update/nics/add',
            'parameters': params}
    response = OSInstanceService.requester.call(args)
    if response.rc != 0:
        LOGGER.warning(
            'OSInstance.add_nic - Problem while updating OS instance ' +
            self.name + '. Reason: ' + str(response.response_content) + '-' +
            str(response.error_message) + " (" + str(response.rc) + ")"
        )
        return

    self.nic_ids.append(nic.id)
    nic.nic_osi_id = self.id
[ "def", "add_nic", "(", "self", ",", "nic", ",", "sync", "=", "True", ")", ":", "LOGGER", ".", "debug", "(", "\"OSInstance.add_nic\"", ")", "if", "not", "sync", ":", "self", ".", "nic_2_add", ".", "append", "(", "nic", ")", "else", ":", "if", "nic", ...
add a nic to this OS instance. :param nic: the nic to add on this OS instance :param sync: If sync=True(default) synchronize with Ariane server. If sync=False, add the nic object on list to be added on next save(). :return:
[ "add", "a", "nic", "to", "this", "OS", "instance", ".", ":", "param", "nic", ":", "the", "nic", "to", "add", "on", "this", "OS", "instance", ":", "param", "sync", ":", "If", "sync", "=", "True", "(", "default", ")", "synchronize", "with", "Ariane", ...
python
train
gem/oq-engine
openquake/hazardlib/correlation.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/correlation.py#L225-L262
def hmcorrelation(sites_or_distances, imt, uncertainty_multiplier=0):
    """
    Returns the Heresi-Miranda correlation model.

    :param sites_or_distances:
        SiteCollection instance o distance matrix
    :param imt:
        Intensity Measure Type (PGA or SA)
    :param uncertainty_multiplier:
        Value to be multiplied by the uncertainty in the correlation
        parameter beta. If uncertainty_multiplier = 0 (default), the median
        value is used as a constant value.
    """
    if hasattr(sites_or_distances, 'mesh'):
        distances = sites_or_distances.mesh.get_distance_matrix()
    else:
        distances = sites_or_distances

    period = imt.period

    # Median of the correlation-length parameter, Eq. (9).
    if period < 1.37:
        median_beta = 4.231 * period * period - 5.180 * period + 13.392
    else:
        median_beta = 0.140 * period * period - 2.249 * period + 17.050

    # Standard deviation of the parameter, Eq. (10).
    sigma_beta = 4.63e-3 * period * period + 0.028 * period + 0.713

    if uncertainty_multiplier == 0:
        # Deterministic: use the median value directly.
        beta = median_beta
    else:
        # Sample beta from a lognormal around the median.
        beta = numpy.random.lognormal(
            numpy.log(median_beta), sigma_beta * uncertainty_multiplier)

    # Correlation decay with distance, Eq. (8).
    return numpy.exp(-numpy.power(distances / beta, 0.55))
[ "def", "hmcorrelation", "(", "sites_or_distances", ",", "imt", ",", "uncertainty_multiplier", "=", "0", ")", ":", "if", "hasattr", "(", "sites_or_distances", ",", "'mesh'", ")", ":", "distances", "=", "sites_or_distances", ".", "mesh", ".", "get_distance_matrix", ...
Returns the Heresi-Miranda correlation model. :param sites_or_distances: SiteCollection instance o distance matrix :param imt: Intensity Measure Type (PGA or SA) :param uncertainty_multiplier: Value to be multiplied by the uncertainty in the correlation parameter beta. If uncertainty_multiplier = 0 (default), the median value is used as a constant value.
[ "Returns", "the", "Heresi", "-", "Miranda", "correlation", "model", "." ]
python
train
selectel/pyte
pyte/screens.py
https://github.com/selectel/pyte/blob/8adad489f86da1788a7995720c344a2fa44f244e/pyte/screens.py#L834-L847
def ensure_vbounds(self, use_margins=None):
    """Clamp the cursor's row into the allowed vertical range.

    :param bool use_margins: when ``True`` or when
                             :data:`~pyte.modes.DECOM` is set, the cursor
                             is restricted to the scrolling margins rather
                             than the full ``[0; lines - 1]`` range.
    """
    restrict_to_margins = (
        (use_margins or mo.DECOM in self.mode) and self.margins is not None)
    if restrict_to_margins:
        top, bottom = self.margins
    else:
        top, bottom = 0, self.lines - 1

    self.cursor.y = min(max(top, self.cursor.y), bottom)
[ "def", "ensure_vbounds", "(", "self", ",", "use_margins", "=", "None", ")", ":", "if", "(", "use_margins", "or", "mo", ".", "DECOM", "in", "self", ".", "mode", ")", "and", "self", ".", "margins", "is", "not", "None", ":", "top", ",", "bottom", "=", ...
Ensure the cursor is within vertical screen bounds. :param bool use_margins: when ``True`` or when :data:`~pyte.modes.DECOM` is set, cursor is bounded by top and and bottom margins, instead of ``[0; lines - 1]``.
[ "Ensure", "the", "cursor", "is", "within", "vertical", "screen", "bounds", "." ]
python
train
pypa/pipenv
pipenv/patched/notpip/_internal/commands/hash.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/commands/hash.py#L51-L57
def _hash_of_file(path, algorithm):
    """Return the hex-encoded hash digest of the file at ``path``.

    :param path: filesystem path of the file to hash
    :param algorithm: any algorithm name accepted by :func:`hashlib.new`,
        e.g. ``"sha256"``
    :return: the hex digest string
    """
    # Renamed from `hash`, which shadowed the builtin of the same name.
    digest = hashlib.new(algorithm)
    with open(path, 'rb') as archive:
        # Stream in chunks so large archives are never fully in memory.
        for chunk in read_chunks(archive):
            digest.update(chunk)
    return digest.hexdigest()
[ "def", "_hash_of_file", "(", "path", ",", "algorithm", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "archive", ":", "hash", "=", "hashlib", ".", "new", "(", "algorithm", ")", "for", "chunk", "in", "read_chunks", "(", "archive", ")", ...
Return the hash digest of a file.
[ "Return", "the", "hash", "digest", "of", "a", "file", "." ]
python
train
deep-compute/funcserver
funcserver/funcserver.py
https://github.com/deep-compute/funcserver/blob/ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23/funcserver/funcserver.py#L656-L713
def run(self):
    """
    Prepares the api and starts the tornado funcserver.

    Builds the request handlers, template loader, static file handler and
    navigation tabs, wires everything into a tornado application, binds the
    listen port (unless it is 0) and enters the IOLoop — this call blocks
    until the loop is stopped.
    """
    self.log_id = 0

    # all active websockets and their state
    self.websocks = {}

    # all active python interpreter sessions
    self.pysessions = {}

    if self.DISABLE_REQUESTS_DEBUG_LOGS:
        disable_requests_debug_logs()

    # Worker pool used to keep blocking work off the IOLoop thread.
    self.threadpool = ThreadPool(self.THREADPOOL_WORKERS)

    self.api = None

    # tornado app object
    base_handlers = self.prepare_base_handlers()
    handlers = self.prepare_handlers()

    # Template loader; a subclass hook may return a replacement loader.
    self.template_loader = TemplateLoader([resolve_path(self.TEMPLATE_PATH)])
    _ = self.prepare_template_loader(self.template_loader)
    if _ is not None:
        self.template_loader = _

    # Static handler paths; a subclass hook may replace the path list.
    shclass = CustomStaticFileHandler
    shclass.PATHS.append(resolve_path(self.STATIC_PATH))
    _ = self.prepare_static_paths(shclass.PATHS)
    if _ is not None:
        shclass.PATHS = _
    self.static_handler_class = shclass

    # Navigation tabs; debug mode adds the console and log viewers.
    self.nav_tabs = [('Home', '/')]
    if self.args.debug:
        self.nav_tabs += [('Console', '/console'), ('Logs', '/logs')]
    self.nav_tabs = self.prepare_nav_tabs(self.nav_tabs)

    settings = {
        'static_path': '<DUMMY-INEXISTENT-PATH>',
        'static_handler_class': self.static_handler_class,
        'template_loader': self.template_loader,
        'compress_response': True,
        'debug': self.args.debug,
    }

    all_handlers = handlers + base_handlers

    self.app = self.APP_CLASS(**settings)
    self.app.add_handlers(self.VIRTUAL_HOST, all_handlers)

    # Expose this server instance both globally and on the app object.
    sys.funcserver = self.app.funcserver = self

    self.api = self.prepare_api()
    if self.api is not None and not hasattr(self.api, 'log'):
        self.api.log = self.log

    # A port of 0 means "do not bind a socket here".
    if self.args.port != 0:
        self.app.listen(self.args.port)

    # Blocks until the IOLoop is stopped.
    tornado.ioloop.IOLoop.instance().start()
[ "def", "run", "(", "self", ")", ":", "self", ".", "log_id", "=", "0", "# all active websockets and their state", "self", ".", "websocks", "=", "{", "}", "# all active python interpreter sessions", "self", ".", "pysessions", "=", "{", "}", "if", "self", ".", "D...
prepares the api and starts the tornado funcserver
[ "prepares", "the", "api", "and", "starts", "the", "tornado", "funcserver" ]
python
train
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L293-L298
def _write_vcf_breakend(brend, out_handle): """Write out a single VCF line with breakpoint information. """ out_handle.write("{0}\n".format("\t".join(str(x) for x in [brend.chrom, brend.pos + 1, brend.id, brend.ref, brend.alt, ".", "PASS", brend.info])))
[ "def", "_write_vcf_breakend", "(", "brend", ",", "out_handle", ")", ":", "out_handle", ".", "write", "(", "\"{0}\\n\"", ".", "format", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "[", "brend", ".", "chrom", ",", "brend", ...
Write out a single VCF line with breakpoint information.
[ "Write", "out", "a", "single", "VCF", "line", "with", "breakpoint", "information", "." ]
python
train
markovmodel/PyEMMA
pyemma/_base/serialization/__init__.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_base/serialization/__init__.py#L22-L38
def list_models(filename):
    """Lists all models stored in the given file.

    Parameters
    ----------
    filename: str
        path to filename, where the model has been stored.

    Returns
    -------
    obj: dict
        A mapping by name and a comprehensive description like this:
        {model_name: {'repr' : 'string representation, 'created': 'human readable date', ...}
    """
    from .h5file import H5File
    with H5File(filename, mode='r') as model_file:
        return model_file.models_descriptive
[ "def", "list_models", "(", "filename", ")", ":", "from", ".", "h5file", "import", "H5File", "with", "H5File", "(", "filename", ",", "mode", "=", "'r'", ")", "as", "f", ":", "return", "f", ".", "models_descriptive" ]
Lists all models in given filename. Parameters ---------- filename: str path to filename, where the model has been stored. Returns ------- obj: dict A mapping by name and a comprehensive description like this: {model_name: {'repr' : 'string representation, 'created': 'human readable date', ...}
[ "Lists", "all", "models", "in", "given", "filename", "." ]
python
train
gwastro/pycbc
pycbc/frame/frame.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/frame/frame.py#L583-L628
def update_cache_by_increment(self, blocksize):
    """Update the internal cache by starting from the first frame
    and incrementing.

    Guess the next frame file name by incrementing from the first found
    one. This allows a pattern to be used for the GPS folder of the file,
    which is indicated by `GPSX` where x is the number of digits to use.

    Parameters
    ----------
    blocksize: int
        Number of seconds to increment the next frame file.
    """
    start = float(self.raw_buffer.end_time)
    end = float(start + blocksize)

    # On the first call, parse "<obs>-<desc>-<gps>-<dur>" from an existing
    # frame file name to learn the prefix, reference GPS time and the
    # per-file duration in seconds.
    if not hasattr(self, 'dur'):
        fname = glob.glob(self.frame_src[0])[0]
        fname = os.path.splitext(os.path.basename(fname))[0].split('-')

        self.beg = '-'.join([fname[0], fname[1]])
        self.ref = int(fname[2])
        self.dur = int(fname[3])

    # GPS start time of the frame file containing `start`, aligned to the
    # file duration relative to the reference time.
    fstart = int(self.ref + numpy.floor((start - self.ref) / float(self.dur)) *
                 self.dur)
    # NOTE(review): numpy.int is deprecated in newer NumPy releases;
    # plain int would be the modern spelling.
    starts = numpy.arange(fstart, end, self.dur).astype(numpy.int)

    keys = []
    for s in starts:
        pattern = self.increment_update_cache
        if 'GPS' in pattern:
            # Replace the GPSn placeholder with the first n digits of this
            # frame's GPS start time (the epoch folder in the path).
            n = int(pattern[int(pattern.index('GPS') + 3)])
            pattern = pattern.replace('GPS%s' % n, str(s)[0:n])
        name = '%s/%s-%s-%s.gwf' % (pattern, self.beg, s, self.dur)

        # check that file actually exists, else abort now
        if not os.path.exists(name):
            logging.info("%s does not seem to exist yet" % name)
            raise RuntimeError

        keys.append(name)
    cache = locations_to_cache(keys)
    stream = lalframe.FrStreamCacheOpen(cache)
    self.stream = stream

    # Refresh channel metadata from the newly opened stream.
    self.channel_type, self.raw_sample_rate = \
        self._retrieve_metadata(self.stream, self.channel_name)
[ "def", "update_cache_by_increment", "(", "self", ",", "blocksize", ")", ":", "start", "=", "float", "(", "self", ".", "raw_buffer", ".", "end_time", ")", "end", "=", "float", "(", "start", "+", "blocksize", ")", "if", "not", "hasattr", "(", "self", ",", ...
Update the internal cache by starting from the first frame and incrementing. Guess the next frame file name by incrementing from the first found one. This allows a pattern to be used for the GPS folder of the file, which is indicated by `GPSX` where x is the number of digits to use. Parameters ---------- blocksize: int Number of seconds to increment the next frame file.
[ "Update", "the", "internal", "cache", "by", "starting", "from", "the", "first", "frame", "and", "incrementing", "." ]
python
train
amperser/proselint
proselint/checks/redundancy/misc.py
https://github.com/amperser/proselint/blob/cb619ee4023cc7856f5fb96aec2a33a2c9f1a2e2/proselint/checks/redundancy/misc.py#L9-L18
def check(text):
    """Flag redundant phrases and suggest the preferred forms."""
    err = "redundancy.wallace"
    msg = "Redundancy. Use '{}' instead of '{}'."

    preferred = [
        ["rectangular", ["rectangular in shape"]],
        ["audible", ["audible to the ear"]],
    ]

    return preferred_forms_check(text, preferred, err, msg)
[ "def", "check", "(", "text", ")", ":", "err", "=", "\"redundancy.wallace\"", "msg", "=", "\"Redundancy. Use '{}' instead of '{}'.\"", "redundancies", "=", "[", "[", "\"rectangular\"", ",", "[", "\"rectangular in shape\"", "]", "]", ",", "[", "\"audible\"", ",", "[...
Suggest the preferred forms.
[ "Suggest", "the", "preferred", "forms", "." ]
python
train
kgaughan/dbkit
dbkit.py
https://github.com/kgaughan/dbkit/blob/2aef6376a60965d7820c91692046f4bcf7d43640/dbkit.py#L146-L152
def current(cls, with_exception=True):
    """Return the database context on top of the stack.

    Raises `NoContext` when the stack is empty and `with_exception`
    is true.
    """
    stack = cls.stack
    if with_exception and len(stack) == 0:
        raise NoContext()
    return stack.top()
[ "def", "current", "(", "cls", ",", "with_exception", "=", "True", ")", ":", "if", "with_exception", "and", "len", "(", "cls", ".", "stack", ")", "==", "0", ":", "raise", "NoContext", "(", ")", "return", "cls", ".", "stack", ".", "top", "(", ")" ]
Returns the current database context.
[ "Returns", "the", "current", "database", "context", "." ]
python
train
flatironinstitute/mlprocessors
mlprocessors/core.py
https://github.com/flatironinstitute/mlprocessors/blob/28d55542bbd02b9ddfe429db260f0be58f3820d3/mlprocessors/core.py#L302-L326
def spec(self):
    """
    Generate spec for the processor as a Python dictionary.

    A spec is a standard way to describe a MountainLab processor in a way
    that is easy to process, yet still understandable by humans. This
    method generates a Python dictionary that complies with a spec
    definition.
    """
    components = [sys.argv[0], self.NAME]
    if self.USE_ARGUMENTS:
        components.append('$(arguments)')

    description = {
        'name': self.NAME,
        'version': self.VERSION,
        'description': self.DESCRIPTION,
        'exe_command': self.COMMAND or ' '.join(components),
        'inputs': [inp.spec for inp in self.INPUTS],
        'outputs': [out.spec for out in self.OUTPUTS],
        'parameters': [param.spec for param in self.PARAMETERS],
    }
    if callable(getattr(self, 'test', None)):
        description['has_test'] = True
    return description
[ "def", "spec", "(", "self", ")", ":", "pspec", "=", "{", "}", "pspec", "[", "'name'", "]", "=", "self", ".", "NAME", "pspec", "[", "'version'", "]", "=", "self", ".", "VERSION", "pspec", "[", "'description'", "]", "=", "self", ".", "DESCRIPTION", "...
Generate spec for the processor as a Python dictionary. A spec is a standard way to describe a MountainLab processor in a way that is easy to process, yet still understandable by humans. This method generates a Python dictionary that complies with a spec definition.
[ "Generate", "spec", "for", "the", "processor", "as", "a", "Python", "dictionary", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/rl/evaluator.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L403-L461
def evaluate( loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir, agent_type, eval_mode, eval_with_learner, log_every_steps, debug_video_path, num_debug_videos=1, random_starts_step_limit=None, report_fn=None, report_metric=None ): """Evaluate.""" if eval_with_learner: assert agent_type == "policy" if report_fn: assert report_metric is not None eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir) video_writers = () kwargs = {} if eval_mode in ["agent_real", "agent_simulated"]: if not eval_with_learner: if debug_video_path: tf.gfile.MakeDirs(debug_video_path) video_writers = [ common_video.WholeVideoWriter( # pylint: disable=g-complex-comprehension fps=10, output_path=os.path.join(debug_video_path, "{}.avi".format(i)), file_format="avi", ) for i in range(num_debug_videos) ] kwargs["eval_fn"] = make_eval_fn_with_agent( agent_type, eval_mode, planner_hparams, model_dir, log_every_steps=log_every_steps, video_writers=video_writers, random_starts_step_limit=random_starts_step_limit ) eval_metrics = rl_utils.evaluate_all_configs( loop_hparams, policy_dir, **kwargs ) else: eval_metrics = evaluate_world_model( agent_type, loop_hparams, planner_hparams, model_dir, policy_dir, random_starts_step_limit, debug_video_path, log_every_steps ) rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0) for video_writer in video_writers: video_writer.finish_to_disk() # Report metrics if report_fn: if report_metric == "mean_reward": metric_name = rl_utils.get_metric_name( sampling_temp=loop_hparams.eval_sampling_temps[0], max_num_noops=loop_hparams.eval_max_num_noops, clipped=False ) report_fn(eval_metrics[metric_name], 0) else: report_fn(eval_metrics[report_metric], 0) return eval_metrics
[ "def", "evaluate", "(", "loop_hparams", ",", "planner_hparams", ",", "policy_dir", ",", "model_dir", ",", "eval_metrics_dir", ",", "agent_type", ",", "eval_mode", ",", "eval_with_learner", ",", "log_every_steps", ",", "debug_video_path", ",", "num_debug_videos", "=", ...
Evaluate.
[ "Evaluate", "." ]
python
train
suds-community/suds
suds/xsd/sxbasic.py
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/xsd/sxbasic.py#L662-L671
def __applytns(self, root): """Make sure included schema has the same target namespace.""" TNS = "targetNamespace" tns = root.get(TNS) if tns is None: tns = self.schema.tns[1] root.set(TNS, tns) else: if self.schema.tns[1] != tns: raise Exception, "%s mismatch" % TNS
[ "def", "__applytns", "(", "self", ",", "root", ")", ":", "TNS", "=", "\"targetNamespace\"", "tns", "=", "root", ".", "get", "(", "TNS", ")", "if", "tns", "is", "None", ":", "tns", "=", "self", ".", "schema", ".", "tns", "[", "1", "]", "root", "."...
Make sure included schema has the same target namespace.
[ "Make", "sure", "included", "schema", "has", "the", "same", "target", "namespace", "." ]
python
train
brentp/cruzdb
cruzdb/sqlsoup.py
https://github.com/brentp/cruzdb/blob/9068d46e25952f4a929dde0242beb31fa4c7e89a/cruzdb/sqlsoup.py#L93-L101
def relate(cls, propname, *args, **kwargs): """Produce a relationship between this mapped table and another one. This makes usage of SQLAlchemy's :func:`sqlalchemy.orm.relationship` construct. """ class_mapper(cls)._configure_property(propname, relationship(*args, **kwargs))
[ "def", "relate", "(", "cls", ",", "propname", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "class_mapper", "(", "cls", ")", ".", "_configure_property", "(", "propname", ",", "relationship", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ...
Produce a relationship between this mapped table and another one. This makes usage of SQLAlchemy's :func:`sqlalchemy.orm.relationship` construct.
[ "Produce", "a", "relationship", "between", "this", "mapped", "table", "and", "another", "one", ".", "This", "makes", "usage", "of", "SQLAlchemy", "s", ":", "func", ":", "sqlalchemy", ".", "orm", ".", "relationship", "construct", "." ]
python
train
sci-bots/pygtkhelpers
pygtkhelpers/ui/views/command_textview.py
https://github.com/sci-bots/pygtkhelpers/blob/3a6e6d6340221c686229cd1c951d7537dae81b07/pygtkhelpers/ui/views/command_textview.py#L95-L187
def get_run_command_dialog(command, shell=False, title='', data_callback=None, parent=None, **kwargs): ''' Launch command in a subprocess and create a dialog window to monitor the output of the process. Parameters ---------- command : list or str Subprocess command to execute. shell : bool, optional If :data:`shell` is ``False``, :data:`command` **must** be a :class:`list`. If :data:`shell` is ``True``, :data:`command` **must** be a :class:`str`. title : str, optional Title for dialog window and initial contents of main label. data_callback : func(dialog, command_view, fd, data), optional Callback function called when data is available for one of the file descriptors. The :data:`fd` callback parameter is 1 for ``stdout`` and 2 for ``stderr``. **kwargs Additional keyword arguments are interpreted as dialog widget property values and are applied to the dialog widget. Returns ------- gtk.Dialog Dialog with a progress bar and an expandable text view to monitor the output of the specified :data:`command`. .. note:: Subprocess is launched before returning dialog. ''' dialog = gtk.Dialog(title=title or None, parent=parent) dialog.set_size_request(540, -1) for key, value in kwargs.iteritems(): setattr(dialog.props, key, value) dialog.add_buttons(gtk.STOCK_OK, gtk.RESPONSE_OK) dialog.set_default_response(gtk.RESPONSE_OK) content_area = dialog.get_content_area() label = gtk.Label(title) label.props.xalign = .1 content_area.pack_start(label, expand=False, fill=True, padding=10) progress_bar = gtk.ProgressBar() expander = gtk.Expander('Details') # Resize window based on whether or not expander is open. 
expander.connect('activate', functools .partial(lambda w, e, *args: w.set_size_request(540, -1 if e.props.expanded else 480), dialog)) command_view = CommandTextView() if data_callback is not None: command_view.connect('data-written', functools.partial(data_callback, dialog)) expander.add(command_view.widget) content_area.pack_start(progress_bar, expand=False) content_area.pack_start(expander, expand=True, fill=True) button = dialog.get_action_area().get_children()[0] content_area.show_all() def _run_command(label, progress_bar, button, view, command, shell): button.props.sensitive = False text_buffer = command_view.text_view.get_buffer() text_buffer.delete(*text_buffer.get_bounds()) def _pulse(*args): progress_bar.pulse() return True timeout_id = gobject.timeout_add(250, _pulse) command_view.run(command, shell=shell) gobject.source_remove(timeout_id) progress_bar.set_fraction(1.) button.props.sensitive = True label.set_markup('{} <b>done</b>.'.format(title)) gobject.idle_add(_run_command, label, progress_bar, button, command_view, command, shell) return dialog
[ "def", "get_run_command_dialog", "(", "command", ",", "shell", "=", "False", ",", "title", "=", "''", ",", "data_callback", "=", "None", ",", "parent", "=", "None", ",", "*", "*", "kwargs", ")", ":", "dialog", "=", "gtk", ".", "Dialog", "(", "title", ...
Launch command in a subprocess and create a dialog window to monitor the output of the process. Parameters ---------- command : list or str Subprocess command to execute. shell : bool, optional If :data:`shell` is ``False``, :data:`command` **must** be a :class:`list`. If :data:`shell` is ``True``, :data:`command` **must** be a :class:`str`. title : str, optional Title for dialog window and initial contents of main label. data_callback : func(dialog, command_view, fd, data), optional Callback function called when data is available for one of the file descriptors. The :data:`fd` callback parameter is 1 for ``stdout`` and 2 for ``stderr``. **kwargs Additional keyword arguments are interpreted as dialog widget property values and are applied to the dialog widget. Returns ------- gtk.Dialog Dialog with a progress bar and an expandable text view to monitor the output of the specified :data:`command`. .. note:: Subprocess is launched before returning dialog.
[ "Launch", "command", "in", "a", "subprocess", "and", "create", "a", "dialog", "window", "to", "monitor", "the", "output", "of", "the", "process", "." ]
python
train
chaoss/grimoirelab-sortinghat
sortinghat/cmd/load.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/load.py#L278-L323
def __load_unique_identities(self, uidentities, matcher, match_new, reset, verbose): """Load unique identities""" self.new_uids.clear() n = 0 if reset: self.__reset_unique_identities() self.log("Loading unique identities...") for uidentity in uidentities: self.log("\n=====", verbose) self.log("+ Processing %s" % uidentity.uuid, verbose) try: stored_uuid = self.__load_unique_identity(uidentity, verbose) except LoadError as e: self.error("%s Skipping." % str(e)) self.log("=====", verbose) continue stored_uuid = self.__load_identities(uidentity.identities, stored_uuid, verbose) try: self.__load_profile(uidentity.profile, stored_uuid, verbose) except Exception as e: self.error("%s. Loading %s profile. Skipping profile." % (str(e), stored_uuid)) self.__load_enrollments(uidentity.enrollments, stored_uuid, verbose) if matcher and (not match_new or stored_uuid in self.new_uids): stored_uuid = self._merge_on_matching(stored_uuid, matcher, verbose) self.log("+ %s (old %s) loaded" % (stored_uuid, uidentity.uuid), verbose) self.log("=====", verbose) n += 1 self.log("%d/%d unique identities loaded" % (n, len(uidentities)))
[ "def", "__load_unique_identities", "(", "self", ",", "uidentities", ",", "matcher", ",", "match_new", ",", "reset", ",", "verbose", ")", ":", "self", ".", "new_uids", ".", "clear", "(", ")", "n", "=", "0", "if", "reset", ":", "self", ".", "__reset_unique...
Load unique identities
[ "Load", "unique", "identities" ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py#L737-L754
def delete_process_work_item_type_rule(self, process_id, wit_ref_name, rule_id): """DeleteProcessWorkItemTypeRule. [Preview API] Removes a rule from the work item type in the process. :param str process_id: The ID of the process :param str wit_ref_name: The reference name of the work item type :param str rule_id: The ID of the rule """ route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if wit_ref_name is not None: route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str') if rule_id is not None: route_values['ruleId'] = self._serialize.url('rule_id', rule_id, 'str') self._send(http_method='DELETE', location_id='76fe3432-d825-479d-a5f6-983bbb78b4f3', version='5.0-preview.2', route_values=route_values)
[ "def", "delete_process_work_item_type_rule", "(", "self", ",", "process_id", ",", "wit_ref_name", ",", "rule_id", ")", ":", "route_values", "=", "{", "}", "if", "process_id", "is", "not", "None", ":", "route_values", "[", "'processId'", "]", "=", "self", ".", ...
DeleteProcessWorkItemTypeRule. [Preview API] Removes a rule from the work item type in the process. :param str process_id: The ID of the process :param str wit_ref_name: The reference name of the work item type :param str rule_id: The ID of the rule
[ "DeleteProcessWorkItemTypeRule", ".", "[", "Preview", "API", "]", "Removes", "a", "rule", "from", "the", "work", "item", "type", "in", "the", "process", ".", ":", "param", "str", "process_id", ":", "The", "ID", "of", "the", "process", ":", "param", "str", ...
python
train
wummel/linkchecker
linkcheck/updater.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/updater.py#L36-L54
def check_update (): """Return the following values: (False, errmsg) - online version could not be determined (True, None) - user has newest version (True, (version, url string)) - update available (True, (version, None)) - current version is newer than online version """ version, value = get_online_version() if version is None: # value is an error message return False, value if version == CurrentVersion: # user has newest version return True, None if is_newer_version(version): # value is an URL linking to the update package return True, (version, value) # user is running a local or development version return True, (version, None)
[ "def", "check_update", "(", ")", ":", "version", ",", "value", "=", "get_online_version", "(", ")", "if", "version", "is", "None", ":", "# value is an error message", "return", "False", ",", "value", "if", "version", "==", "CurrentVersion", ":", "# user has newe...
Return the following values: (False, errmsg) - online version could not be determined (True, None) - user has newest version (True, (version, url string)) - update available (True, (version, None)) - current version is newer than online version
[ "Return", "the", "following", "values", ":", "(", "False", "errmsg", ")", "-", "online", "version", "could", "not", "be", "determined", "(", "True", "None", ")", "-", "user", "has", "newest", "version", "(", "True", "(", "version", "url", "string", "))",...
python
train
thumbor/libthumbor
libthumbor/url.py
https://github.com/thumbor/libthumbor/blob/8114928102ff07166ce32e6d894f30124b5e169a/libthumbor/url.py#L47-L54
def url_for(**options): '''Returns the url for the specified options''' url_parts = get_url_parts(**options) image_hash = hashlib.md5(b(options['image_url'])).hexdigest() url_parts.append(image_hash) return "/".join(url_parts)
[ "def", "url_for", "(", "*", "*", "options", ")", ":", "url_parts", "=", "get_url_parts", "(", "*", "*", "options", ")", "image_hash", "=", "hashlib", ".", "md5", "(", "b", "(", "options", "[", "'image_url'", "]", ")", ")", ".", "hexdigest", "(", ")",...
Returns the url for the specified options
[ "Returns", "the", "url", "for", "the", "specified", "options" ]
python
train
awslabs/aws-sam-cli
samcli/commands/local/lib/local_api_service.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/local_api_service.py#L86-L106
def _make_routing_list(api_provider): """ Returns a list of routes to configure the Local API Service based on the APIs configured in the template. Parameters ---------- api_provider : samcli.commands.local.lib.sam_api_provider.SamApiProvider Returns ------- list(samcli.local.apigw.service.Route) List of Routes to pass to the service """ routes = [] for api in api_provider.get_all(): route = Route(methods=[api.method], function_name=api.function_name, path=api.path, binary_types=api.binary_media_types) routes.append(route) return routes
[ "def", "_make_routing_list", "(", "api_provider", ")", ":", "routes", "=", "[", "]", "for", "api", "in", "api_provider", ".", "get_all", "(", ")", ":", "route", "=", "Route", "(", "methods", "=", "[", "api", ".", "method", "]", ",", "function_name", "=...
Returns a list of routes to configure the Local API Service based on the APIs configured in the template. Parameters ---------- api_provider : samcli.commands.local.lib.sam_api_provider.SamApiProvider Returns ------- list(samcli.local.apigw.service.Route) List of Routes to pass to the service
[ "Returns", "a", "list", "of", "routes", "to", "configure", "the", "Local", "API", "Service", "based", "on", "the", "APIs", "configured", "in", "the", "template", "." ]
python
train
Falkonry/falkonry-python-client
falkonryclient/service/falkonry.py
https://github.com/Falkonry/falkonry-python-client/blob/0aeb2b00293ee94944f1634e9667401b03da29c1/falkonryclient/service/falkonry.py#L381-L391
def on_assessment(self, assessment): """ To turn on assessment :param assessment: string """ assessmentObj = self.get_assessment(assessment) url = '/datastream/' + str(assessmentObj.get_datastream()) + '/on?assessment=' + str(assessment) response = self.http.post(url,"") return Schemas.Assessment(assessment=response[0])
[ "def", "on_assessment", "(", "self", ",", "assessment", ")", ":", "assessmentObj", "=", "self", ".", "get_assessment", "(", "assessment", ")", "url", "=", "'/datastream/'", "+", "str", "(", "assessmentObj", ".", "get_datastream", "(", ")", ")", "+", "'/on?as...
To turn on assessment :param assessment: string
[ "To", "turn", "on", "assessment", ":", "param", "assessment", ":", "string" ]
python
train
Bystroushaak/zeo_connector
src/zeo_connector/zeo_wrapper_prototype.py
https://github.com/Bystroushaak/zeo_connector/blob/93f86447204efc8e33d3112907cd221daf6bce3b/src/zeo_connector/zeo_wrapper_prototype.py#L105-L118
def _open_connection(self): """ Open the connection to the database based on the configuration file. """ if self._connection: try: self._connection.close() except Exception: pass db = self._get_db() self._connection = db.open() self._connection.onCloseCallback(self._on_close_callback)
[ "def", "_open_connection", "(", "self", ")", ":", "if", "self", ".", "_connection", ":", "try", ":", "self", ".", "_connection", ".", "close", "(", ")", "except", "Exception", ":", "pass", "db", "=", "self", ".", "_get_db", "(", ")", "self", ".", "_c...
Open the connection to the database based on the configuration file.
[ "Open", "the", "connection", "to", "the", "database", "based", "on", "the", "configuration", "file", "." ]
python
train
gmichaeljaison/cv-utils
cv_utils/img_utils.py
https://github.com/gmichaeljaison/cv-utils/blob/a8251c870165a7428d8c468a6436aa41d0cf7c09/cv_utils/img_utils.py#L115-L157
def add_text_img(img, text, pos, box=None, color=None, thickness=1, scale=1, vertical=False): """ Adds the given text in the image. :param img: Input image :param text: String text :param pos: (x, y) in the image or relative to the given Box object :param box: Box object. If not None, the text is placed inside the box. :param color: Color of the text. :param thickness: Thickness of the font. :param scale: Font size scale. :param vertical: If true, the text is displayed vertically. (slow) :return: """ if color is None: color = COL_WHITE text = str(text) top_left = pos if box is not None: top_left = box.move(pos).to_int().top_left() if top_left[0] > img.shape[1]: return if vertical: if box is not None: h, w, d = box.height, box.width, 3 else: h, w, d = img.shape txt_img = np.zeros((w, h, d), dtype=np.uint8) # 90 deg rotation top_left = h - pos[1], pos[0] cv.putText(txt_img, text, top_left, cv.FONT_HERSHEY_PLAIN, scale, color, thickness) txt_img = ndimage.rotate(txt_img, 90) mask = txt_img > 0 if box is not None: im_box = img_box(img, box) im_box[mask] = txt_img[mask] else: img[mask] = txt_img[mask] else: cv.putText(img, text, top_left, cv.FONT_HERSHEY_PLAIN, scale, color, thickness)
[ "def", "add_text_img", "(", "img", ",", "text", ",", "pos", ",", "box", "=", "None", ",", "color", "=", "None", ",", "thickness", "=", "1", ",", "scale", "=", "1", ",", "vertical", "=", "False", ")", ":", "if", "color", "is", "None", ":", "color"...
Adds the given text in the image. :param img: Input image :param text: String text :param pos: (x, y) in the image or relative to the given Box object :param box: Box object. If not None, the text is placed inside the box. :param color: Color of the text. :param thickness: Thickness of the font. :param scale: Font size scale. :param vertical: If true, the text is displayed vertically. (slow) :return:
[ "Adds", "the", "given", "text", "in", "the", "image", "." ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/text/freqdist.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/text/freqdist.py#L32-L75
def freqdist(X, y=None, ax=None, color=None, N=50, **kwargs): """Displays frequency distribution plot for text. This helper function is a quick wrapper to utilize the FreqDist Visualizer (Transformer) for one-off analysis. Parameters ---------- X: ndarray or DataFrame of shape n x m A matrix of n instances with m features. In the case of text, X is a list of list of already preprocessed words y: ndarray or Series of length n An array or series of target or class values ax: matplotlib axes The axes to plot the figure on. color: string Specify color for barchart N: integer Top N tokens to be plotted. kwargs: dict Keyword arguments passed to the super class. Returns ------- ax: matplotlib axes Returns the axes that the plot was drawn on. """ # Instantiate the visualizer visualizer = FreqDistVisualizer( ax, X, color, **kwargs ) # Fit and transform the visualizer (calls draw) visualizer.fit(X, y, **kwargs) visualizer.transform(X) # Return the axes object on the visualizer return visualizer.ax
[ "def", "freqdist", "(", "X", ",", "y", "=", "None", ",", "ax", "=", "None", ",", "color", "=", "None", ",", "N", "=", "50", ",", "*", "*", "kwargs", ")", ":", "# Instantiate the visualizer", "visualizer", "=", "FreqDistVisualizer", "(", "ax", ",", "X...
Displays frequency distribution plot for text. This helper function is a quick wrapper to utilize the FreqDist Visualizer (Transformer) for one-off analysis. Parameters ---------- X: ndarray or DataFrame of shape n x m A matrix of n instances with m features. In the case of text, X is a list of list of already preprocessed words y: ndarray or Series of length n An array or series of target or class values ax: matplotlib axes The axes to plot the figure on. color: string Specify color for barchart N: integer Top N tokens to be plotted. kwargs: dict Keyword arguments passed to the super class. Returns ------- ax: matplotlib axes Returns the axes that the plot was drawn on.
[ "Displays", "frequency", "distribution", "plot", "for", "text", "." ]
python
train