repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
mwouts/jupytext
jupytext/cell_reader.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/cell_reader.py#L509-L557
def find_region_end(self, lines):
    """Find the end of the region started with start and end markers.

    Returns a tuple ``(end, next_start, explicit)`` where, based on the
    return statements below: ``end`` is the index just past this cell's
    content, ``next_start`` is the index where scanning for the next cell
    should resume, and ``explicit`` is True only when an explicit end
    marker line was matched.  (Reviewer note: names inferred from usage
    here; confirm against the callers in this module.)
    """
    # A 'cell_type' in the header metadata overrides the default of 'code'.
    if self.metadata and 'cell_type' in self.metadata:
        self.cell_type = self.metadata.pop('cell_type')
    else:
        self.cell_type = 'code'

    # Tracks whether we are inside a (possibly multi-line) string literal,
    # so markers inside quoted text are not mistaken for cell boundaries.
    parser = StringParser(self.language or self.default_language)

    for i, line in enumerate(lines):
        # skip cell header
        if self.metadata is not None and i == 0:
            continue

        # Inside a quoted string: feed the parser and ignore marker patterns.
        if parser.is_quoted():
            parser.read_line(line)
            continue

        parser.read_line(line)

        # New code region
        # Simple code pattern in LightScripts must be preceded with a blank line
        if self.start_code_re.match(line) or (
                self.simple_start_code_re and
                self.simple_start_code_re.match(line) and
                (self.cell_marker_start or i == 0 or
                 _BLANK_LINE.match(lines[i - 1]))):

            if self.explicit_end_marker_required:
                # Metadata here was conditioned on finding an explicit end marker
                # before the next start marker. So we dismiss it.
                self.metadata = None
                self.language = None

            # Trailing blank line(s) belong to the separator, not the cell.
            if i > 0 and _BLANK_LINE.match(lines[i - 1]):
                if i > 1 and _BLANK_LINE.match(lines[i - 2]):
                    return i - 2, i, False
                return i - 1, i, False
            return i, i, False

        if not self.ignore_end_marker and self.end_code_re:
            if self.end_code_re.match(line):
                # Explicit end marker found; consume the marker line itself.
                return i, i + 1, True
        elif _BLANK_LINE.match(line):
            # Without end markers, a blank line ends the cell unless the
            # following code is indented (i.e. still part of this block).
            if not next_code_is_indented(lines[i:]):
                if i > 0:
                    return i, i + 1, False
                if len(lines) > 1 and not _BLANK_LINE.match(lines[1]):
                    return 1, 1, False
                return 1, 2, False

    # No boundary found: the region extends to the end of the input.
    return len(lines), len(lines), False
[ "def", "find_region_end", "(", "self", ",", "lines", ")", ":", "if", "self", ".", "metadata", "and", "'cell_type'", "in", "self", ".", "metadata", ":", "self", ".", "cell_type", "=", "self", ".", "metadata", ".", "pop", "(", "'cell_type'", ")", "else", ...
Find the end of the region started with start and end markers
[ "Find", "the", "end", "of", "the", "region", "started", "with", "start", "and", "end", "markers" ]
python
train
BlackEarth/bf
bf/css.py
https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/css.py#L91-L99
def all_selectors(Class, fn):
    """Return a sorted list of the distinct selectors that occur in the
    stylesheet file ``fn``.

    :param fn: path of the CSS file to parse.
    :return: sorted list of unique selector text strings.
    """
    cssparser = cssutils.CSSParser(validate=False)
    css = cssparser.parseFile(fn)
    # Collect into a set directly: de-duplicates as we go, and
    # sorted(set) replaces the redundant sorted(list(set(...))) dance.
    selectors = set()
    for rule in css.cssRules:
        # isinstance() instead of an exact type() comparison, so that
        # subclasses of CSSStyleRule are also included.
        if isinstance(rule, cssutils.css.CSSStyleRule):
            selectors.update(sel.selectorText for sel in rule.selectorList)
    return sorted(selectors)
[ "def", "all_selectors", "(", "Class", ",", "fn", ")", ":", "selectors", "=", "[", "]", "cssparser", "=", "cssutils", ".", "CSSParser", "(", "validate", "=", "False", ")", "css", "=", "cssparser", ".", "parseFile", "(", "fn", ")", "for", "rule", "in", ...
return a sorted list of selectors that occur in the stylesheet
[ "return", "a", "sorted", "list", "of", "selectors", "that", "occur", "in", "the", "stylesheet" ]
python
train
sorgerlab/indra
indra/databases/cbio_client.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L312-L347
def get_ccle_mutations(gene_list, cell_lines, mutation_type=None):
    """Return a dict of mutations in given genes and cell lines from CCLE.

    This is a specialized call to get_mutations tailored to CCLE cell lines.

    Parameters
    ----------
    gene_list : list[str]
        HGNC gene symbols to get mutations in.
    cell_lines : list[str]
        CCLE cell line names to get mutations for.
    mutation_type : Optional[str]
        Mutation type to filter to, one of: missense, nonsense,
        frame_shift_ins, frame_shift_del, splice_site.

    Returns
    -------
    mutations : dict
        Nested dict of the form {cell_line: {gene: [mutation1, ...]}},
        e.g. {'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']}}.
    """
    mutations = {}
    for cell_line in cell_lines:
        per_gene = {gene: [] for gene in gene_list}
        result = get_mutations(ccle_study, gene_list,
                               mutation_type=mutation_type,
                               case_id=cell_line)
        # Pair up the parallel gene / amino-acid-change columns.
        for gene, change in zip(result['gene_symbol'],
                                result['amino_acid_change']):
            per_gene[gene].append(str(change))
        mutations[cell_line] = per_gene
    return mutations
[ "def", "get_ccle_mutations", "(", "gene_list", ",", "cell_lines", ",", "mutation_type", "=", "None", ")", ":", "mutations", "=", "{", "cl", ":", "{", "g", ":", "[", "]", "for", "g", "in", "gene_list", "}", "for", "cl", "in", "cell_lines", "}", "for", ...
Return a dict of mutations in given genes and cell lines from CCLE. This is a specialized call to get_mutations tailored to CCLE cell lines. Parameters ---------- gene_list : list[str] A list of HGNC gene symbols to get mutations in cell_lines : list[str] A list of CCLE cell line names to get mutations for. mutation_type : Optional[str] The type of mutation to filter to. mutation_type can be one of: missense, nonsense, frame_shift_ins, frame_shift_del, splice_site Returns ------- mutations : dict The result from cBioPortal as a dict in the format {cell_line : {gene : [mutation1, mutation2, ...] }} Example: {'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']}, 'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}}
[ "Return", "a", "dict", "of", "mutations", "in", "given", "genes", "and", "cell", "lines", "from", "CCLE", "." ]
python
train
LionelAuroux/pyrser
pyrser/hooks/set.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/hooks/set.py#L41-L52
def set_node_as_int(self, dst, src):
    """Set a node to a value captured from another node.

    example::

        R = [
            In : node
            #setcapture(_, node)
        ]
    """
    captured = self.value(src)
    dst.value = captured
    return True
[ "def", "set_node_as_int", "(", "self", ",", "dst", ",", "src", ")", ":", "dst", ".", "value", "=", "self", ".", "value", "(", "src", ")", "return", "True" ]
Set a node to a value captured from another node example:: R = [ In : node #setcapture(_, node) ]
[ "Set", "a", "node", "to", "a", "value", "captured", "from", "another", "node" ]
python
test
google/grr
grr/server/grr_response_server/databases/mem_artifacts.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_artifacts.py#L34-L41
def ReadAllArtifacts(self):
    """Lists all artifacts that are stored in the database.

    Returns copies of the stored artifacts so callers cannot mutate the
    in-memory store.
    """
    # itervalues keeps the file's six-based py2/py3 compatibility style.
    return [artifact.Copy() for artifact in itervalues(self.artifacts)]
[ "def", "ReadAllArtifacts", "(", "self", ")", ":", "artifacts", "=", "[", "]", "for", "artifact", "in", "itervalues", "(", "self", ".", "artifacts", ")", ":", "artifacts", ".", "append", "(", "artifact", ".", "Copy", "(", ")", ")", "return", "artifacts" ]
Lists all artifacts that are stored in the database.
[ "Lists", "all", "artifacts", "that", "are", "stored", "in", "the", "database", "." ]
python
train
guma44/GEOparse
GEOparse/GEOTypes.py
https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L773-L795
def pivot_samples(self, values, index="ID_REF"):
    """Pivot samples by specified column.

    Builds a table whose columns are the sample (GSM) names, whose index
    is the chosen column (eg. ID_REF), and whose cells hold the requested
    value type from each sample's table.

    Args:
        values (:obj:`str`): Column name present in all GSMs.
        index (:obj:`str`, optional): Column name that will become an index
            in pivoted table. Defaults to "ID_REF".

    Returns:
        :obj:`pandas.DataFrame`: Pivoted data
    """
    frames = []
    for gsm in self.gsms.values():
        # Work on a copy so the sample's own table is left untouched,
        # then tag each row with the sample name for the pivot.
        frame = gsm.table.copy()
        frame["name"] = gsm.name
        frames.append(frame)
    stacked = concat(frames)
    return stacked.pivot(index=index, values=values, columns="name")
[ "def", "pivot_samples", "(", "self", ",", "values", ",", "index", "=", "\"ID_REF\"", ")", ":", "data", "=", "[", "]", "for", "gsm", "in", "self", ".", "gsms", ".", "values", "(", ")", ":", "tmp_data", "=", "gsm", ".", "table", ".", "copy", "(", "...
Pivot samples by specified column. Construct a table in which columns (names) are the samples, index is a specified column eg. ID_REF and values in the columns are of one specified type. Args: values (:obj:`str`): Column name present in all GSMs. index (:obj:`str`, optional): Column name that will become an index in pivoted table. Defaults to "ID_REF". Returns: :obj:`pandas.DataFrame`: Pivoted data
[ "Pivot", "samples", "by", "specified", "column", "." ]
python
train
delfick/harpoon
harpoon/dockerpty/pty.py
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/dockerpty/pty.py#L202-L220
def resize(self, size=None):
    """
    Resize the container's PTY.

    If `size` is not None, it must be a tuple of (height,width), otherwise
    it will be determined by the size of the current TTY.
    """
    if not self.israw():
        return

    # Falls back to the current TTY's dimensions when no size was given
    # (`or` deliberately treats any falsy size the same as None, matching
    # the original behavior).
    dimensions = size or tty.size(self.stdout)
    if dimensions is None:
        return

    rows, cols = dimensions
    try:
        self.client.resize(self.container, height=rows, width=cols)
    except IOError:
        # Container already exited
        pass
[ "def", "resize", "(", "self", ",", "size", "=", "None", ")", ":", "if", "not", "self", ".", "israw", "(", ")", ":", "return", "size", "=", "size", "or", "tty", ".", "size", "(", "self", ".", "stdout", ")", "if", "size", "is", "not", "None", ":"...
Resize the container's PTY. If `size` is not None, it must be a tuple of (height,width), otherwise it will be determined by the size of the current TTY.
[ "Resize", "the", "container", "s", "PTY", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/registry.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/registry.py#L429-L454
def _split_path(self, path): """ Splits a Registry path and returns the hive and key. @type path: str @param path: Registry path. @rtype: tuple( int, str ) @return: Tuple containing the hive handle and the subkey path. The hive handle is always one of the following integer constants: - L{win32.HKEY_CLASSES_ROOT} - L{win32.HKEY_CURRENT_USER} - L{win32.HKEY_LOCAL_MACHINE} - L{win32.HKEY_USERS} - L{win32.HKEY_PERFORMANCE_DATA} - L{win32.HKEY_CURRENT_CONFIG} """ if '\\' in path: p = path.find('\\') hive = path[:p] path = path[p+1:] else: hive = path path = None handle = self._hives_by_name[ hive.upper() ] return handle, path
[ "def", "_split_path", "(", "self", ",", "path", ")", ":", "if", "'\\\\'", "in", "path", ":", "p", "=", "path", ".", "find", "(", "'\\\\'", ")", "hive", "=", "path", "[", ":", "p", "]", "path", "=", "path", "[", "p", "+", "1", ":", "]", "else"...
Splits a Registry path and returns the hive and key. @type path: str @param path: Registry path. @rtype: tuple( int, str ) @return: Tuple containing the hive handle and the subkey path. The hive handle is always one of the following integer constants: - L{win32.HKEY_CLASSES_ROOT} - L{win32.HKEY_CURRENT_USER} - L{win32.HKEY_LOCAL_MACHINE} - L{win32.HKEY_USERS} - L{win32.HKEY_PERFORMANCE_DATA} - L{win32.HKEY_CURRENT_CONFIG}
[ "Splits", "a", "Registry", "path", "and", "returns", "the", "hive", "and", "key", "." ]
python
train
PolicyStat/docx2html
docx2html/core.py
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L34-L46
def replace_ext(file_path, new_ext):
    """
    Replace the final extension of ``file_path`` with ``new_ext``.
    A leading extension separator is added to ``new_ext`` if missing.

    >>> replace_ext('one/two/three.four.doc', '.html')
    'one/two/three.four.html'
    >>> replace_ext('one/two/three.four.DOC', '.html')
    'one/two/three.four.html'
    >>> replace_ext('one/two/three.four.DOC', 'html')
    'one/two/three.four.html'
    """
    if not new_ext.startswith(os.extsep):
        new_ext = os.extsep + new_ext
    index = file_path.rfind(os.extsep)
    if index == -1:
        # Bug fix: previously a path with no extension lost its last
        # character (rfind returned -1, so file_path[:-1] was used).
        # Simply append the new extension instead.
        # NOTE(review): a dot in a directory name (e.g. 'v1.0/file') would
        # still be treated as the extension separator — confirm callers
        # only pass basenames with real extensions.
        return file_path + new_ext
    return file_path[:index] + new_ext
[ "def", "replace_ext", "(", "file_path", ",", "new_ext", ")", ":", "if", "not", "new_ext", ".", "startswith", "(", "os", ".", "extsep", ")", ":", "new_ext", "=", "os", ".", "extsep", "+", "new_ext", "index", "=", "file_path", ".", "rfind", "(", "os", ...
>>> replace_ext('one/two/three.four.doc', '.html') 'one/two/three.four.html' >>> replace_ext('one/two/three.four.DOC', '.html') 'one/two/three.four.html' >>> replace_ext('one/two/three.four.DOC', 'html') 'one/two/three.four.html'
[ ">>>", "replace_ext", "(", "one", "/", "two", "/", "three", ".", "four", ".", "doc", ".", "html", ")", "one", "/", "two", "/", "three", ".", "four", ".", "html", ">>>", "replace_ext", "(", "one", "/", "two", "/", "three", ".", "four", ".", "DOC",...
python
test
Miserlou/Zappa
zappa/core.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L2239-L2248
def stack_outputs(self, name):
    """
    Given a stack name, describe the CloudFormation stack and return a dict
    mapping OutputKey -> OutputValue, or an empty dict if the stack cannot
    be described (e.g. it does not exist).

    (The previous docstring said "CloudFront", but ``describe_stacks`` is a
    CloudFormation API call.)
    """
    try:
        stack = self.cf_client.describe_stacks(StackName=name)['Stacks'][0]
        return {x['OutputKey']: x['OutputValue'] for x in stack['Outputs']}
    except botocore.client.ClientError:
        # Any describe failure (most commonly a missing stack) yields {}.
        return {}
[ "def", "stack_outputs", "(", "self", ",", "name", ")", ":", "try", ":", "stack", "=", "self", ".", "cf_client", ".", "describe_stacks", "(", "StackName", "=", "name", ")", "[", "'Stacks'", "]", "[", "0", "]", "return", "{", "x", "[", "'OutputKey'", "...
Given a name, describes CloudFormation stacks and returns a dict of the stack Outputs, else returns an empty dict.
[ "Given", "a", "name", "describes", "CloudFront", "stacks", "and", "returns", "dict", "of", "the", "stack", "Outputs", "else", "returns", "an", "empty", "dict", "." ]
python
train
saltstack/salt
salt/utils/cloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/cloud.py#L2425-L2448
def is_public_ip(ip):
    '''
    Determines whether an IP address falls within one of the private IP ranges

    IPv6 addresses are considered public unless they are link local
    (``fe80:`` prefix).  IPv4 addresses are checked against the RFC 1918
    private ranges plus the loopback range.
    '''
    if ':' in ip:
        # ipv6
        if ip.startswith('fe80:'):
            # ipv6 link local
            return False
        return True

    addr = ip_to_int(ip)
    # Bug fix: the previous strict < / > comparisons wrongly classified the
    # first and last address of every private range (e.g. 10.0.0.0 and
    # 10.255.255.255) as public.  Use inclusive bounds.
    if 167772160 <= addr <= 184549375:
        # 10.0.0.0/8
        return False
    elif 3232235520 <= addr <= 3232301055:
        # 192.168.0.0/16
        return False
    elif 2886729728 <= addr <= 2887778303:
        # 172.16.0.0/12
        return False
    elif 2130706432 <= addr <= 2147483647:
        # 127.0.0.0/8 (loopback)
        return False
    return True
[ "def", "is_public_ip", "(", "ip", ")", ":", "if", "':'", "in", "ip", ":", "# ipv6", "if", "ip", ".", "startswith", "(", "'fe80:'", ")", ":", "# ipv6 link local", "return", "False", "return", "True", "addr", "=", "ip_to_int", "(", "ip", ")", "if", "1677...
Determines whether an IP address falls within one of the private IP ranges
[ "Determines", "whether", "an", "IP", "address", "falls", "within", "one", "of", "the", "private", "IP", "ranges" ]
python
train
user-cont/conu
conu/backend/docker/container.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/docker/container.py#L459-L506
def execute(self, command, blocking=True, exec_create_kwargs=None, exec_start_kwargs=None):
    """
    Execute a command in this container -- the container needs to be running.

    If the command fails, a ConuException is thrown.

    This is a blocking call by default and writes output of the command to logger
    using the INFO level -- this behavior can be changed if you set
    the argument `blocking` to `False`.

    If not blocking, you should consume the returned iterator in order to see logs or know
    when the command finished:

    ::

        for line in container.execute(["ping", "-c", "4", "8.8.8.8"], blocking=False):
            print(line)
        print("command finished")

    :param command: list of str, command to execute in the container
    :param blocking: bool, if True blocks until the command finishes
    :param exec_create_kwargs: dict, params to pass to exec_create()
    :param exec_start_kwargs: dict, params to pass to exec_start()
    :return: iterator if non-blocking or list of bytes if blocking
    """
    logger.info("running command %s", command)

    exec_create_kwargs = exec_create_kwargs or {}
    exec_start_kwargs = exec_start_kwargs or {}
    # Streaming is forced so the blocking branch can log output line by line.
    exec_start_kwargs["stream"] = True  # we want stream no matter what
    exec_i = self.d.exec_create(self.get_id(), command, **exec_create_kwargs)
    output = self.d.exec_start(exec_i, **exec_start_kwargs)
    if blocking:
        response = []
        for line in output:
            response.append(line)
            logger.info("%s", line.decode("utf-8").strip("\n\r"))
        # After the stream is exhausted, inspect the exec to get the exit code.
        e_inspect = self.d.exec_inspect(exec_i)
        exit_code = e_inspect["ExitCode"]
        if exit_code:
            logger.error("command failed")
            logger.info("exec metadata: %s", e_inspect)
            raise ConuException("failed to execute command %s, exit code %s" % (
                command, exit_code))
        return response
    # TODO: for interactive use cases we need to provide API so users can do exec_inspect
    return output
[ "def", "execute", "(", "self", ",", "command", ",", "blocking", "=", "True", ",", "exec_create_kwargs", "=", "None", ",", "exec_start_kwargs", "=", "None", ")", ":", "logger", ".", "info", "(", "\"running command %s\"", ",", "command", ")", "exec_create_kwargs...
Execute a command in this container -- the container needs to be running. If the command fails, a ConuException is thrown. This is a blocking call by default and writes output of the command to logger using the INFO level -- this behavior can be changed if you set the argument `blocking` to `False`. If not blocking, you should consume the returned iterator in order to see logs or know when the command finished: :: for line in container.execute(["ping", "-c", "4", "8.8.8.8"], blocking=False): print(line) print("command finished") :param command: list of str, command to execute in the container :param blocking: bool, if True blocks until the command finishes :param exec_create_kwargs: dict, params to pass to exec_create() :param exec_start_kwargs: dict, params to pass to exec_start() :return: iterator if non-blocking or list of bytes if blocking
[ "Execute", "a", "command", "in", "this", "container", "--", "the", "container", "needs", "to", "be", "running", "." ]
python
train
acorg/dark-matter
dark/filter.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/filter.py#L392-L443
def parseFASTAEditingCommandLineOptions(args, reads):
    """
    Examine parsed FASTA editing command-line options and return information
    about kept sites and sequences.

    @param args: An argparse namespace, as returned by the argparse
        C{parse_args} function.
    @param reads: A C{Reads} instance to filter.
    @return: The filtered C{Reads} instance.
    @raise ValueError: If a line of a keep-sites or remove-sites file cannot
        be parsed as a range string.
    """
    removeGaps = args.removeGaps
    removeDescriptions = args.removeDescriptions
    truncateTitlesAfter = args.truncateTitlesAfter
    keepSites = (
        parseRangeString(args.keepSites, convertToZeroBased=True)
        if args.keepSites else None)

    if args.keepSitesFile:
        keepSites = keepSites or set()
        with open(args.keepSitesFile) as fp:
            # Bug fix: enumerate from 1 so error messages report human
            # (1-based) line numbers instead of 0-based offsets.
            for lineNumber, line in enumerate(fp, start=1):
                try:
                    keepSites.update(
                        parseRangeString(line, convertToZeroBased=True))
                except ValueError as e:
                    raise ValueError(
                        'Keep sites file %r line %d could not be parsed: '
                        '%s' % (args.keepSitesFile, lineNumber, e))

    removeSites = (
        parseRangeString(args.removeSites, convertToZeroBased=True)
        if args.removeSites else None)

    if args.removeSitesFile:
        removeSites = removeSites or set()
        with open(args.removeSitesFile) as fp:
            # Same 1-based line numbering fix as for the keep-sites file.
            for lineNumber, line in enumerate(fp, start=1):
                try:
                    removeSites.update(
                        parseRangeString(line, convertToZeroBased=True))
                except ValueError as e:
                    raise ValueError(
                        'Remove sites file %r line %d parse error: %s'
                        % (args.removeSitesFile, lineNumber, e))

    return reads.filter(
        removeGaps=removeGaps,
        truncateTitlesAfter=truncateTitlesAfter,
        removeDescriptions=removeDescriptions,
        idLambda=args.idLambda,
        readLambda=args.readLambda,
        keepSites=keepSites,
        removeSites=removeSites,
        reverse=args.reverse,
        reverseComplement=args.reverseComplement)
[ "def", "parseFASTAEditingCommandLineOptions", "(", "args", ",", "reads", ")", ":", "removeGaps", "=", "args", ".", "removeGaps", "removeDescriptions", "=", "args", ".", "removeDescriptions", "truncateTitlesAfter", "=", "args", ".", "truncateTitlesAfter", "keepSites", ...
Examine parsed FASTA editing command-line options and return information about kept sites and sequences. @param args: An argparse namespace, as returned by the argparse C{parse_args} function. @param reads: A C{Reads} instance to filter. @return: The filtered C{Reads} instance.
[ "Examine", "parsed", "FASTA", "editing", "command", "-", "line", "options", "and", "return", "information", "about", "kept", "sites", "and", "sequences", "." ]
python
train
kblin/ncbi-genome-download
ncbi_genome_download/core.py
https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L582-L613
def get_strain_label(entry, viral=False):
    """Try to extract a strain from an assembly summary entry.

    First this checks 'infraspecific_name', then 'isolate', then it tries
    to get it from 'organism_name'. If all fails, it falls back to just
    returning the assembly accession number.
    """
    def pick_strain(entry):
        # Highest priority: the infraspecific name, e.g. "strain=ABC".
        infraspecific = entry['infraspecific_name']
        if infraspecific != '':
            return infraspecific.split('=')[-1]

        # Next: an explicit isolate name.
        isolate = entry['isolate']
        if isolate != '':
            return isolate

        # Next: anything after the binomial in the organism name
        # (skipped for viral entries).
        name_parts = entry['organism_name'].split(' ')
        if len(name_parts) > 2 and not viral:
            return ' '.join(name_parts[2:])

        # Last resort: the accession number.
        return entry['assembly_accession']

    # Replace characters that are awkward in file names with underscores,
    # after trimming surrounding whitespace.
    translation = str.maketrans({' ': '_', ';': '_', '/': '_', '\\': '_'})
    return pick_strain(entry).strip().translate(translation)
[ "def", "get_strain_label", "(", "entry", ",", "viral", "=", "False", ")", ":", "def", "get_strain", "(", "entry", ")", ":", "strain", "=", "entry", "[", "'infraspecific_name'", "]", "if", "strain", "!=", "''", ":", "strain", "=", "strain", ".", "split", ...
Try to extract a strain from an assembly summary entry. First this checks 'infraspecific_name', then 'isolate', then it tries to get it from 'organism_name'. If all fails, it falls back to just returning the assembly accession number.
[ "Try", "to", "extract", "a", "strain", "from", "an", "assemly", "summary", "entry", "." ]
python
train
apple/turicreate
src/unity/python/turicreate/data_structures/sframe.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L5590-L5655
def dropna_split(self, columns=None, how='any'):
    """
    Split rows with missing values from this SFrame.

    Behaves like :py:func:`~turicreate.SFrame.dropna`, but returns a pair of
    SFrames: the first is what ``dropna`` would return, the second holds all
    the rows ``dropna`` would have filtered out.

    Parameters
    ----------
    columns : list or str, optional
        The columns to use when looking for missing values. By default, all
        columns are used.

    how : {'any', 'all'}, optional
        Whether a row is dropped when at least one column is missing
        ('any', the default) or only when all columns are missing ('all').

    Returns
    -------
    out : (SFrame, SFrame)
        (SFrame with missing values removed, SFrame with the removed
        missing values)

    See Also
    --------
    dropna

    Examples
    --------
    >>> sf = turicreate.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
    >>> good, bad = sf.dropna_split()
    >>> good
    +---+---+
    | a | b |
    +---+---+
    | 1 | a |
    +---+---+
    [1 rows x 2 columns]

    >>> bad
    +------+------+
    |  a   |  b   |
    +------+------+
    | None |  b   |
    | None | None |
    +------+------+
    [2 rows x 2 columns]
    """
    # An explicit empty column list means "use all columns"; dropping NA
    # values then would not be the expected behavior.  This is a no-op, so
    # skip the server round trip entirely.
    if type(columns) is list and len(columns) == 0:
        return (SFrame(_proxy=self.__proxy__), SFrame())

    (columns, all_behavior) = self.__dropna_errchk(columns, how)

    result = self.__proxy__.drop_missing_values(columns, all_behavior, True)
    if len(result) != 2:
        raise RuntimeError("Did not return two SFrames!")

    with cython_context():
        return (SFrame(_proxy=result[0]), SFrame(_proxy=result[1]))
[ "def", "dropna_split", "(", "self", ",", "columns", "=", "None", ",", "how", "=", "'any'", ")", ":", "# If the user gives me an empty list (the indicator to use all columns)", "# NA values being dropped would not be the expected behavior. This", "# is a NOOP, so let's not bother the ...
Split rows with missing values from this SFrame. This function has the same functionality as :py:func:`~turicreate.SFrame.dropna`, but returns a tuple of two SFrames. The first item is the expected output from :py:func:`~turicreate.SFrame.dropna`, and the second item contains all the rows filtered out by the `dropna` algorithm. Parameters ---------- columns : list or str, optional The columns to use when looking for missing values. By default, all columns are used. how : {'any', 'all'}, optional Specifies whether a row should be dropped if at least one column has missing values, or if all columns have missing values. 'any' is default. Returns ------- out : (SFrame, SFrame) (SFrame with missing values removed, SFrame with the removed missing values) See Also -------- dropna Examples -------- >>> sf = turicreate.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]}) >>> good, bad = sf.dropna_split() >>> good +---+---+ | a | b | +---+---+ | 1 | a | +---+---+ [1 rows x 2 columns] >>> bad +------+------+ | a | b | +------+------+ | None | b | | None | None | +------+------+ [2 rows x 2 columns]
[ "Split", "rows", "with", "missing", "values", "from", "this", "SFrame", ".", "This", "function", "has", "the", "same", "functionality", "as", ":", "py", ":", "func", ":", "~turicreate", ".", "SFrame", ".", "dropna", "but", "returns", "a", "tuple", "of", ...
python
train
ff0000/scarlet
scarlet/cms/list.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/list.py#L112-L124
def get_filter_form(self, **kwargs):
    """
    If there is a filter_form, initializes that form with the contents
    of request.GET and returns it.  Falls back to a VersionFilterForm for
    view-backed models, otherwise returns None.
    """
    if self.filter_form:
        return self.filter_form(self.request.GET)
    if self.model and hasattr(self.model._meta, '_is_view'):
        return VersionFilterForm(self.request.GET)
    return None
[ "def", "get_filter_form", "(", "self", ",", "*", "*", "kwargs", ")", ":", "form", "=", "None", "if", "self", ".", "filter_form", ":", "form", "=", "self", ".", "filter_form", "(", "self", ".", "request", ".", "GET", ")", "elif", "self", ".", "model",...
If there is a filter_form, initializes that form with the contents of request.GET and returns it.
[ "If", "there", "is", "a", "filter_form", "initializes", "that", "form", "with", "the", "contents", "of", "request", ".", "GET", "and", "returns", "it", "." ]
python
train
jwass/geojsonio.py
geojsonio/geojsonio.py
https://github.com/jwass/geojsonio.py/blob/8229a48238f128837e6dce49f18310df84968825/geojsonio/geojsonio.py#L18-L38
def display(contents, domain=DEFAULT_DOMAIN, force_gist=False):
    """
    Open a web browser pointing to geojson.io with the specified content.

    Small content is embedded directly in the URL; large content is first
    uploaded as an anonymous GitHub gist that geojson.io then downloads
    and displays.

    Parameters
    ----------
    content - (see make_geojson)
    domain - string, default http://geojson.io
    force_gist - bool, default False
        Create an anonymous gist on Github regardless of the size of the
        contents

    Returns
    -------
    The URL that was opened.
    """
    target = make_url(contents, domain, force_gist)
    webbrowser.open(target)
    return target
[ "def", "display", "(", "contents", ",", "domain", "=", "DEFAULT_DOMAIN", ",", "force_gist", "=", "False", ")", ":", "url", "=", "make_url", "(", "contents", ",", "domain", ",", "force_gist", ")", "webbrowser", ".", "open", "(", "url", ")", "return", "url...
Open a web browser pointing to geojson.io with the specified content. If the content is large, an anonymous gist will be created on github and the URL will instruct geojson.io to download the gist data and then display. If the content is small, this step is not needed as the data can be included in the URL Parameters ---------- content - (see make_geojson) domain - string, default http://geojson.io force_gist - bool, default False Create an anonymous gist on Github regardless of the size of the contents
[ "Open", "a", "web", "browser", "pointing", "to", "geojson", ".", "io", "with", "the", "specified", "content", "." ]
python
train
google/grr
grr/core/executables/python_hacks/modify_network.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/executables/python_hacks/modify_network.py#L39-L57
def DisableInterfaces(interface):
    """Tries to disable an interface via netsh.

    Only runs the command on hosts whose platform string contains 'vista'
    or '2008' (the versions this was tested on; the original docstring said
    "Vista and 7", which does not match the checked list).

    Args:
      interface: Name of the interface to disable.

    Returns:
      res which is a tuple of (stdout, stderr, exit_status, time_taken).
    """
    set_tested_versions = ['vista', '2008']
    set_args = ['/c', 'netsh', 'set', 'interface', interface, 'DISABLED']
    host_version = platform.platform().lower()
    for version in set_tested_versions:
        # Idiomatic membership test instead of find(...) != -1.
        if version in host_version:
            # pylint: disable=undefined-variable
            res = client_utils_common.Execute(
                'cmd', set_args, time_limit=-1, bypass_whitelist=True)
            return res
    return ('', 'Command not available for this version.', 99, '')
[ "def", "DisableInterfaces", "(", "interface", ")", ":", "set_tested_versions", "=", "[", "'vista'", ",", "'2008'", "]", "set_args", "=", "[", "'/c'", ",", "'netsh'", ",", "'set'", ",", "'interface'", ",", "interface", ",", "'DISABLED'", "]", "host_version", ...
Tries to disable an interface. Only works on Vista and 7. Args: interface: Name of the interface to disable. Returns: res which is a tuple of (stdout, stderr, exit_status, time_taken).
[ "Tries", "to", "disable", "an", "interface", ".", "Only", "works", "on", "Vista", "and", "7", "." ]
python
train
inveniosoftware/invenio-iiif
invenio_iiif/ext.py
https://github.com/inveniosoftware/invenio-iiif/blob/e4f2f93eaabdc8e2efea81c239ab76d481191959/invenio_iiif/ext.py#L46-L50
def init_app(self, app):
    """Flask application initialization.

    Delegates base extension setup to the parent class, then registers the
    IIIF REST endpoints on a fresh ``Api`` bound to ``app``, under the
    URL prefix configured in ``IIIF_API_PREFIX``.
    """
    super(InvenioIIIFAPI, self).init_app(app)
    api = Api(app=app)
    self.iiif_ext.init_restful(api, prefix=app.config['IIIF_API_PREFIX'])
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "super", "(", "InvenioIIIFAPI", ",", "self", ")", ".", "init_app", "(", "app", ")", "api", "=", "Api", "(", "app", "=", "app", ")", "self", ".", "iiif_ext", ".", "init_restful", "(", "api", ",",...
Flask application initialization.
[ "Flask", "application", "initialization", "." ]
python
train
biosustain/optlang
optlang/interface.py
https://github.com/biosustain/optlang/blob/13673ac26f6b3ba37a2ef392489722c52e3c5ff1/optlang/interface.py#L921-L936
def to_json(self):
    """
    Returns a json-compatible object from the objective that can be saved
    using the json module.

    Example
    --------
    >>> import json
    >>> with open("path_to_file.json", "w") as outfile:
    >>>     json.dump(obj.to_json(), outfile)
    """
    return {
        "name": self.name,
        "expression": expr_to_json(self.expression),
        "direction": self.direction,
    }
[ "def", "to_json", "(", "self", ")", ":", "json_obj", "=", "{", "\"name\"", ":", "self", ".", "name", ",", "\"expression\"", ":", "expr_to_json", "(", "self", ".", "expression", ")", ",", "\"direction\"", ":", "self", ".", "direction", "}", "return", "jso...
Returns a json-compatible object from the objective that can be saved using the json module. Example -------- >>> import json >>> with open("path_to_file.json", "w") as outfile: >>> json.dump(obj.to_json(), outfile)
[ "Returns", "a", "json", "-", "compatible", "object", "from", "the", "objective", "that", "can", "be", "saved", "using", "the", "json", "module", "." ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1867-L1922
def add_reorganize_data(self, name, input_name, output_name, mode = 'SPACE_TO_DEPTH', block_size = 2): """ Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE". Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. mode: str - If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension. Input is spatially divided into non-overlapping blocks of size block_size X block_size and data from each block is moved to the channel dimension. Output CHW dimensions are: [C * block_size * block_size, H/block_size, C/block_size]. - If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension. Reverse of the operation 'SPACE_TO_DEPTH'. Output CHW dimensions are: [C/(block_size * block_size), H * block_size, C * block_size]. block_size: int Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size) must divide C when mode is 'DEPTH_TO_SPACE'. See Also -------- add_flatten, add_reshape """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.reorganizeData # Set the parameters if block_size < 2: raise ValueError("Invalid block_size value %d. Must be greater than 1." % block_size) spec_layer_params.blockSize = block_size if mode == 'SPACE_TO_DEPTH': spec_layer_params.mode = \ _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('SPACE_TO_DEPTH') elif mode == 'DEPTH_TO_SPACE': spec_layer_params.mode = \ _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('DEPTH_TO_SPACE') else: raise NotImplementedError( 'Unknown reorganization mode %s ' % mode)
[ "def", "add_reorganize_data", "(", "self", ",", "name", ",", "input_name", ",", "output_name", ",", "mode", "=", "'SPACE_TO_DEPTH'", ",", "block_size", "=", "2", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a n...
Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE". Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. mode: str - If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension. Input is spatially divided into non-overlapping blocks of size block_size X block_size and data from each block is moved to the channel dimension. Output CHW dimensions are: [C * block_size * block_size, H/block_size, C/block_size]. - If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension. Reverse of the operation 'SPACE_TO_DEPTH'. Output CHW dimensions are: [C/(block_size * block_size), H * block_size, C * block_size]. block_size: int Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size) must divide C when mode is 'DEPTH_TO_SPACE'. See Also -------- add_flatten, add_reshape
[ "Add", "a", "data", "reorganization", "layer", "of", "type", "SPACE_TO_DEPTH", "or", "DEPTH_TO_SPACE", "." ]
python
train
CamDavidsonPilon/lifetimes
lifetimes/fitters/modified_beta_geo_fitter.py
https://github.com/CamDavidsonPilon/lifetimes/blob/f926308bc03c17c1d12fead729de43885cf13321/lifetimes/fitters/modified_beta_geo_fitter.py#L58-L113
def fit( self, frequency, recency, T, weights=None, initial_params=None, verbose=False, tol=1e-7, index=None, **kwargs ): """ Fit the data to the MBG/NBD model. Parameters ---------- frequency: array_like the frequency vector of customers' purchases (denoted x in literature). recency: array_like the recency vector of customers' purchases (denoted t_x in literature). T: array_like customers' age (time units since first purchase) weights: None or array_like Number of customers with given frequency/recency/T, defaults to 1 if not specified. Fader and Hardie condense the individual RFM matrix into all observed combinations of frequency/recency/T. This parameter represents the count of customers with a given purchase pattern. Instead of calculating individual log-likelihood, the log-likelihood is calculated for each pattern and multiplied by the number of customers with that pattern. verbose : bool, optional set to true to print out convergence diagnostics. tol : float, optional tolerance for termination of the function minimization process. index: array_like, optional index for resulted DataFrame which is accessible via self.data kwargs: key word arguments to pass to the scipy.optimize.minimize function as options dict Returns ------- ModifiedBetaGeoFitter: With additional properties and methods like ``params_`` and ``predict`` """ # although the parent method is called, this class's # _negative_log_likelihood is referenced super(ModifiedBetaGeoFitter, self).fit( frequency, recency, T, weights, initial_params, verbose, tol, index=index, **kwargs ) # this needs to be reassigned from the parent method self.generate_new_data = lambda size=1: modified_beta_geometric_nbd_model( T, *self._unload_params("r", "alpha", "a", "b"), size=size ) self.variance_matrix_ = self._compute_variance_matrix() self.standard_errors_ = self._compute_standard_errors() self.confidence_intervals_ = self._compute_confidence_intervals() return self
[ "def", "fit", "(", "self", ",", "frequency", ",", "recency", ",", "T", ",", "weights", "=", "None", ",", "initial_params", "=", "None", ",", "verbose", "=", "False", ",", "tol", "=", "1e-7", ",", "index", "=", "None", ",", "*", "*", "kwargs", ")", ...
Fit the data to the MBG/NBD model. Parameters ---------- frequency: array_like the frequency vector of customers' purchases (denoted x in literature). recency: array_like the recency vector of customers' purchases (denoted t_x in literature). T: array_like customers' age (time units since first purchase) weights: None or array_like Number of customers with given frequency/recency/T, defaults to 1 if not specified. Fader and Hardie condense the individual RFM matrix into all observed combinations of frequency/recency/T. This parameter represents the count of customers with a given purchase pattern. Instead of calculating individual log-likelihood, the log-likelihood is calculated for each pattern and multiplied by the number of customers with that pattern. verbose : bool, optional set to true to print out convergence diagnostics. tol : float, optional tolerance for termination of the function minimization process. index: array_like, optional index for resulted DataFrame which is accessible via self.data kwargs: key word arguments to pass to the scipy.optimize.minimize function as options dict Returns ------- ModifiedBetaGeoFitter: With additional properties and methods like ``params_`` and ``predict``
[ "Fit", "the", "data", "to", "the", "MBG", "/", "NBD", "model", "." ]
python
train
inveniosoftware/invenio-pages
invenio_pages/admin.py
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/invenio_pages/admin.py#L72-L77
def same_page_choosen(form, field): """Check that we are not trying to assign list page itself as a child.""" if form._obj is not None: if field.data.id == form._obj.list_id: raise ValidationError( _('You cannot assign list page itself as a child.'))
[ "def", "same_page_choosen", "(", "form", ",", "field", ")", ":", "if", "form", ".", "_obj", "is", "not", "None", ":", "if", "field", ".", "data", ".", "id", "==", "form", ".", "_obj", ".", "list_id", ":", "raise", "ValidationError", "(", "_", "(", ...
Check that we are not trying to assign list page itself as a child.
[ "Check", "that", "we", "are", "not", "trying", "to", "assign", "list", "page", "itself", "as", "a", "child", "." ]
python
train
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/signature.py
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/signature.py#L82-L88
def signature(name: str) -> Optional[Tuple]: """ Return the file or URL signature for name :param name: :return: """ return url_signature(name) if is_url(name) else file_signature(name) if is_file(name) else None
[ "def", "signature", "(", "name", ":", "str", ")", "->", "Optional", "[", "Tuple", "]", ":", "return", "url_signature", "(", "name", ")", "if", "is_url", "(", "name", ")", "else", "file_signature", "(", "name", ")", "if", "is_file", "(", "name", ")", ...
Return the file or URL signature for name :param name: :return:
[ "Return", "the", "file", "or", "URL", "signature", "for", "name", ":", "param", "name", ":", ":", "return", ":" ]
python
train
projectatomic/atomic-reactor
atomic_reactor/api.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/api.py#L124-L145
def build_image_here(source, image, parent_registry=None, target_registries=None, parent_registry_insecure=False, target_registries_insecure=False, dont_pull_base_image=False, **kwargs): """ build image from provided dockerfile (specified by `source`) in current environment :param source: dict, where/how to get source code to put in image :param image: str, tag for built image ([registry/]image_name[:tag]) :param parent_registry: str, registry to pull base image from :param target_registries: list of str, list of registries to push image to (might change in future) :param parent_registry_insecure: bool, allow connecting to parent registry over plain http :param target_registries_insecure: bool, allow connecting to target registries over plain http :param dont_pull_base_image: bool, don't pull or update base image specified in dockerfile :return: BuildResults """ build_json = _prepare_build_json(image, source, parent_registry, target_registries, parent_registry_insecure, target_registries_insecure, dont_pull_base_image, **kwargs) m = DockerBuildWorkflow(**build_json) return m.build_docker_image()
[ "def", "build_image_here", "(", "source", ",", "image", ",", "parent_registry", "=", "None", ",", "target_registries", "=", "None", ",", "parent_registry_insecure", "=", "False", ",", "target_registries_insecure", "=", "False", ",", "dont_pull_base_image", "=", "Fal...
build image from provided dockerfile (specified by `source`) in current environment :param source: dict, where/how to get source code to put in image :param image: str, tag for built image ([registry/]image_name[:tag]) :param parent_registry: str, registry to pull base image from :param target_registries: list of str, list of registries to push image to (might change in future) :param parent_registry_insecure: bool, allow connecting to parent registry over plain http :param target_registries_insecure: bool, allow connecting to target registries over plain http :param dont_pull_base_image: bool, don't pull or update base image specified in dockerfile :return: BuildResults
[ "build", "image", "from", "provided", "dockerfile", "(", "specified", "by", "source", ")", "in", "current", "environment" ]
python
train
eandersson/amqpstorm
amqpstorm/queue.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/queue.py#L18-L55
def declare(self, queue='', passive=False, durable=False, exclusive=False, auto_delete=False, arguments=None): """Declare a Queue. :param str queue: Queue name :param bool passive: Do not create :param bool durable: Durable queue :param bool exclusive: Request exclusive access :param bool auto_delete: Automatically delete when not in use :param dict arguments: Queue key/value arguments :raises AMQPInvalidArgument: Invalid Parameters :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :rtype: dict """ if not compatibility.is_string(queue): raise AMQPInvalidArgument('queue should be a string') elif not isinstance(passive, bool): raise AMQPInvalidArgument('passive should be a boolean') elif not isinstance(durable, bool): raise AMQPInvalidArgument('durable should be a boolean') elif not isinstance(exclusive, bool): raise AMQPInvalidArgument('exclusive should be a boolean') elif not isinstance(auto_delete, bool): raise AMQPInvalidArgument('auto_delete should be a boolean') elif arguments is not None and not isinstance(arguments, dict): raise AMQPInvalidArgument('arguments should be a dict or None') declare_frame = pamqp_queue.Declare(queue=queue, passive=passive, durable=durable, exclusive=exclusive, auto_delete=auto_delete, arguments=arguments) return self._channel.rpc_request(declare_frame)
[ "def", "declare", "(", "self", ",", "queue", "=", "''", ",", "passive", "=", "False", ",", "durable", "=", "False", ",", "exclusive", "=", "False", ",", "auto_delete", "=", "False", ",", "arguments", "=", "None", ")", ":", "if", "not", "compatibility",...
Declare a Queue. :param str queue: Queue name :param bool passive: Do not create :param bool durable: Durable queue :param bool exclusive: Request exclusive access :param bool auto_delete: Automatically delete when not in use :param dict arguments: Queue key/value arguments :raises AMQPInvalidArgument: Invalid Parameters :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :rtype: dict
[ "Declare", "a", "Queue", "." ]
python
train
spyder-ide/spyder
spyder/plugins/help/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/help/plugin.py#L328-L330
def set_rich_text_font(self, font): """Set rich text mode font""" self.rich_text.set_font(font, fixed_font=self.get_plugin_font())
[ "def", "set_rich_text_font", "(", "self", ",", "font", ")", ":", "self", ".", "rich_text", ".", "set_font", "(", "font", ",", "fixed_font", "=", "self", ".", "get_plugin_font", "(", ")", ")" ]
Set rich text mode font
[ "Set", "rich", "text", "mode", "font" ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/Network.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Network.py#L417-L479
def add_network_ipv6_hosts( self, id_vlan, id_tipo_rede, num_hosts, id_ambiente_vip=None): """ Add new networkipv6 :param id_vlan: Identifier of the Vlan. Integer value and greater than zero. :param id_tipo_rede: Identifier of the NetworkType. Integer value and greater than zero. :param num_hosts: Number of hosts expected. Integer value and greater than zero. :param id_ambiente_vip: Identifier of the Environment Vip. Integer value and greater than zero. :return: Following dictionary: :: {'vlan': {'id': < id_vlan >, 'nome': < nome_vlan >, 'num_vlan': < num_vlan >, 'id_tipo_rede': < id_tipo_rede >, 'id_ambiente': < id_ambiente >, 'rede_oct1': < rede_oct1 >, 'rede_oct2': < rede_oct2 >, 'rede_oct3': < rede_oct3 >, 'rede_oct4': < rede_oct4 >, 'rede_oct5': < rede_oct4 >, 'rede_oct6': < rede_oct4 >, 'rede_oct7': < rede_oct4 >, 'rede_oct8': < rede_oct4 >, 'bloco': < bloco >, 'mascara_oct1': < mascara_oct1 >, 'mascara_oct2': < mascara_oct2 >, 'mascara_oct3': < mascara_oct3 >, 'mascara_oct4': < mascara_oct4 >, 'mascara_oct5': < mascara_oct4 >, 'mascara_oct6': < mascara_oct4 >, 'mascara_oct7': < mascara_oct4 >, 'mascara_oct8': < mascara_oct4 >, 'broadcast': < broadcast >, 'descricao': < descricao >, 'acl_file_name': < acl_file_name >, 'acl_valida': < acl_valida >, 'ativada': < ativada >}} :raise TipoRedeNaoExisteError: NetworkType not found. :raise InvalidParameterError: Invalid ID for Vlan or NetworkType. :raise EnvironmentVipNotFoundError: Environment VIP not registered. :raise IPNaoDisponivelError: Network address unavailable to create a NetworkIPv6. :raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. 
""" vlan_map = dict() vlan_map['id_vlan'] = id_vlan vlan_map['id_tipo_rede'] = id_tipo_rede vlan_map['num_hosts'] = num_hosts vlan_map['id_ambiente_vip'] = id_ambiente_vip code, xml = self.submit({'vlan': vlan_map}, 'PUT', 'network/ipv6/add/') return self.response(code, xml)
[ "def", "add_network_ipv6_hosts", "(", "self", ",", "id_vlan", ",", "id_tipo_rede", ",", "num_hosts", ",", "id_ambiente_vip", "=", "None", ")", ":", "vlan_map", "=", "dict", "(", ")", "vlan_map", "[", "'id_vlan'", "]", "=", "id_vlan", "vlan_map", "[", "'id_ti...
Add new networkipv6 :param id_vlan: Identifier of the Vlan. Integer value and greater than zero. :param id_tipo_rede: Identifier of the NetworkType. Integer value and greater than zero. :param num_hosts: Number of hosts expected. Integer value and greater than zero. :param id_ambiente_vip: Identifier of the Environment Vip. Integer value and greater than zero. :return: Following dictionary: :: {'vlan': {'id': < id_vlan >, 'nome': < nome_vlan >, 'num_vlan': < num_vlan >, 'id_tipo_rede': < id_tipo_rede >, 'id_ambiente': < id_ambiente >, 'rede_oct1': < rede_oct1 >, 'rede_oct2': < rede_oct2 >, 'rede_oct3': < rede_oct3 >, 'rede_oct4': < rede_oct4 >, 'rede_oct5': < rede_oct4 >, 'rede_oct6': < rede_oct4 >, 'rede_oct7': < rede_oct4 >, 'rede_oct8': < rede_oct4 >, 'bloco': < bloco >, 'mascara_oct1': < mascara_oct1 >, 'mascara_oct2': < mascara_oct2 >, 'mascara_oct3': < mascara_oct3 >, 'mascara_oct4': < mascara_oct4 >, 'mascara_oct5': < mascara_oct4 >, 'mascara_oct6': < mascara_oct4 >, 'mascara_oct7': < mascara_oct4 >, 'mascara_oct8': < mascara_oct4 >, 'broadcast': < broadcast >, 'descricao': < descricao >, 'acl_file_name': < acl_file_name >, 'acl_valida': < acl_valida >, 'ativada': < ativada >}} :raise TipoRedeNaoExisteError: NetworkType not found. :raise InvalidParameterError: Invalid ID for Vlan or NetworkType. :raise EnvironmentVipNotFoundError: Environment VIP not registered. :raise IPNaoDisponivelError: Network address unavailable to create a NetworkIPv6. :raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
[ "Add", "new", "networkipv6" ]
python
train
mrcagney/make_gtfs
make_gtfs/protofeed.py
https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/protofeed.py#L73-L86
def copy(self): """ Return a copy of this ProtoFeed, that is, a feed with all the same attributes. """ other = ProtoFeed() for key in cs.PROTOFEED_ATTRS: value = getattr(self, key) if isinstance(value, pd.DataFrame): # Pandas copy DataFrame value = value.copy() setattr(other, key, value) return other
[ "def", "copy", "(", "self", ")", ":", "other", "=", "ProtoFeed", "(", ")", "for", "key", "in", "cs", ".", "PROTOFEED_ATTRS", ":", "value", "=", "getattr", "(", "self", ",", "key", ")", "if", "isinstance", "(", "value", ",", "pd", ".", "DataFrame", ...
Return a copy of this ProtoFeed, that is, a feed with all the same attributes.
[ "Return", "a", "copy", "of", "this", "ProtoFeed", "that", "is", "a", "feed", "with", "all", "the", "same", "attributes", "." ]
python
train
hozn/coilmq
coilmq/engine.py
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/engine.py#L87-L94
def unbind(self): """ Unbinds this connection from queue and topic managers (freeing up resources) and resets state. """ self.connected = False self.queue_manager.disconnect(self.connection) self.topic_manager.disconnect(self.connection)
[ "def", "unbind", "(", "self", ")", ":", "self", ".", "connected", "=", "False", "self", ".", "queue_manager", ".", "disconnect", "(", "self", ".", "connection", ")", "self", ".", "topic_manager", ".", "disconnect", "(", "self", ".", "connection", ")" ]
Unbinds this connection from queue and topic managers (freeing up resources) and resets state.
[ "Unbinds", "this", "connection", "from", "queue", "and", "topic", "managers", "(", "freeing", "up", "resources", ")", "and", "resets", "state", "." ]
python
train
hyperledger/indy-plenum
plenum/server/node.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L2569-L2610
def processPropagate(self, msg: Propagate, frm): """ Process one propagateRequest sent to this node asynchronously - If this propagateRequest hasn't been seen by this node, then broadcast it to all nodes after verifying the the signature. - Add the client to blacklist if its signature is invalid :param msg: the propagateRequest :param frm: the name of the node which sent this `msg` """ logger.debug("{} received propagated request: {}". format(self.name, msg)) request = TxnUtilConfig.client_request_class(**msg.request) clientName = msg.senderClient if not self.isProcessingReq(request.key): ledger_id, seq_no = self.seqNoDB.get_by_payload_digest(request.payload_digest) if ledger_id is not None and seq_no is not None: self._clean_req_from_verified(request) logger.debug("{} ignoring propagated request {} " "since it has been already ordered" .format(self.name, msg)) return self.startedProcessingReq(request.key, clientName) # forced request should be processed before consensus self.handle_request_if_forced(request) else: if clientName is not None and \ not self.is_sender_known_for_req(request.key): # Since some propagates might not include the client name self.set_sender_for_req(request.key, clientName) self.requests.add_propagate(request, frm) self.propagate(request, clientName) self.tryForwarding(request)
[ "def", "processPropagate", "(", "self", ",", "msg", ":", "Propagate", ",", "frm", ")", ":", "logger", ".", "debug", "(", "\"{} received propagated request: {}\"", ".", "format", "(", "self", ".", "name", ",", "msg", ")", ")", "request", "=", "TxnUtilConfig",...
Process one propagateRequest sent to this node asynchronously - If this propagateRequest hasn't been seen by this node, then broadcast it to all nodes after verifying the the signature. - Add the client to blacklist if its signature is invalid :param msg: the propagateRequest :param frm: the name of the node which sent this `msg`
[ "Process", "one", "propagateRequest", "sent", "to", "this", "node", "asynchronously" ]
python
train
martinpitt/python-dbusmock
dbusmock/templates/bluez4.py
https://github.com/martinpitt/python-dbusmock/blob/26f65f78bc0ed347233f699a8d6ee0e6880e7eb0/dbusmock/templates/bluez4.py#L274-L283
def ListDevices(self): '''List all known devices ''' devices = [] for obj in mockobject.objects.keys(): if obj.startswith('/org/bluez/') and 'dev_' in obj: devices.append(dbus.ObjectPath(obj, variant_level=1)) return dbus.Array(devices, variant_level=1)
[ "def", "ListDevices", "(", "self", ")", ":", "devices", "=", "[", "]", "for", "obj", "in", "mockobject", ".", "objects", ".", "keys", "(", ")", ":", "if", "obj", ".", "startswith", "(", "'/org/bluez/'", ")", "and", "'dev_'", "in", "obj", ":", "device...
List all known devices
[ "List", "all", "known", "devices" ]
python
train
matthewdeanmartin/jiggle_version
jiggle_version/module_finder.py
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/module_finder.py#L238-L271
def find_single_file_project(self): # type: () -> List[str] """ Take first non-setup.py python file. What a mess. :return: """ # TODO: use package_dirs packaged_dirs = "" try: # Right now only returns 1st. packaged_dirs = self.extract_package_dir() except: pass likely_src_folders = [".", "src/"] if packaged_dirs: likely_src_folders.append(packaged_dirs) candidates = [] for likely_src in likely_src_folders: if not os.path.isdir(likely_src): continue files = [f for f in os.listdir(likely_src) if os.path.isfile(f)] # BUG: doesn't deal with src/foo/bar.py for file in files: if file.endswith("setup.py") or file == "setup": continue # duh if file.endswith(".py"): candidate = file.replace(".py", "") if candidate != "setup": candidates.append(candidate) else: if self.file_opener.is_python_inside(file): candidates.append(file) return candidates
[ "def", "find_single_file_project", "(", "self", ")", ":", "# type: () -> List[str]", "# TODO: use package_dirs", "packaged_dirs", "=", "\"\"", "try", ":", "# Right now only returns 1st.", "packaged_dirs", "=", "self", ".", "extract_package_dir", "(", ")", "except", ":", ...
Take first non-setup.py python file. What a mess. :return:
[ "Take", "first", "non", "-", "setup", ".", "py", "python", "file", ".", "What", "a", "mess", ".", ":", "return", ":" ]
python
train
charnley/rmsd
rmsd/calculate_rmsd.py
https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L198-L207
def makeW(r1, r2, r3, r4=0): """ matrix involved in quaternion rotation """ W = np.asarray([ [r4, r3, -r2, r1], [-r3, r4, r1, r2], [r2, -r1, r4, r3], [-r1, -r2, -r3, r4]]) return W
[ "def", "makeW", "(", "r1", ",", "r2", ",", "r3", ",", "r4", "=", "0", ")", ":", "W", "=", "np", ".", "asarray", "(", "[", "[", "r4", ",", "r3", ",", "-", "r2", ",", "r1", "]", ",", "[", "-", "r3", ",", "r4", ",", "r1", ",", "r2", "]",...
matrix involved in quaternion rotation
[ "matrix", "involved", "in", "quaternion", "rotation" ]
python
train
mromanello/hucitlib
knowledge_base/surfext/__init__.py
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/surfext/__init__.py#L225-L343
def to_json(self): """ Serialises a HucitAuthor to a JSON formatted string. Example: >> homer = kb.get_resource_by_urn("urn:cts:greekLit:tlg0012") >> homer.to_json() { "name_abbreviations": [ "Hom." ], "urn": "urn:cts:greekLit:tlg0012", "works": [ { "urn": "urn:cts:greekLit:tlg0012.tlg001", "titles": [ { "language": "it", "label": "Iliade" }, { "language": "la", "label": "Ilias" }, { "language": "en", "label": "Iliad" }, { "language": "de", "label": "Ilias" }, { "language": "fr", "label": "L'Iliade" } ], "uri": "http://purl.org/hucit/kb/works/2815", "title_abbreviations": [ "Il." ] }, { "urn": "urn:cts:greekLit:tlg0012.tlg002", "titles": [ { "language": "en", "label": "Odyssey" }, { "language": "fr", "label": "L'Odyss\u00e9e" }, { "language": "it", "label": "Odissea" }, { "language": "la", "label": "Odyssea" }, { "language": "de", "label": "Odyssee" } ], "uri": "http://purl.org/hucit/kb/works/2816", "title_abbreviations": [ "Od." ] }, { "urn": "urn:cts:cwkb:927.2814", "titles": [ { "language": "la", "label": "Epigrammata" } ], "uri": "http://purl.org/hucit/kb/works/2814", "title_abbreviations": [ "Epigr." ] } ], "uri": "http://purl.org/hucit/kb/authors/927", "names": [ { "language": "fr", "label": "Hom\u00e8re" }, { "language": "la", "label": "Homerus" }, { "language": null, "label": "Homeros" }, { "language": "en", "label": "Homer" }, { "language": "it", "label": "Omero" } ] } """ names = self.get_names() return json.dumps({ "uri" : self.subject , "urn" : str(self.get_urn()) , "names" : [{"language":lang, "label":label} for lang, label in names] , "name_abbreviations" : self.get_abbreviations() , "works" : [json.loads(work.to_json()) for work in self.get_works()] }, indent=2)
[ "def", "to_json", "(", "self", ")", ":", "names", "=", "self", ".", "get_names", "(", ")", "return", "json", ".", "dumps", "(", "{", "\"uri\"", ":", "self", ".", "subject", ",", "\"urn\"", ":", "str", "(", "self", ".", "get_urn", "(", ")", ")", "...
Serialises a HucitAuthor to a JSON formatted string. Example: >> homer = kb.get_resource_by_urn("urn:cts:greekLit:tlg0012") >> homer.to_json() { "name_abbreviations": [ "Hom." ], "urn": "urn:cts:greekLit:tlg0012", "works": [ { "urn": "urn:cts:greekLit:tlg0012.tlg001", "titles": [ { "language": "it", "label": "Iliade" }, { "language": "la", "label": "Ilias" }, { "language": "en", "label": "Iliad" }, { "language": "de", "label": "Ilias" }, { "language": "fr", "label": "L'Iliade" } ], "uri": "http://purl.org/hucit/kb/works/2815", "title_abbreviations": [ "Il." ] }, { "urn": "urn:cts:greekLit:tlg0012.tlg002", "titles": [ { "language": "en", "label": "Odyssey" }, { "language": "fr", "label": "L'Odyss\u00e9e" }, { "language": "it", "label": "Odissea" }, { "language": "la", "label": "Odyssea" }, { "language": "de", "label": "Odyssee" } ], "uri": "http://purl.org/hucit/kb/works/2816", "title_abbreviations": [ "Od." ] }, { "urn": "urn:cts:cwkb:927.2814", "titles": [ { "language": "la", "label": "Epigrammata" } ], "uri": "http://purl.org/hucit/kb/works/2814", "title_abbreviations": [ "Epigr." ] } ], "uri": "http://purl.org/hucit/kb/authors/927", "names": [ { "language": "fr", "label": "Hom\u00e8re" }, { "language": "la", "label": "Homerus" }, { "language": null, "label": "Homeros" }, { "language": "en", "label": "Homer" }, { "language": "it", "label": "Omero" } ] }
[ "Serialises", "a", "HucitAuthor", "to", "a", "JSON", "formatted", "string", "." ]
python
train
gwastro/pycbc
pycbc/tmpltbank/em_progenitors.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/em_progenitors.py#L388-L411
def xi_eq(x, kappa, chi_eff, q): """ The roots of this equation determine the orbital radius at the onset of NS tidal disruption in a nonprecessing NS-BH binary [(7) in Foucart PRD 86, 124007 (2012)] Parameters ----------- x: float orbital separation in units of the NS radius kappa: float the BH mass divided by the NS radius chi_eff: float the BH dimensionless spin parameter q: float the binary mass ratio (BH mass / NS mass) Returns ---------- float x**3*(x**2-3*kappa*x+2*chi_eff*kappa*sqrt[kappa*x) -3*q*(x**2-2*kappa*x+(chi_eff*kappa)**2) """ return x**3*(x**2-3*kappa*x+2*chi_eff*kappa*math.sqrt(kappa*x))-3*q*(x**2-2*kappa*x+(chi_eff*kappa)**2)
[ "def", "xi_eq", "(", "x", ",", "kappa", ",", "chi_eff", ",", "q", ")", ":", "return", "x", "**", "3", "*", "(", "x", "**", "2", "-", "3", "*", "kappa", "*", "x", "+", "2", "*", "chi_eff", "*", "kappa", "*", "math", ".", "sqrt", "(", "kappa"...
The roots of this equation determine the orbital radius at the onset of NS tidal disruption in a nonprecessing NS-BH binary [(7) in Foucart PRD 86, 124007 (2012)] Parameters ----------- x: float orbital separation in units of the NS radius kappa: float the BH mass divided by the NS radius chi_eff: float the BH dimensionless spin parameter q: float the binary mass ratio (BH mass / NS mass) Returns ---------- float x**3*(x**2-3*kappa*x+2*chi_eff*kappa*sqrt[kappa*x) -3*q*(x**2-2*kappa*x+(chi_eff*kappa)**2)
[ "The", "roots", "of", "this", "equation", "determine", "the", "orbital", "radius", "at", "the", "onset", "of", "NS", "tidal", "disruption", "in", "a", "nonprecessing", "NS", "-", "BH", "binary", "[", "(", "7", ")", "in", "Foucart", "PRD", "86", "124007",...
python
train
globality-corp/microcosm-flask
microcosm_flask/conventions/crud.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/conventions/crud.py#L74-L105
def configure_count(self, ns, definition): """ Register a count endpoint. The definition's func should be a count function, which must: - accept kwargs for the query string - return a count is the total number of items available The definition's request_schema will be used to process query string arguments. :param ns: the namespace :param definition: the endpoint definition """ @self.add_route(ns.collection_path, Operation.Count, ns) @qs(definition.request_schema) @wraps(definition.func) def count(**path_data): request_data = load_query_string_data(definition.request_schema) response_data = dict() count = definition.func(**merge_data(path_data, request_data)) headers = encode_count_header(count) definition.header_func(headers, response_data) response_format = self.negotiate_response_content(definition.response_formats) return dump_response_data( None, None, headers=headers, response_format=response_format, ) count.__doc__ = "Count the size of the collection of all {}".format(pluralize(ns.subject_name))
[ "def", "configure_count", "(", "self", ",", "ns", ",", "definition", ")", ":", "@", "self", ".", "add_route", "(", "ns", ".", "collection_path", ",", "Operation", ".", "Count", ",", "ns", ")", "@", "qs", "(", "definition", ".", "request_schema", ")", "...
Register a count endpoint. The definition's func should be a count function, which must: - accept kwargs for the query string - return a count is the total number of items available The definition's request_schema will be used to process query string arguments. :param ns: the namespace :param definition: the endpoint definition
[ "Register", "a", "count", "endpoint", "." ]
python
train
senseobservationsystems/commonsense-python-lib
senseapi.py
https://github.com/senseobservationsystems/commonsense-python-lib/blob/aac59a1751ef79eb830b3ca1fab6ef2c83931f87/senseapi.py#L1779-L1789
def DomainTokensCreate(self, domain_id, amount): """ This method creates tokens that can be used by users who want to join the domain. Tokens are automatically deleted after usage. Only domain managers can create tokens. """ if self.__SenseApiCall__('/domains/{0}/tokens.json'.format(domain_id), 'POST', parameters = {"amount":amount}): return True else: self.__error__ = "api call unsuccessful" return False
[ "def", "DomainTokensCreate", "(", "self", ",", "domain_id", ",", "amount", ")", ":", "if", "self", ".", "__SenseApiCall__", "(", "'/domains/{0}/tokens.json'", ".", "format", "(", "domain_id", ")", ",", "'POST'", ",", "parameters", "=", "{", "\"amount\"", ":", ...
This method creates tokens that can be used by users who want to join the domain. Tokens are automatically deleted after usage. Only domain managers can create tokens.
[ "This", "method", "creates", "tokens", "that", "can", "be", "used", "by", "users", "who", "want", "to", "join", "the", "domain", ".", "Tokens", "are", "automatically", "deleted", "after", "usage", ".", "Only", "domain", "managers", "can", "create", "tokens",...
python
train
jim-easterbrook/pywws
src/pywws/conversions.py
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/conversions.py#L190-L200
def dew_point(temp, hum): """Compute dew point, using formula from http://en.wikipedia.org/wiki/Dew_point. """ if temp is None or hum is None: return None a = 17.27 b = 237.7 gamma = ((a * temp) / (b + temp)) + math.log(float(hum) / 100.0) return (b * gamma) / (a - gamma)
[ "def", "dew_point", "(", "temp", ",", "hum", ")", ":", "if", "temp", "is", "None", "or", "hum", "is", "None", ":", "return", "None", "a", "=", "17.27", "b", "=", "237.7", "gamma", "=", "(", "(", "a", "*", "temp", ")", "/", "(", "b", "+", "tem...
Compute dew point, using formula from http://en.wikipedia.org/wiki/Dew_point.
[ "Compute", "dew", "point", "using", "formula", "from", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Dew_point", "." ]
python
train
allenai/allennlp
allennlp/semparse/contexts/quarel_utils.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/quarel_utils.py#L126-L182
def get_explanation(logical_form: str, world_extractions: JsonDict, answer_index: int, world: QuarelWorld) -> List[JsonDict]: """ Create explanation (as a list of header/content entries) for an answer """ output = [] nl_world = {} if world_extractions['world1'] != "N/A" and world_extractions['world1'] != ["N/A"]: nl_world['world1'] = nl_world_string(world_extractions['world1']) nl_world['world2'] = nl_world_string(world_extractions['world2']) output.append({ "header": "Identified two worlds", "content": [f'''world1 = {nl_world['world1']}''', f'''world2 = {nl_world['world2']}'''] }) else: nl_world['world1'] = 'world1' nl_world['world2'] = 'world2' parse = semparse_util.lisp_to_nested_expression(logical_form) if parse[0] != "infer": return None setup = parse[1] output.append({ "header": "The question is stating", "content": nl_arg(setup, nl_world) }) answers = parse[2:] output.append({ "header": "The answer options are stating", "content": ["A: " + " and ".join(nl_arg(answers[0], nl_world)), "B: " + " and ".join(nl_arg(answers[1], nl_world))] }) setup_core = setup if setup[0] == 'and': setup_core = setup[1] s_attr = setup_core[0] s_dir = world.qr_size[setup_core[1]] s_world = nl_world[setup_core[2]] a_attr = answers[answer_index][0] qr_dir = world._get_qr_coeff(strip_entity_type(s_attr), strip_entity_type(a_attr)) # pylint: disable=protected-access a_dir = s_dir * qr_dir a_world = nl_world[answers[answer_index][2]] content = [f'When {nl_attr(s_attr)} is {nl_dir(s_dir)} ' + f'then {nl_attr(a_attr)} is {nl_dir(a_dir)} (for {s_world})'] if a_world != s_world: content.append(f'''Therefore {nl_attr(a_attr)} is {nl_dir(-a_dir)} for {a_world}''') content.append(f"Therefore {chr(65+answer_index)} is the correct answer") output.append({ "header": "Theory used", "content": content }) return output
[ "def", "get_explanation", "(", "logical_form", ":", "str", ",", "world_extractions", ":", "JsonDict", ",", "answer_index", ":", "int", ",", "world", ":", "QuarelWorld", ")", "->", "List", "[", "JsonDict", "]", ":", "output", "=", "[", "]", "nl_world", "=",...
Create explanation (as a list of header/content entries) for an answer
[ "Create", "explanation", "(", "as", "a", "list", "of", "header", "/", "content", "entries", ")", "for", "an", "answer" ]
python
train
JoseAntFer/pyny3d
pyny3d/geoms.py
https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/geoms.py#L961-L991
def contiguous(polygons): """ Static method. Check whether a set of convex polygons are all contiguous. Two polygons are considered contiguous if they share, at least, one side (two vertices). This is not a complete verification, it is a very simplified one. For a given set of polygons this method will verify that the number of common vertices among them equals or exceeds the minimum number of common vertices possible. This little algorithm will not declare a contiguous set of polygons as non-contiguous, but it can fail in the reverse for certain geometries where polygons have several common vertices among them. :param polygons: List of polygons. :type polygons: list of ``pyny.Polygon`` :return: Whether tey are contiguous. :rtype: bool """ from pyny3d.utils import sort_numpy n = len(polygons) points = sort_numpy(np.concatenate([polygon.points for polygon in polygons])) diff = np.sum(np.diff(points, axis=0), axis=1) if sum(np.isclose(diff, 0)) < n*2-2: return False else: return True
[ "def", "contiguous", "(", "polygons", ")", ":", "from", "pyny3d", ".", "utils", "import", "sort_numpy", "n", "=", "len", "(", "polygons", ")", "points", "=", "sort_numpy", "(", "np", ".", "concatenate", "(", "[", "polygon", ".", "points", "for", "polygon...
Static method. Check whether a set of convex polygons are all contiguous. Two polygons are considered contiguous if they share, at least, one side (two vertices). This is not a complete verification, it is a very simplified one. For a given set of polygons this method will verify that the number of common vertices among them equals or exceeds the minimum number of common vertices possible. This little algorithm will not declare a contiguous set of polygons as non-contiguous, but it can fail in the reverse for certain geometries where polygons have several common vertices among them. :param polygons: List of polygons. :type polygons: list of ``pyny.Polygon`` :return: Whether tey are contiguous. :rtype: bool
[ "Static", "method", ".", "Check", "whether", "a", "set", "of", "convex", "polygons", "are", "all", "contiguous", ".", "Two", "polygons", "are", "considered", "contiguous", "if", "they", "share", "at", "least", "one", "side", "(", "two", "vertices", ")", "....
python
train
GearPlug/paymentsos-python
paymentsos/tokens.py
https://github.com/GearPlug/paymentsos-python/blob/2f32ba83ae890c96799b71d49fc6740bc1081f89/paymentsos/tokens.py#L6-L38
def create_token(self, *, holder_name, card_number, credit_card_cvv, expiration_date, token_type='credit_card', identity_document=None, billing_address=None, additional_details=None): """ When creating a Token, remember to use the public-key header instead of the private-key header, and do not include the app-id header. Args: holder_name: Name of the credit card holder. card_number: Credit card number. credit_card_cvv: The CVV number on the card (3 or 4 digits) to be encrypted. expiration_date: Credit card expiration date. Possible formats: mm-yyyy, mm-yy, mm.yyyy, mm.yy, mm/yy, mm/yyyy, mm yyyy, or mm yy. token_type: The type of token billing_address: Address. identity_document: National identity document of the card holder. additional_details: Optional additional data stored with your token in key/value pairs. Returns: """ headers = self.client._get_public_headers() payload = { "token_type": token_type, "credit_card_cvv": credit_card_cvv, "card_number": card_number, "expiration_date": expiration_date, "holder_name": holder_name, "identity_document": identity_document, "billing_address": billing_address, "additional_details": additional_details, } endpoint = '/tokens' return self.client._post(self.client.URL_BASE + endpoint, json=payload, headers=headers)
[ "def", "create_token", "(", "self", ",", "*", ",", "holder_name", ",", "card_number", ",", "credit_card_cvv", ",", "expiration_date", ",", "token_type", "=", "'credit_card'", ",", "identity_document", "=", "None", ",", "billing_address", "=", "None", ",", "addit...
When creating a Token, remember to use the public-key header instead of the private-key header, and do not include the app-id header. Args: holder_name: Name of the credit card holder. card_number: Credit card number. credit_card_cvv: The CVV number on the card (3 or 4 digits) to be encrypted. expiration_date: Credit card expiration date. Possible formats: mm-yyyy, mm-yy, mm.yyyy, mm.yy, mm/yy, mm/yyyy, mm yyyy, or mm yy. token_type: The type of token billing_address: Address. identity_document: National identity document of the card holder. additional_details: Optional additional data stored with your token in key/value pairs. Returns:
[ "When", "creating", "a", "Token", "remember", "to", "use", "the", "public", "-", "key", "header", "instead", "of", "the", "private", "-", "key", "header", "and", "do", "not", "include", "the", "app", "-", "id", "header", "." ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/midl.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/midl.py#L73-L79
def generate(env): """Add Builders and construction variables for midl to an Environment.""" env['MIDL'] = 'MIDL.EXE' env['MIDLFLAGS'] = SCons.Util.CLVar('/nologo') env['MIDLCOM'] = '$MIDL $MIDLFLAGS /tlb ${TARGETS[0]} /h ${TARGETS[1]} /iid ${TARGETS[2]} /proxy ${TARGETS[3]} /dlldata ${TARGETS[4]} $SOURCE 2> NUL' env['BUILDERS']['TypeLibrary'] = midl_builder
[ "def", "generate", "(", "env", ")", ":", "env", "[", "'MIDL'", "]", "=", "'MIDL.EXE'", "env", "[", "'MIDLFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "'/nologo'", ")", "env", "[", "'MIDLCOM'", "]", "=", "'$MIDL $MIDLFLAGS /tlb ${TARGETS[0]} ...
Add Builders and construction variables for midl to an Environment.
[ "Add", "Builders", "and", "construction", "variables", "for", "midl", "to", "an", "Environment", "." ]
python
train
blue-yonder/tsfresh
tsfresh/feature_extraction/feature_calculators.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L772-L783
def last_location_of_minimum(x): """ Returns the last location of the minimal value of x. The position is calculated relatively to the length of x. :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: float """ x = np.asarray(x) return 1.0 - np.argmin(x[::-1]) / len(x) if len(x) > 0 else np.NaN
[ "def", "last_location_of_minimum", "(", "x", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "return", "1.0", "-", "np", ".", "argmin", "(", "x", "[", ":", ":", "-", "1", "]", ")", "/", "len", "(", "x", ")", "if", "len", "(", "x", ...
Returns the last location of the minimal value of x. The position is calculated relatively to the length of x. :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: float
[ "Returns", "the", "last", "location", "of", "the", "minimal", "value", "of", "x", ".", "The", "position", "is", "calculated", "relatively", "to", "the", "length", "of", "x", "." ]
python
train
saxix/sample-data-utils
sample_data_utils/people.py
https://github.com/saxix/sample-data-utils/blob/769f1b46e60def2675a14bd5872047af6d1ea398/sample_data_utils/people.py#L32-L54
def title(languages=None, genders=None): """ returns a random title .. code-block:: python >>> d.title() u'Mrs.' >>> d.title(['es']) u'El Sr.' >>> d.title(None, [GENDER_FEMALE]) u'Mrs.' :param languages: list of allowed languages. ['en'] if None :param genders: list of allowed genders. (GENDER_FEMALE, GENDER_MALE) if None """ languages = languages or ['en'] genders = genders or (GENDER_FEMALE, GENDER_MALE) choices = _get_titles(languages) gender = {'m':0, 'f':1}[random.choice(genders)] return random.choice(choices)[gender]
[ "def", "title", "(", "languages", "=", "None", ",", "genders", "=", "None", ")", ":", "languages", "=", "languages", "or", "[", "'en'", "]", "genders", "=", "genders", "or", "(", "GENDER_FEMALE", ",", "GENDER_MALE", ")", "choices", "=", "_get_titles", "(...
returns a random title .. code-block:: python >>> d.title() u'Mrs.' >>> d.title(['es']) u'El Sr.' >>> d.title(None, [GENDER_FEMALE]) u'Mrs.' :param languages: list of allowed languages. ['en'] if None :param genders: list of allowed genders. (GENDER_FEMALE, GENDER_MALE) if None
[ "returns", "a", "random", "title" ]
python
test
swharden/PyOriginTools
PyOriginTools/workbook.py
https://github.com/swharden/PyOriginTools/blob/536fb8e11234ffdc27e26b1800e0358179ca7d26/PyOriginTools/workbook.py#L191-L194
def nRows(self): """returns maximum number of rows based on the longest colData""" if self.nCols: return max([len(x) for x in self.colData]) else: return 0
[ "def", "nRows", "(", "self", ")", ":", "if", "self", ".", "nCols", ":", "return", "max", "(", "[", "len", "(", "x", ")", "for", "x", "in", "self", ".", "colData", "]", ")", "else", ":", "return", "0" ]
returns maximum number of rows based on the longest colData
[ "returns", "maximum", "number", "of", "rows", "based", "on", "the", "longest", "colData" ]
python
train
f213/rumetr-client
rumetr/roometr.py
https://github.com/f213/rumetr-client/blob/5180152bcb2eed8246b88035db7c0bb1fe603166/rumetr/roometr.py#L191-L210
def check_appt(self, complex: str, house: str, appt: str) -> bool: """ Check if given appartment exists in the rumetr database """ self.check_house(complex, house) if '%s__%s__%s' % (complex, house, appt) in self._checked_appts: return True try: self.get('developers/{developer}/complexes/{complex}/houses/{house}/appts/{appt}'.format( developer=self.developer, complex=complex, house=house, appt=appt, )) except exceptions.Rumetr404Exception: raise exceptions.RumetrApptNotFound('Unknown appt (house is known) — may be you should create one?') self._checked_appts.add('%s__%s__%s' % (complex, house, appt)) return True
[ "def", "check_appt", "(", "self", ",", "complex", ":", "str", ",", "house", ":", "str", ",", "appt", ":", "str", ")", "->", "bool", ":", "self", ".", "check_house", "(", "complex", ",", "house", ")", "if", "'%s__%s__%s'", "%", "(", "complex", ",", ...
Check if given appartment exists in the rumetr database
[ "Check", "if", "given", "appartment", "exists", "in", "the", "rumetr", "database" ]
python
train
rmorshea/spectate
spectate/core.py
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L311-L341
def watch(value, spectator_type=Spectator): """Register a :class:`Specatator` to a :class:`Watchable` and return it. In order to register callbacks to an eventful object, you need to create a Spectator that will watch it for you. A :class:`Specatator` is a relatively simple object that has methods for adding, deleting, and triggering callbacks. To create a spectator we call ``spectator = watch(x)``, where x is a Watchable instance. Parameters ---------- value : Watchable A :class:`Watchable` instance. spectator_type : Spectator The type of spectator that will be returned. Returns ------- spectator: spectator_type The :class:`Specatator` (specified by ``spectator_type``) that is was registered to the given instance. """ if isinstance(value, Watchable): wtype = type(value) else: raise TypeError("Expected a Watchable, not %r." % value) spectator = getattr(value, "_instance_spectator", None) if not isinstance(spectator, Spectator): spectator = spectator_type(wtype) value._instance_spectator = spectator return spectator
[ "def", "watch", "(", "value", ",", "spectator_type", "=", "Spectator", ")", ":", "if", "isinstance", "(", "value", ",", "Watchable", ")", ":", "wtype", "=", "type", "(", "value", ")", "else", ":", "raise", "TypeError", "(", "\"Expected a Watchable, not %r.\"...
Register a :class:`Specatator` to a :class:`Watchable` and return it. In order to register callbacks to an eventful object, you need to create a Spectator that will watch it for you. A :class:`Specatator` is a relatively simple object that has methods for adding, deleting, and triggering callbacks. To create a spectator we call ``spectator = watch(x)``, where x is a Watchable instance. Parameters ---------- value : Watchable A :class:`Watchable` instance. spectator_type : Spectator The type of spectator that will be returned. Returns ------- spectator: spectator_type The :class:`Specatator` (specified by ``spectator_type``) that is was registered to the given instance.
[ "Register", "a", ":", "class", ":", "Specatator", "to", "a", ":", "class", ":", "Watchable", "and", "return", "it", "." ]
python
train
pdkit/pdkit
pdkit/finger_tapping_processor.py
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/finger_tapping_processor.py#L118-L131
def incoordination_score(self, data_frame): """ This method calculates the variance of the time interval in msec between taps :param data_frame: the data frame :type data_frame: pandas.DataFrame :return is: incoordination score :rtype is: float """ diff = data_frame.td[1:-1].values - data_frame.td[0:-2].values inc_s = np.var(diff[np.arange(1, len(diff), 2)], dtype=np.float64) * 1000.0 duration = math.ceil(data_frame.td[-1]) return inc_s, duration
[ "def", "incoordination_score", "(", "self", ",", "data_frame", ")", ":", "diff", "=", "data_frame", ".", "td", "[", "1", ":", "-", "1", "]", ".", "values", "-", "data_frame", ".", "td", "[", "0", ":", "-", "2", "]", ".", "values", "inc_s", "=", "...
This method calculates the variance of the time interval in msec between taps :param data_frame: the data frame :type data_frame: pandas.DataFrame :return is: incoordination score :rtype is: float
[ "This", "method", "calculates", "the", "variance", "of", "the", "time", "interval", "in", "msec", "between", "taps" ]
python
train
mitsei/dlkit
dlkit/records/assessment/basic/drag_and_drop_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/drag_and_drop_records.py#L952-L967
def clear_target_names(self, target_id): """stub""" if self.get_targets_metadata().is_read_only(): raise NoAccess() updated_targets = [] for current_target in self.my_osid_object_form._my_map['targets']: if current_target['id'] != target_id: updated_targets.append(current_target) else: updated_targets.append({ 'id': current_target['id'], 'texts': current_target['texts'], 'names': [], 'dropBehaviorType': current_target['dropBehaviorType'] }) self.my_osid_object_form._my_map['targets'] = updated_targets
[ "def", "clear_target_names", "(", "self", ",", "target_id", ")", ":", "if", "self", ".", "get_targets_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "NoAccess", "(", ")", "updated_targets", "=", "[", "]", "for", "current_target", "in", "...
stub
[ "stub" ]
python
train
openstack/proliantutils
proliantutils/ilo/firmware_controller.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/firmware_controller.py#L193-L248
def _get_socket(self, sslversion=ssl.PROTOCOL_TLSv1): """Sets up an https connection and do an HTTP/raw socket request :param sslversion: version of ssl session :raises: IloConnectionError, for connection failures :returns: ssl wrapped socket object """ err = None sock = None try: for res in socket.getaddrinfo( self.hostname, self.port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res try: sock = socket.socket(af, socktype, proto) sock.settimeout(self.timeout) # Connecting to {self.hostname} at port {self.port} sock.connect(sa) except socket.timeout: if sock is not None: sock.close() err = exception.IloConnectionError( "Timeout connecting to %(hostname)s:%(port)d" % {'hostname': self.hostname, 'port': self.port}) except socket.error: if sock is not None: sock.close() e = sys.exc_info()[1] err = exception.IloConnectionError( "Error connecting to %(hostname)s:%(port)d : %(error)s" % {'hostname': self.hostname, 'port': self.port, 'error': str(e)}) except Exception: raise exception.IloConnectionError( "Unable to resolve %s" % self.hostname) if err is not None: raise err # wrapping the socket over ssl session try: return ssl.wrap_socket(sock, ssl_version=sslversion) except socket.sslerror: e = sys.exc_info()[1] msg = (getattr(e, 'reason', None) or getattr(e, 'message', None)) # Some older iLO s don't support TLSv1, retry with SSLv3 if ('wrong version number' in msg) and ( sslversion == ssl.PROTOCOL_TLSv1): return self._get_socket(ssl.PROTOCOL_SSLv3) raise exception.IloConnectionError( "Cannot establish ssl session with %(hostname)s:%(port)d : " "%(error)s" % {'hostname': self.hostname, 'port': self.port, 'error': str(e)})
[ "def", "_get_socket", "(", "self", ",", "sslversion", "=", "ssl", ".", "PROTOCOL_TLSv1", ")", ":", "err", "=", "None", "sock", "=", "None", "try", ":", "for", "res", "in", "socket", ".", "getaddrinfo", "(", "self", ".", "hostname", ",", "self", ".", ...
Sets up an https connection and do an HTTP/raw socket request :param sslversion: version of ssl session :raises: IloConnectionError, for connection failures :returns: ssl wrapped socket object
[ "Sets", "up", "an", "https", "connection", "and", "do", "an", "HTTP", "/", "raw", "socket", "request" ]
python
train
eclipse/unide.python
src/unide/measurement.py
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/measurement.py#L214-L222
def samples(self): """Yield samples as dictionaries, keyed by dimensions.""" names = self.series.dimensions for n, offset in enumerate(self.series.offsets): dt = datetime.timedelta(microseconds=offset * 1000) d = {"ts": self.ts + dt} for name in names: d[name] = getattr(self.series, name)[n] yield d
[ "def", "samples", "(", "self", ")", ":", "names", "=", "self", ".", "series", ".", "dimensions", "for", "n", ",", "offset", "in", "enumerate", "(", "self", ".", "series", ".", "offsets", ")", ":", "dt", "=", "datetime", ".", "timedelta", "(", "micros...
Yield samples as dictionaries, keyed by dimensions.
[ "Yield", "samples", "as", "dictionaries", "keyed", "by", "dimensions", "." ]
python
train
sdispater/pendulum
pendulum/__init__.py
https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/__init__.py#L137-L145
def local( year, month, day, hour=0, minute=0, second=0, microsecond=0 ): # type: (int, int, int, int, int, int, int) -> DateTime """ Return a DateTime in the local timezone. """ return datetime( year, month, day, hour, minute, second, microsecond, tz=local_timezone() )
[ "def", "local", "(", "year", ",", "month", ",", "day", ",", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", ":", "# type: (int, int, int, int, int, int, int) -> DateTime", "return", "datetime", "(", "y...
Return a DateTime in the local timezone.
[ "Return", "a", "DateTime", "in", "the", "local", "timezone", "." ]
python
train
tkarabela/pysubs2
pysubs2/time.py
https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L68-L86
def frames_to_ms(frames, fps): """ Convert frame-based duration to milliseconds. Arguments: frames: Number of frames (should be int). fps: Framerate (must be a positive number, eg. 23.976). Returns: Number of milliseconds (rounded to int). Raises: ValueError: fps was negative or zero. """ if fps <= 0: raise ValueError("Framerate must be positive number (%f)." % fps) return int(round(frames * (1000 / fps)))
[ "def", "frames_to_ms", "(", "frames", ",", "fps", ")", ":", "if", "fps", "<=", "0", ":", "raise", "ValueError", "(", "\"Framerate must be positive number (%f).\"", "%", "fps", ")", "return", "int", "(", "round", "(", "frames", "*", "(", "1000", "/", "fps",...
Convert frame-based duration to milliseconds. Arguments: frames: Number of frames (should be int). fps: Framerate (must be a positive number, eg. 23.976). Returns: Number of milliseconds (rounded to int). Raises: ValueError: fps was negative or zero.
[ "Convert", "frame", "-", "based", "duration", "to", "milliseconds", ".", "Arguments", ":", "frames", ":", "Number", "of", "frames", "(", "should", "be", "int", ")", ".", "fps", ":", "Framerate", "(", "must", "be", "a", "positive", "number", "eg", ".", ...
python
train
RIPE-NCC/ripe.atlas.sagan
ripe/atlas/sagan/base.py
https://github.com/RIPE-NCC/ripe.atlas.sagan/blob/f0e57221cf0ba3504baddd3ea460fc955bc41cc6/ripe/atlas/sagan/base.py#L264-L281
def calculate_median(given_list): """ Returns the median of values in the given list. """ median = None if not given_list: return median given_list = sorted(given_list) list_length = len(given_list) if list_length % 2: median = given_list[int(list_length / 2)] else: median = (given_list[int(list_length / 2)] + given_list[int(list_length / 2) - 1]) / 2.0 return median
[ "def", "calculate_median", "(", "given_list", ")", ":", "median", "=", "None", "if", "not", "given_list", ":", "return", "median", "given_list", "=", "sorted", "(", "given_list", ")", "list_length", "=", "len", "(", "given_list", ")", "if", "list_length", "%...
Returns the median of values in the given list.
[ "Returns", "the", "median", "of", "values", "in", "the", "given", "list", "." ]
python
train
COUR4G3/flask-gssapi
flask_gssapi.py
https://github.com/COUR4G3/flask-gssapi/blob/d3842e14cdf9bf6818d7a154845e951a8e9c058d/flask_gssapi.py#L44-L62
def authenticate(self): """Attempts to authenticate the user if a token was provided.""" if request.headers.get('Authorization', '').startswith('Negotiate '): in_token = base64.b64decode(request.headers['Authorization'][10:]) try: creds = current_app.extensions['gssapi']['creds'] except KeyError: raise RuntimeError('flask-gssapi not configured for this app') ctx = gssapi.SecurityContext(creds=creds, usage='accept') out_token = ctx.step(in_token) if ctx.complete: username = ctx._inquire(initiator_name=True).initiator_name return str(username), out_token return None, None
[ "def", "authenticate", "(", "self", ")", ":", "if", "request", ".", "headers", ".", "get", "(", "'Authorization'", ",", "''", ")", ".", "startswith", "(", "'Negotiate '", ")", ":", "in_token", "=", "base64", ".", "b64decode", "(", "request", ".", "header...
Attempts to authenticate the user if a token was provided.
[ "Attempts", "to", "authenticate", "the", "user", "if", "a", "token", "was", "provided", "." ]
python
train
kstaniek/condoor
condoor/protocols/ssh.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/protocols/ssh.py#L32-L52
def get_command(self, version=2): """Return the SSH protocol specific command to connect.""" try: options = _C['options'] options_str = " -o ".join(options) if options_str: options_str = "-o " + options_str + " " except KeyError: options_str = "" if self.username: # Not supported on SunOS # "-o ConnectTimeout={} command = "ssh {}" \ "-{} " \ "-p {} {}@{}".format(options_str, version, self.port, self.username, self.hostname) else: command = "ssh {} " \ "-{} " \ "-p {} {}".format(options_str, version, self.port, self.hostname) return command
[ "def", "get_command", "(", "self", ",", "version", "=", "2", ")", ":", "try", ":", "options", "=", "_C", "[", "'options'", "]", "options_str", "=", "\" -o \"", ".", "join", "(", "options", ")", "if", "options_str", ":", "options_str", "=", "\"-o \"", "...
Return the SSH protocol specific command to connect.
[ "Return", "the", "SSH", "protocol", "specific", "command", "to", "connect", "." ]
python
train
qubell/contrib-python-qubell-client
qubell/api/private/instance.py
https://github.com/qubell/contrib-python-qubell-client/blob/4586ea11d5103c2ff9607d3ed922b5a0991b8845/qubell/api/private/instance.py#L508-L522
def get_most_recent_update_time(self): """ Indicated most recent update of the instance, assumption based on: - if currentWorkflow exists, its startedAt time is most recent update. - else max of workflowHistory startedAt is most recent update. """ def parse_time(t): if t: return time.gmtime(t/1000) return None try: max_wf_started_at = max([i.get('startedAt') for i in self.workflowHistory]) return parse_time(max_wf_started_at) except ValueError: return None
[ "def", "get_most_recent_update_time", "(", "self", ")", ":", "def", "parse_time", "(", "t", ")", ":", "if", "t", ":", "return", "time", ".", "gmtime", "(", "t", "/", "1000", ")", "return", "None", "try", ":", "max_wf_started_at", "=", "max", "(", "[", ...
Indicated most recent update of the instance, assumption based on: - if currentWorkflow exists, its startedAt time is most recent update. - else max of workflowHistory startedAt is most recent update.
[ "Indicated", "most", "recent", "update", "of", "the", "instance", "assumption", "based", "on", ":", "-", "if", "currentWorkflow", "exists", "its", "startedAt", "time", "is", "most", "recent", "update", ".", "-", "else", "max", "of", "workflowHistory", "started...
python
train
willemarcel/osmcha
osmcha/changeset.py
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L110-L118
def get_metadata(changeset): """Get the metadata of a changeset using the OSM API and return it as a XML ElementTree. Args: changeset: the id of the changeset. """ url = 'https://www.openstreetmap.org/api/0.6/changeset/{}'.format(changeset) return ET.fromstring(requests.get(url).content).getchildren()[0]
[ "def", "get_metadata", "(", "changeset", ")", ":", "url", "=", "'https://www.openstreetmap.org/api/0.6/changeset/{}'", ".", "format", "(", "changeset", ")", "return", "ET", ".", "fromstring", "(", "requests", ".", "get", "(", "url", ")", ".", "content", ")", "...
Get the metadata of a changeset using the OSM API and return it as a XML ElementTree. Args: changeset: the id of the changeset.
[ "Get", "the", "metadata", "of", "a", "changeset", "using", "the", "OSM", "API", "and", "return", "it", "as", "a", "XML", "ElementTree", "." ]
python
valid
theislab/scanpy
scanpy/plotting/_tools/__init__.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/plotting/_tools/__init__.py#L590-L638
def sim(adata, tmax_realization=None, as_heatmap=False, shuffle=False, show=None, save=None): """Plot results of simulation. Parameters ---------- as_heatmap : bool (default: False) Plot the timeseries as heatmap. tmax_realization : int or None (default: False) Number of observations in one realization of the time series. The data matrix adata.X consists in concatenated realizations. shuffle : bool, optional (default: False) Shuffle the data. save : `bool` or `str`, optional (default: `None`) If `True` or a `str`, save the figure. A string is appended to the default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}. show : bool, optional (default: `None`) Show the plot, do not return axis. """ from ... import utils as sc_utils if tmax_realization is not None: tmax = tmax_realization elif 'tmax_write' in adata.uns: tmax = adata.uns['tmax_write'] else: tmax = adata.n_obs n_realizations = adata.n_obs/tmax if not shuffle: if not as_heatmap: timeseries(adata.X, var_names=adata.var_names, xlim=[0, 1.25*adata.n_obs], highlightsX=np.arange(tmax, n_realizations*tmax, tmax), xlabel='realizations') else: # plot time series as heatmap, as in Haghverdi et al. (2016), Fig. 1d timeseries_as_heatmap(adata.X, var_names=adata.var_names, highlightsX=np.arange(tmax, n_realizations*tmax, tmax)) pl.xticks(np.arange(0, n_realizations*tmax, tmax), np.arange(n_realizations).astype(int) + 1) utils.savefig_or_show('sim', save=save, show=show) else: # shuffled data X = adata.X X, rows = sc_utils.subsample(X, seed=1) timeseries(X, var_names=adata.var_names, xlim=[0, 1.25*adata.n_obs], highlightsX=np.arange(tmax, n_realizations*tmax, tmax), xlabel='index (arbitrary order)') utils.savefig_or_show('sim_shuffled', save=save, show=show)
[ "def", "sim", "(", "adata", ",", "tmax_realization", "=", "None", ",", "as_heatmap", "=", "False", ",", "shuffle", "=", "False", ",", "show", "=", "None", ",", "save", "=", "None", ")", ":", "from", ".", ".", ".", "import", "utils", "as", "sc_utils",...
Plot results of simulation. Parameters ---------- as_heatmap : bool (default: False) Plot the timeseries as heatmap. tmax_realization : int or None (default: False) Number of observations in one realization of the time series. The data matrix adata.X consists in concatenated realizations. shuffle : bool, optional (default: False) Shuffle the data. save : `bool` or `str`, optional (default: `None`) If `True` or a `str`, save the figure. A string is appended to the default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}. show : bool, optional (default: `None`) Show the plot, do not return axis.
[ "Plot", "results", "of", "simulation", "." ]
python
train
Microsoft/botbuilder-python
libraries/botframework-connector/azure_bdist_wheel.py
https://github.com/Microsoft/botbuilder-python/blob/274663dd91c811bae6ac4488915ba5880771b0a7/libraries/botframework-connector/azure_bdist_wheel.py#L321-L353
def setupcfg_requirements(self): """Generate requirements from setup.cfg as ('Requires-Dist', 'requirement; qualifier') tuples. From a metadata section in setup.cfg: [metadata] provides-extra = extra1 extra2 requires-dist = requirement; qualifier another; qualifier2 unqualified Yields ('Provides-Extra', 'extra1'), ('Provides-Extra', 'extra2'), ('Requires-Dist', 'requirement; qualifier'), ('Requires-Dist', 'another; qualifier2'), ('Requires-Dist', 'unqualified') """ metadata = self.distribution.get_option_dict('metadata') # our .ini parser folds - to _ in key names: for key, title in (('provides_extra', 'Provides-Extra'), ('requires_dist', 'Requires-Dist')): if not key in metadata: continue field = metadata[key] for line in field[1].splitlines(): line = line.strip() if not line: continue yield (title, line)
[ "def", "setupcfg_requirements", "(", "self", ")", ":", "metadata", "=", "self", ".", "distribution", ".", "get_option_dict", "(", "'metadata'", ")", "# our .ini parser folds - to _ in key names:", "for", "key", ",", "title", "in", "(", "(", "'provides_extra'", ",", ...
Generate requirements from setup.cfg as ('Requires-Dist', 'requirement; qualifier') tuples. From a metadata section in setup.cfg: [metadata] provides-extra = extra1 extra2 requires-dist = requirement; qualifier another; qualifier2 unqualified Yields ('Provides-Extra', 'extra1'), ('Provides-Extra', 'extra2'), ('Requires-Dist', 'requirement; qualifier'), ('Requires-Dist', 'another; qualifier2'), ('Requires-Dist', 'unqualified')
[ "Generate", "requirements", "from", "setup", ".", "cfg", "as", "(", "Requires", "-", "Dist", "requirement", ";", "qualifier", ")", "tuples", ".", "From", "a", "metadata", "section", "in", "setup", ".", "cfg", ":" ]
python
test
spotify/pyschema
pyschema/source_generation.py
https://github.com/spotify/pyschema/blob/7e6c3934150bcb040c628d74ace6caf5fcf867df/pyschema/source_generation.py#L193-L224
def _class_source(schema, indent): """Generate Python source code for one specific class Doesn't include or take into account any dependencies between record types """ def_pattern = ( "class {class_name}(pyschema.Record):\n" "{indent}# WARNING: This class was generated by pyschema.to_python_source\n" "{indent}# there is a risk that any modification made to this class will be overwritten\n" "{optional_namespace_def}" "{field_defs}\n" ) if hasattr(schema, '_namespace'): optional_namespace_def = "{indent}_namespace = {namespace!r}\n".format( namespace=schema._namespace, indent=indent) else: optional_namespace_def = "" field_defs = [ "{indent}{field_name} = {field!r}".format(field_name=field_name, field=field, indent=indent) for field_name, field in schema._fields.iteritems() ] if not field_defs: field_defs = ["{indent}pass".format(indent=indent)] return def_pattern.format( class_name=schema._schema_name, optional_namespace_def=optional_namespace_def, field_defs="\n".join(field_defs), indent=indent )
[ "def", "_class_source", "(", "schema", ",", "indent", ")", ":", "def_pattern", "=", "(", "\"class {class_name}(pyschema.Record):\\n\"", "\"{indent}# WARNING: This class was generated by pyschema.to_python_source\\n\"", "\"{indent}# there is a risk that any modification made to this class w...
Generate Python source code for one specific class Doesn't include or take into account any dependencies between record types
[ "Generate", "Python", "source", "code", "for", "one", "specific", "class" ]
python
test
saltstack/salt
salt/modules/sysmod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sysmod.py#L582-L632
def list_state_functions(*args, **kwargs): # pylint: disable=unused-argument ''' List the functions for all state modules. Optionally, specify a state module or modules from which to list. .. versionadded:: 2014.7.0 CLI Example: .. code-block:: bash salt '*' sys.list_state_functions salt '*' sys.list_state_functions file salt '*' sys.list_state_functions pkg user State function names can be specified as globs. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' sys.list_state_functions 'file.*' salt '*' sys.list_state_functions 'file.s*' .. versionadded:: ? .. code-block:: bash salt '*' sys.list_state_functions 'module.specific_function' ''' # NOTE: **kwargs is used here to prevent a traceback when garbage # arguments are tacked on to the end. st_ = salt.state.State(__opts__) if not args: # We're being asked for all functions return sorted(st_.states) names = set() for module in args: if '*' in module or '.' in module: for func in fnmatch.filter(st_.states, module): names.add(func) else: # "sys" should just match sys without also matching sysctl moduledot = module + '.' for func in st_.states: if func.startswith(moduledot): names.add(func) return sorted(names)
[ "def", "list_state_functions", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=unused-argument", "# NOTE: **kwargs is used here to prevent a traceback when garbage", "# arguments are tacked on to the end.", "st_", "=", "salt", ".", "state", ".", ...
List the functions for all state modules. Optionally, specify a state module or modules from which to list. .. versionadded:: 2014.7.0 CLI Example: .. code-block:: bash salt '*' sys.list_state_functions salt '*' sys.list_state_functions file salt '*' sys.list_state_functions pkg user State function names can be specified as globs. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' sys.list_state_functions 'file.*' salt '*' sys.list_state_functions 'file.s*' .. versionadded:: ? .. code-block:: bash salt '*' sys.list_state_functions 'module.specific_function'
[ "List", "the", "functions", "for", "all", "state", "modules", ".", "Optionally", "specify", "a", "state", "module", "or", "modules", "from", "which", "to", "list", "." ]
python
train
jazzband/django-mongonaut
mongonaut/forms/forms.py
https://github.com/jazzband/django-mongonaut/blob/5485b2e029dff8ae267a4cb39c92d0a72cb5b144/mongonaut/forms/forms.py#L101-L121
def create_list_dict(self, document, list_field, doc_key): """ Genereates a dictionary representation of the list field. Document should be the document the list_field comes from. DO NOT CALL DIRECTLY """ list_dict = {"_document": document} if isinstance(list_field.field, EmbeddedDocumentField): list_dict.update(self.create_document_dictionary(document=list_field.field.document_type_obj, owner_document=document)) # Set the list_dict after it may have been updated list_dict.update({"_document_field": list_field.field, "_key": doc_key, "_field_type": ListField, "_widget": get_widget(list_field.field), "_value": getattr(document, doc_key, None)}) return list_dict
[ "def", "create_list_dict", "(", "self", ",", "document", ",", "list_field", ",", "doc_key", ")", ":", "list_dict", "=", "{", "\"_document\"", ":", "document", "}", "if", "isinstance", "(", "list_field", ".", "field", ",", "EmbeddedDocumentField", ")", ":", "...
Genereates a dictionary representation of the list field. Document should be the document the list_field comes from. DO NOT CALL DIRECTLY
[ "Genereates", "a", "dictionary", "representation", "of", "the", "list", "field", ".", "Document", "should", "be", "the", "document", "the", "list_field", "comes", "from", "." ]
python
valid
vxgmichel/aioconsole
aioconsole/events.py
https://github.com/vxgmichel/aioconsole/blob/8223435723d616fd4db398431d6a6182a6015e3f/aioconsole/events.py#L71-L79
def set_interactive_policy(*, locals=None, banner=None, serve=None, prompt_control=None): """Use an interactive event loop by default.""" policy = InteractiveEventLoopPolicy( locals=locals, banner=banner, serve=serve, prompt_control=prompt_control) asyncio.set_event_loop_policy(policy)
[ "def", "set_interactive_policy", "(", "*", ",", "locals", "=", "None", ",", "banner", "=", "None", ",", "serve", "=", "None", ",", "prompt_control", "=", "None", ")", ":", "policy", "=", "InteractiveEventLoopPolicy", "(", "locals", "=", "locals", ",", "ban...
Use an interactive event loop by default.
[ "Use", "an", "interactive", "event", "loop", "by", "default", "." ]
python
train
VorskiImagineering/C3PO
c3po/mod/communicator.py
https://github.com/VorskiImagineering/C3PO/blob/e3e35835e5ac24158848afed4f905ca44ac3ae00/c3po/mod/communicator.py#L241-L256
def upload(self): """ Upload all po files to GDocs ignoring conflicts. This method looks for all msgids in po_files and sends them as ods to GDocs Spreadsheet. """ local_ods_path = os.path.join(self.temp_path, LOCAL_ODS) try: po_to_ods(self.languages, self.locale_root, self.po_files_path, local_ods_path) except (IOError, OSError) as e: raise PODocsError(e) self._upload_file_to_gdoc(local_ods_path) self._clear_temp()
[ "def", "upload", "(", "self", ")", ":", "local_ods_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "temp_path", ",", "LOCAL_ODS", ")", "try", ":", "po_to_ods", "(", "self", ".", "languages", ",", "self", ".", "locale_root", ",", "self", ...
Upload all po files to GDocs ignoring conflicts. This method looks for all msgids in po_files and sends them as ods to GDocs Spreadsheet.
[ "Upload", "all", "po", "files", "to", "GDocs", "ignoring", "conflicts", ".", "This", "method", "looks", "for", "all", "msgids", "in", "po_files", "and", "sends", "them", "as", "ods", "to", "GDocs", "Spreadsheet", "." ]
python
test
numenta/nupic
src/nupic/encoders/delta.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/delta.py#L102-L116
def topDownCompute(self, encoded): """[ScalarEncoder class method override]""" #Decode to delta scalar if self._prevAbsolute==None or self._prevDelta==None: return [EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))] ret = self._adaptiveScalarEnc.topDownCompute(encoded) if self._prevAbsolute != None: ret = [EncoderResult(value=ret[0].value+self._prevAbsolute, scalar=ret[0].scalar+self._prevAbsolute, encoding=ret[0].encoding)] # ret[0].value+=self._prevAbsolute # ret[0].scalar+=self._prevAbsolute return ret
[ "def", "topDownCompute", "(", "self", ",", "encoded", ")", ":", "#Decode to delta scalar", "if", "self", ".", "_prevAbsolute", "==", "None", "or", "self", ".", "_prevDelta", "==", "None", ":", "return", "[", "EncoderResult", "(", "value", "=", "0", ",", "s...
[ScalarEncoder class method override]
[ "[", "ScalarEncoder", "class", "method", "override", "]" ]
python
valid
GGiecold/DBSCAN_multiplex
DBSCAN_multiplex.py
https://github.com/GGiecold/DBSCAN_multiplex/blob/075b1eec86d0e75166a9378d7d9a8974fc0a5e2e/DBSCAN_multiplex.py#L194-L442
def load(hdf5_file_name, data, minPts, eps = None, quantile = 50, subsamples_matrix = None, samples_weights = None, metric = 'minkowski', p = 2, verbose = True): """Determines the radius 'eps' for DBSCAN clustering of 'data' in an adaptive, data-dependent way. Parameters ---------- hdf5_file_name : file object or string The handle or name of an HDF5 data structure where any array needed for DBSCAN and too large to fit into memory is to be stored. data : array of shape (n_samples, n_features) An array of features retained from the data-set to be analysed. Subsamples of this curated data-set can also be analysed by a call to DBSCAN by providing an appropriate list of selected samples labels, stored in 'subsamples_matrix' (see below). subsamples_matrix : array of shape (n_runs, n_subsamples), optional (default = None) Each row of this matrix contains a set of indices identifying the samples selected from the whole data-set for each of 'n_runs' independent rounds of DBSCAN clusterings. minPts : int The number of points within an epsilon-radius hypershpere for the said region to qualify as dense. eps : float, optional (default = None) Sets the maximum distance separating two data-points for those data-points to be considered as part of the same neighborhood. quantile : int, optional (default = 50) If 'eps' is not provided by the user, it will be determined as the 'quantile' of the distribution of the k-nearest distances to each sample, with k set to 'minPts'. samples_weights : array of shape (n_runs, n_samples), optional (default = None) Holds the weights of each sample. A sample with weight greater than 'minPts' is guaranteed to be a core sample; a sample with negative weight tends to prevent its 'eps'-neighbors from being core. Weights are absolute and default to 1. metric : string or callable, optional (default = 'euclidean') The metric to use for computing the pairwise distances between samples (each sample corresponds to a row in 'data'). 
If metric is a string or callable, it must be compatible with metrics.pairwise.pairwise_distances. p : float, optional (default = 2) If a Minkowski metric is used, 'p' determines its power. verbose : Boolean, optional (default = True) Whether to display messages reporting the status of the computations and the time it took to complete each major stage of the algorithm. Returns ------- eps : float The parameter of DBSCAN clustering specifying if points are density-reachable. This is either a copy of the value provided at input or, if the user did not specify a value of 'eps' at input, the return value if the one determined from k-distance graphs from the data-set. References ---------- Ester, M., H. P. Kriegel, J. Sander and X. Xu, "A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise". In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 """ data = np.array(data, copy = False) if data.ndim > 2: raise ValueError("\nERROR: DBSCAN_multiplex @ load:\n" "the data array is of dimension %d. Please provide a two-dimensional " "array instead.\n" % data.ndim) if subsamples_matrix is None: subsamples_matrix = np.arange(data.shape[0], dtype = int) subsamples_matrix = subsamples_matrix.reshape(1, -1) else: subsamples_matrix = np.array(subsamples_matrix, copy = False) if subsamples_matrix.ndim > 2: raise ValueError("\nERROR: DBSCAN_multiplex @ load:\n" "the array of subsampled indices is of dimension %d. 
" "Please provide a two-dimensional array instead.\n" % subsamples_matrix.ndim) if (data.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(data.sum()) and not np.all(np.isfinite(data))): raise ValueError('\nERROR: DBSCAN_multiplex @ load:\n' 'the data vector contains at least one infinite or NaN entry.\n') if (subsamples_matrix.dtype.type is np.int_ and not np.isfinite(subsamples_matrix.sum()) and not np.all(np.isfinite(subsamples_matrix))): raise ValueError('\nERROR: DBSCAN_multiplex @ load:\n' 'the array of subsampled indices contains at least one infinite or NaN entry.\n') if not np.all(subsamples_matrix >= 0): raise ValueError('\nERROR: DBSCAN_multiplex @ load:\n' 'the sampled indices should all be positive integers.\n') N_samples = data.shape[0] N_runs, N_subsamples = subsamples_matrix.shape if N_subsamples > N_samples: raise ValueError('\nERROR: DBSCAN_multiplex @ load:\n' 'the number of sampled indices cannot exceed the total number of samples in the whole data-set.\n') for i in range(N_runs): subsamples_matrix[i] = np.unique(subsamples_matrix[i]) if not isinstance(minPts, int): raise TypeError("\nERROR: DBSCAN_multiplex @ load:\n" "the parameter 'minPts' must be an integer.\n") if minPts < 2: raise ValueError("\nERROR: DBSCAN_multiplex @ load:\n" "the value of 'minPts' must be larger than 1.\n") if eps is None: # Determine the parameter 'eps' as the median of the distribution # of the maximum of the minPts-nearest neighbors distances for each sample. 
if verbose: print(("INFO: DBSCAN_multiplex @ load:\n" "starting the determination of an appropriate value of 'eps' for this data-set" " and for the other parameter of the DBSCAN algorithm set to {minPts}.\n" "This might take a while.".format(**locals()))) beg_eps = time.time() quantile = np.rint(quantile) quantile = np.clip(quantile, 0, 100) k_distances = kneighbors_graph(data, minPts, mode = 'distance', metric = metric, p = p).data radii = np.zeros(N_samples, dtype = float) for i in range(0, minPts): radii = np.maximum(radii, k_distances[i::minPts]) if quantile == 50: eps = round(np.median(radii, overwrite_input = True), 4) else: eps = round(np.percentile(radii, quantile), 4) end_eps = time.time() if verbose: print(("\nINFO: DBSCAN_multiplex @ load:\n" "done with evaluating parameter 'eps' from the data-set provided." " This took {} seconds. Value of epsilon: {}.".format(round(end_eps - beg_eps, 4), eps))) else: if not (isinstance(eps, float) or isinstance(eps, int)): raise ValueError("\nERROR: DBSCAN_multiplex @ load:\n" "please provide a numeric value for the radius 'eps'.\n") if not eps > 0.0: raise ValueError("\nERROR: DBSCAN_multiplex @ load:\n" "the radius 'eps' must be positive.\n") eps = round(eps, 4) # For all samples with a large enough neighborhood, 'neighborhoods_indices' # and 'neighborhoods_indptr' help us find the neighbors to every sample. Note # that this definition of neighbors leaves the original point in, # which will be considered later. 
if verbose: print(("\nINFO: DBSCAN_multiplex @ load:\n" "identifying the neighbors within an hypersphere of radius {eps} around each sample," " while at the same time evaluating the number of epsilon-neighbors for each sample.\n" "This might take a fair amount of time.".format(**locals()))) beg_neigh = time.time() fileh = tables.open_file(hdf5_file_name, mode = 'r+') DBSCAN_group = fileh.create_group(fileh.root, 'DBSCAN_group') neighborhoods_indices = fileh.create_earray(DBSCAN_group, 'neighborhoods_indices', tables.Int32Atom(), (0,), 'Indices array for sparse matrix of neighborhoods', expectedrows = int((N_samples ** 2) / 50)) # 'neighborhoods_indptr' is such that for each of row i of the data-matrix # neighborhoods_indices[neighborhoods_indptr[i]:neighborhoods_indptr[i+1]] # contains the column indices of row i from the array of # 'eps'-neighborhoods. neighborhoods_indptr = np.zeros(1, dtype = np.int64) # For each sample, 'neighbors_counts' will keep a tally of the number # of its neighbors within a hypersphere of radius 'eps'. # Note that the sample itself is counted as part of this neighborhood. 
neighbors_counts = fileh.create_carray(DBSCAN_group, 'neighbors_counts', tables.Int32Atom(), (N_runs, N_samples), 'Array of the number of neighbors around each sample of a set of subsampled points', filters = None) chunks_size = get_chunk_size(N_samples, 3) for i in range(0, N_samples, chunks_size): chunk = data[i:min(i + chunks_size, N_samples)] D = pairwise_distances(chunk, data, metric = metric, p = p, n_jobs = 1) D = (D <= eps) if samples_weights is None: for run in range(N_runs): x = subsamples_matrix[run] M = np.take(D, x, axis = 1) legit_rows = np.intersect1d(i + np.arange(min(chunks_size, N_samples - i)), x, assume_unique = True) M = np.take(M, legit_rows - i, axis = 0) neighbors_counts[run, legit_rows] = M.sum(axis = 1) del M else: for run in range(N_runs): x = subsamples_matrix[run] M = np.take(D, x, axis = 1) legit_rows = np.intersect1d(i + np.arange(min(chunks_size, N_samples - i)), x, assume_unique = True) M = np.take(M, legit_rows - i, axis = 0) neighbors_counts[run, legit_rows] = np.array([np.sum(samples_weights[x[row]]) for row in M]) del M candidates = np.where(D == True) del D neighborhoods_indices.append(candidates[1]) _, nbr = np.unique(candidates[0], return_counts = True) counts = np.cumsum(nbr) + neighborhoods_indptr[-1] del candidates neighborhoods_indptr = np.append(neighborhoods_indptr, counts) fileh.create_carray(DBSCAN_group, 'neighborhoods_indptr', tables.Int64Atom(), (N_samples + 1,), 'Array of cumulative number of column indices for each row', filters = None) fileh.root.DBSCAN_group.neighborhoods_indptr[:] = neighborhoods_indptr[:] fileh.create_carray(DBSCAN_group, 'subsamples_matrix', tables.Int32Atom(), (N_runs, N_subsamples), 'Array of subsamples indices', filters = None) fileh.root.DBSCAN_group.subsamples_matrix[:] = subsamples_matrix[:] fileh.close() end_neigh = time.time() if verbose: print(("\nINFO: DBSCAN_multiplex @ load:\n" "done with the neighborhoods. 
This step took {} seconds.".format(round(end_neigh - beg_neigh, 4)))) gc.collect() return eps
[ "def", "load", "(", "hdf5_file_name", ",", "data", ",", "minPts", ",", "eps", "=", "None", ",", "quantile", "=", "50", ",", "subsamples_matrix", "=", "None", ",", "samples_weights", "=", "None", ",", "metric", "=", "'minkowski'", ",", "p", "=", "2", ",...
Determines the radius 'eps' for DBSCAN clustering of 'data' in an adaptive, data-dependent way. Parameters ---------- hdf5_file_name : file object or string The handle or name of an HDF5 data structure where any array needed for DBSCAN and too large to fit into memory is to be stored. data : array of shape (n_samples, n_features) An array of features retained from the data-set to be analysed. Subsamples of this curated data-set can also be analysed by a call to DBSCAN by providing an appropriate list of selected samples labels, stored in 'subsamples_matrix' (see below). subsamples_matrix : array of shape (n_runs, n_subsamples), optional (default = None) Each row of this matrix contains a set of indices identifying the samples selected from the whole data-set for each of 'n_runs' independent rounds of DBSCAN clusterings. minPts : int The number of points within an epsilon-radius hypershpere for the said region to qualify as dense. eps : float, optional (default = None) Sets the maximum distance separating two data-points for those data-points to be considered as part of the same neighborhood. quantile : int, optional (default = 50) If 'eps' is not provided by the user, it will be determined as the 'quantile' of the distribution of the k-nearest distances to each sample, with k set to 'minPts'. samples_weights : array of shape (n_runs, n_samples), optional (default = None) Holds the weights of each sample. A sample with weight greater than 'minPts' is guaranteed to be a core sample; a sample with negative weight tends to prevent its 'eps'-neighbors from being core. Weights are absolute and default to 1. metric : string or callable, optional (default = 'euclidean') The metric to use for computing the pairwise distances between samples (each sample corresponds to a row in 'data'). If metric is a string or callable, it must be compatible with metrics.pairwise.pairwise_distances. p : float, optional (default = 2) If a Minkowski metric is used, 'p' determines its power. 
verbose : Boolean, optional (default = True) Whether to display messages reporting the status of the computations and the time it took to complete each major stage of the algorithm. Returns ------- eps : float The parameter of DBSCAN clustering specifying if points are density-reachable. This is either a copy of the value provided at input or, if the user did not specify a value of 'eps' at input, the return value if the one determined from k-distance graphs from the data-set. References ---------- Ester, M., H. P. Kriegel, J. Sander and X. Xu, "A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise". In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
[ "Determines", "the", "radius", "eps", "for", "DBSCAN", "clustering", "of", "data", "in", "an", "adaptive", "data", "-", "dependent", "way", "." ]
python
train
nion-software/nionswift
nion/swift/Facade.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/Facade.py#L2761-L2768
def document_windows(self) -> typing.List[DocumentWindow]: """Return the document windows. .. versionadded:: 1.0 Scriptable: Yes """ return [DocumentWindow(document_controller) for document_controller in self.__application.document_controllers]
[ "def", "document_windows", "(", "self", ")", "->", "typing", ".", "List", "[", "DocumentWindow", "]", ":", "return", "[", "DocumentWindow", "(", "document_controller", ")", "for", "document_controller", "in", "self", ".", "__application", ".", "document_controller...
Return the document windows. .. versionadded:: 1.0 Scriptable: Yes
[ "Return", "the", "document", "windows", "." ]
python
train
TelAPI/telapi-python
telapi/rest/__init__.py
https://github.com/TelAPI/telapi-python/blob/a13dcb12f7ae5ca9f681b96f2802fff6ec2aff32/telapi/rest/__init__.py#L178-L197
def fetch(self, resource_data=None): """Populates this class with remote data""" if not self._populated: params = { "Page" : self.page, "PageSize" : self.page_size, } params.update(self._filters) if not resource_data: self._resource_data = self._client._get(self._url + ".json", params) else: self._resource_data = resource_data self.total = self._resource_data["total"] self._page_start = self._resource_data["start"] self._page_end = self._resource_data["end"] self._populated = True
[ "def", "fetch", "(", "self", ",", "resource_data", "=", "None", ")", ":", "if", "not", "self", ".", "_populated", ":", "params", "=", "{", "\"Page\"", ":", "self", ".", "page", ",", "\"PageSize\"", ":", "self", ".", "page_size", ",", "}", "params", "...
Populates this class with remote data
[ "Populates", "this", "class", "with", "remote", "data" ]
python
train
Lagg/steamodd
steam/items.py
https://github.com/Lagg/steamodd/blob/2e9ced4e7a6dbe3e09d5a648450bafc12b937b95/steam/items.py#L839-L850
def account_info(self): """ Certain attributes have a user's account information associated with it such as a gifted or crafted item. A dict with two keys: 'persona' and 'id64'. None if the attribute has no account information attached to it. """ account_info = self._attribute.get("account_info") if account_info: return {"persona": account_info.get("personaname", ""), "id64": account_info["steamid"]} else: return None
[ "def", "account_info", "(", "self", ")", ":", "account_info", "=", "self", ".", "_attribute", ".", "get", "(", "\"account_info\"", ")", "if", "account_info", ":", "return", "{", "\"persona\"", ":", "account_info", ".", "get", "(", "\"personaname\"", ",", "\"...
Certain attributes have a user's account information associated with it such as a gifted or crafted item. A dict with two keys: 'persona' and 'id64'. None if the attribute has no account information attached to it.
[ "Certain", "attributes", "have", "a", "user", "s", "account", "information", "associated", "with", "it", "such", "as", "a", "gifted", "or", "crafted", "item", "." ]
python
train
a-tal/nagaram
nagaram/cmdline.py
https://github.com/a-tal/nagaram/blob/2edcb0ef8cb569ebd1c398be826472b4831d6110/nagaram/cmdline.py#L14-L45
def pretty_print(input_word, anagrams, by_length=False): """Prints the anagram results sorted by score to stdout. Args: input_word: the base word we searched on anagrams: generator of (word, score) from anagrams_in_word by_length: a boolean to declare printing by length instead of score """ scores = {} if by_length: noun = "tiles" for word, score in anagrams: try: scores[len(word)].append("{0} ({1:d})".format(word, score)) except KeyError: scores[len(word)] = ["{0} ({1:d})".format(word, score)] else: noun = "points" for word, score in anagrams: try: scores[score].append(word) except KeyError: scores[score] = [word] print("Anagrams for {0}{1}:".format(input_word, " (score)" * by_length)) if not valid_scrabble_word(input_word): print("{0} is not possible in Scrabble.".format(input_word)) for key, value in sorted(scores.items(), reverse=True): print("{0:d} {1}: {2}".format(key, noun, ", ".join(value)))
[ "def", "pretty_print", "(", "input_word", ",", "anagrams", ",", "by_length", "=", "False", ")", ":", "scores", "=", "{", "}", "if", "by_length", ":", "noun", "=", "\"tiles\"", "for", "word", ",", "score", "in", "anagrams", ":", "try", ":", "scores", "[...
Prints the anagram results sorted by score to stdout. Args: input_word: the base word we searched on anagrams: generator of (word, score) from anagrams_in_word by_length: a boolean to declare printing by length instead of score
[ "Prints", "the", "anagram", "results", "sorted", "by", "score", "to", "stdout", "." ]
python
test
samuelcolvin/pydantic
pydantic/schema.py
https://github.com/samuelcolvin/pydantic/blob/bff8a1789dfde2c38928cced6640887b53615aa3/pydantic/schema.py#L408-L474
def field_type_schema(
    field: Field,
    *,
    by_alias: bool,
    model_name_map: Dict[Type['main.BaseModel'], str],
    schema_overrides: bool = False,
    ref_prefix: Optional[str] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """
    Used by ``field_schema()``, you probably should be using that function.

    Take a single ``field`` and generate the schema for its type only, not including additional
    information as title, etc. Also return additional schema definitions, from sub-models.

    Returns a ``(schema, definitions)`` tuple; ``definitions`` accumulates the
    schemas of any referenced sub-models. Dispatches on ``field.shape``.
    """
    definitions = {}
    ref_prefix = ref_prefix or default_prefix
    if field.shape is Shape.LIST:
        # list -> JSON-schema array of the item (singleton) schema
        f_schema, f_definitions = field_singleton_schema(
            field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
        )
        definitions.update(f_definitions)
        return {'type': 'array', 'items': f_schema}, definitions
    elif field.shape is Shape.SET:
        # set -> array with uniqueItems constraint
        f_schema, f_definitions = field_singleton_schema(
            field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
        )
        definitions.update(f_definitions)
        return {'type': 'array', 'uniqueItems': True, 'items': f_schema}, definitions
    elif field.shape is Shape.MAPPING:
        dict_schema: Dict[str, Any] = {'type': 'object'}
        key_field = cast(Field, field.key_field)
        # a constrained str key type may carry a regex; if so it becomes
        # patternProperties rather than additionalProperties
        regex = getattr(key_field.type_, 'regex', None)
        f_schema, f_definitions = field_singleton_schema(
            field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
        )
        definitions.update(f_definitions)
        if regex:
            # Dict keys have a regex pattern
            # f_schema might be a schema or empty dict, add it either way
            dict_schema['patternProperties'] = {regex.pattern: f_schema}
        elif f_schema:
            # The dict values are not simply Any, so they need a schema
            dict_schema['additionalProperties'] = f_schema
        return dict_schema, definitions
    elif field.shape is Shape.TUPLE:
        # heterogeneous tuple -> per-position item schemas (recursive call)
        sub_schema = []
        sub_fields = cast(List[Field], field.sub_fields)
        for sf in sub_fields:
            sf_schema, sf_definitions = field_type_schema(
                sf, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
            )
            definitions.update(sf_definitions)
            sub_schema.append(sf_schema)
        if len(sub_schema) == 1:
            # single-element tuple collapses to a plain items schema
            sub_schema = sub_schema[0]  # type: ignore
        return {'type': 'array', 'items': sub_schema}, definitions
    else:
        assert field.shape is Shape.SINGLETON, field.shape
        # scalar / sub-model field; schema_overrides only matters here
        f_schema, f_definitions = field_singleton_schema(
            field,
            by_alias=by_alias,
            model_name_map=model_name_map,
            schema_overrides=schema_overrides,
            ref_prefix=ref_prefix,
        )
        definitions.update(f_definitions)
        return f_schema, definitions
[ "def", "field_type_schema", "(", "field", ":", "Field", ",", "*", ",", "by_alias", ":", "bool", ",", "model_name_map", ":", "Dict", "[", "Type", "[", "'main.BaseModel'", "]", ",", "str", "]", ",", "schema_overrides", ":", "bool", "=", "False", ",", "ref_...
Used by ``field_schema()``, you probably should be using that function. Take a single ``field`` and generate the schema for its type only, not including additional information as title, etc. Also return additional schema definitions, from sub-models.
[ "Used", "by", "field_schema", "()", "you", "probably", "should", "be", "using", "that", "function", "." ]
python
train
pgmpy/pgmpy
pgmpy/independencies/Independencies.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/independencies/Independencies.py#L135-L239
def closure(self):
    """
    Returns a new `Independencies()`-object that additionally contains those `IndependenceAssertions`
    that are implied by the the current independencies (using with the `semi-graphoid axioms
    <https://en.wikipedia.org/w/index.php?title=Conditional_independence&oldid=708760689#Rules_of_conditional_independence>`_;
    see (Pearl, 1989, `Conditional Independence and its representations
    <http://www.cs.technion.ac.il/~dang/journal_papers/pearl1989conditional.pdf>`_)).

    Might be very slow if more than six variables are involved.

    Examples
    --------
    >>> from pgmpy.independencies import Independencies
    >>> ind1 = Independencies(('A', ['B', 'C'], 'D'))
    >>> ind1.closure()
    (A _|_ B | D, C)
    (A _|_ B, C | D)
    (A _|_ B | D)
    (A _|_ C | D, B)
    (A _|_ C | D)
    >>> ind2 = Independencies(('W', ['X', 'Y', 'Z']))
    >>> ind2.closure()
    (W _|_ Y)
    (W _|_ Y | X)
    (W _|_ Z | Y)
    (W _|_ Z, X, Y)
    (W _|_ Z)
    (W _|_ Z, X)
    (W _|_ X, Y)
    (W _|_ Z | X)
    (W _|_ Z, Y | X)
    [..]
    """
    def single_var(var):
        "Checks if var represents a single variable"
        if not hasattr(var, '__iter__'):
            return True
        else:
            return len(var) == 1

    def sg0(ind):
        "Symmetry rule: 'X ⟂ Y | Z' -> 'Y ⟂ X | Z'"
        return IndependenceAssertion(ind.event2, ind.event1, ind.event3)

    # since X⟂Y|Z == Y⟂X|Z in pgmpy, sg0 (symmetry) is not used as an axiom/rule.
    # instead we use a decorator for the other axioms to apply them on both sides
    def apply_left_and_right(func):
        # Wraps a rule so it is applied to each assertion and to its
        # symmetric counterpart (and, for binary rules, to all four
        # combinations of the two assertions and their mirrors).
        def symmetric_func(*args):
            if len(args) == 1:
                return func(args[0]) + func(sg0(args[0]))
            if len(args) == 2:
                return (func(*args) + func(args[0], sg0(args[1])) +
                        func(sg0(args[0]), args[1]) +
                        func(sg0(args[0]), sg0(args[1])))
        return symmetric_func

    @apply_left_and_right
    def sg1(ind):
        "Decomposition rule: 'X ⟂ Y,W | Z' -> 'X ⟂ Y | Z', 'X ⟂ W | Z'"
        if single_var(ind.event2):
            return []
        else:
            return [IndependenceAssertion(ind.event1, ind.event2 - {elem}, ind.event3)
                    for elem in ind.event2]

    @apply_left_and_right
    def sg2(ind):
        "Weak Union rule: 'X ⟂ Y,W | Z' -> 'X ⟂ Y | W,Z', 'X ⟂ W | Y,Z' "
        if single_var(ind.event2):
            return []
        else:
            return [IndependenceAssertion(ind.event1, ind.event2 - {elem}, {elem} | ind.event3)
                    for elem in ind.event2]

    @apply_left_and_right
    def sg3(ind1, ind2):
        "Contraction rule: 'X ⟂ W | Y,Z' & 'X ⟂ Y | Z' -> 'X ⟂ W,Y | Z'"
        if ind1.event1 != ind2.event1:
            return []

        Y = ind2.event2
        Z = ind2.event3
        Y_Z = ind1.event3
        # contraction only fires when ind1's conditioning set is exactly Y|Z
        # split into disjoint parts (strict-subset checks on both sides)
        if Y < Y_Z and Z < Y_Z and Y.isdisjoint(Z):
            return [IndependenceAssertion(ind1.event1, ind1.event2 | Y, Z)]
        else:
            return []

    # apply semi-graphoid axioms as long as new independencies are found.
    # Fixed-point iteration: pair every newly found assertion with all known
    # ones for the binary contraction rule, then drop already-known results.
    all_independencies = set()
    new_inds = set(self.independencies)

    while new_inds:
        new_pairs = (set(itertools.permutations(new_inds, 2)) |
                     set(itertools.product(new_inds, all_independencies)) |
                     set(itertools.product(all_independencies, new_inds)))

        all_independencies |= new_inds
        new_inds = set(sum([sg1(ind) for ind in new_inds] +
                           [sg2(ind) for ind in new_inds] +
                           [sg3(*inds) for inds in new_pairs], []))
        new_inds -= all_independencies

    return Independencies(*list(all_independencies))
[ "def", "closure", "(", "self", ")", ":", "def", "single_var", "(", "var", ")", ":", "\"Checks if var represents a single variable\"", "if", "not", "hasattr", "(", "var", ",", "'__iter__'", ")", ":", "return", "True", "else", ":", "return", "len", "(", "var",...
Returns a new `Independencies()`-object that additionally contains those `IndependenceAssertions` that are implied by the the current independencies (using with the `semi-graphoid axioms <https://en.wikipedia.org/w/index.php?title=Conditional_independence&oldid=708760689#Rules_of_conditional_independence>`_; see (Pearl, 1989, `Conditional Independence and its representations <http://www.cs.technion.ac.il/~dang/journal_papers/pearl1989conditional.pdf>`_)). Might be very slow if more than six variables are involved. Examples -------- >>> from pgmpy.independencies import Independencies >>> ind1 = Independencies(('A', ['B', 'C'], 'D')) >>> ind1.closure() (A _|_ B | D, C) (A _|_ B, C | D) (A _|_ B | D) (A _|_ C | D, B) (A _|_ C | D) >>> ind2 = Independencies(('W', ['X', 'Y', 'Z'])) >>> ind2.closure() (W _|_ Y) (W _|_ Y | X) (W _|_ Z | Y) (W _|_ Z, X, Y) (W _|_ Z) (W _|_ Z, X) (W _|_ X, Y) (W _|_ Z | X) (W _|_ Z, Y | X) [..]
[ "Returns", "a", "new", "Independencies", "()", "-", "object", "that", "additionally", "contains", "those", "IndependenceAssertions", "that", "are", "implied", "by", "the", "the", "current", "independencies", "(", "using", "with", "the", "semi", "-", "graphoid", ...
python
train
fhamborg/news-please
newsplease/crawler/spiders/rss_crawler.py
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/crawler/spiders/rss_crawler.py#L42-L49
def parse(self, response):
    """
    Extracts the Rss Feed and initiates crawling it.

    :param obj response: The scrapy response
    """
    # Resolve the feed URL from the landing page, then hand the feed
    # itself to rss_parse for item extraction.
    yield scrapy.Request(self.helper.url_extractor.get_rss_url(response),
                         callback=self.rss_parse)
[ "def", "parse", "(", "self", ",", "response", ")", ":", "yield", "scrapy", ".", "Request", "(", "self", ".", "helper", ".", "url_extractor", ".", "get_rss_url", "(", "response", ")", ",", "callback", "=", "self", ".", "rss_parse", ")" ]
Extracts the Rss Feed and initiates crawling it. :param obj response: The scrapy response
[ "Extracts", "the", "Rss", "Feed", "and", "initiates", "crawling", "it", "." ]
python
train
rwl/pylon
pyreto/continuous/experiment.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/continuous/experiment.py#L205-L246
def _oneInteraction(self):
    """ Coordinates one interaction between each agent and its environment.

    One simulation period: apply outages, reset and clear the market,
    collect one action and reward per (task, agent) pair, then rescale
    bus demand from the demand-cycle iterator for the next period.
    """
    self.stepid += 1

    logger.info("Entering simulation period %d." % self.stepid)

    # Apply branches outages.
    if self.branchOutages is not None:
        self.doOutages()

    # Initialise the market.
    self.market.reset()

    # Get an action from each agent and perform it.
    for task, agent in zip(self.tasks, self.agents):
#            if self.do_optimisation[agent]:
#                raise Exception("When using a black-box learning algorithm, "
#                                "only full episodes can be done.")
#            if not task.isFinished():
        observation = task.getObservation()
        agent.integrateObservation(observation)
        action = agent.getAction()
        task.performAction(action)

    # Clear the market.
    self.market.run()

    # Reward each agent appropriately.
    for task, agent in zip(self.tasks, self.agents):
#            if not task.isFinished():
        reward = task.getReward()
        agent.giveReward(reward)

    # Scale loads.
    # BUGFIX: use the builtin next() instead of the Python-2-only
    # iterator .next() method so this also runs on Python 3.
    c = next(self._pcycle)
    for bus in self.market.case.buses:
        bus.p_demand = self.pdemand[bus] * c

    logger.info("")
[ "def", "_oneInteraction", "(", "self", ")", ":", "self", ".", "stepid", "+=", "1", "logger", ".", "info", "(", "\"Entering simulation period %d.\"", "%", "self", ".", "stepid", ")", "# Apply branches outages.", "if", "self", ".", "branchOutages", "is", "not", ...
Coordinates one interaction between each agent and its environment.
[ "Coordinates", "one", "interaction", "between", "each", "agent", "and", "its", "environment", "." ]
python
train
javipalanca/spade
spade/behaviour.py
https://github.com/javipalanca/spade/blob/59942bd1a1edae4c807d06cabb178d5630cbf61b/spade/behaviour.py#L469-L482
def add_state(self, name: str, state: State, initial: bool = False):
    """
    Adds a new state to the FSM.

    Args:
      name (str): the name of the state, which is used as its identifier.
      state (spade.behaviour.State): The state class
      initial (bool, optional): whether the state is the initial state or not.
        (Only one initial state is allowed) (Default value = False)

    Raises:
      AttributeError: if ``state`` is not a ``State`` instance (kept as
        AttributeError for backward compatibility with existing callers).
    """
    # isinstance is the idiomatic, subclass-aware form of the original
    # issubclass(state.__class__, State) check.
    if not isinstance(state, State):
        raise AttributeError("state must be subclass of spade.behaviour.State")
    self._states[name] = state
    if initial:
        self.current_state = name
[ "def", "add_state", "(", "self", ",", "name", ":", "str", ",", "state", ":", "State", ",", "initial", ":", "bool", "=", "False", ")", ":", "if", "not", "issubclass", "(", "state", ".", "__class__", ",", "State", ")", ":", "raise", "AttributeError", "...
Adds a new state to the FSM. Args: name (str): the name of the state, which is used as its identifier. state (spade.behaviour.State): The state class initial (bool, optional): wether the state is the initial state or not. (Only one initial state is allowed) (Default value = False)
[ "Adds", "a", "new", "state", "to", "the", "FSM", "." ]
python
train
lucastheis/django-publications
publications/templatetags/publication_extras.py
https://github.com/lucastheis/django-publications/blob/5a75cf88cf794937711b6850ff2acb07fe005f08/publications/templatetags/publication_extras.py#L67-L88
def get_publication_list(context, list, template='publications/publications.html'):
    """
    Get a publication list.
    """
    # note: `list` is the list *name*; the parameter name is part of the
    # template-tag interface and must stay as-is.
    matches = List.objects.filter(list__iexact=list)
    if not matches:
        return ''
    publication_list = matches[0]

    publications = publication_list.publication_set.all()
    publications = publications.order_by('-year', '-month', '-id')
    if not publications:
        return ''

    # load custom links and files
    populate(publications)

    return render_template(template, context['request'],
                           {'list': publication_list,
                            'publications': publications})
[ "def", "get_publication_list", "(", "context", ",", "list", ",", "template", "=", "'publications/publications.html'", ")", ":", "list", "=", "List", ".", "objects", ".", "filter", "(", "list__iexact", "=", "list", ")", "if", "not", "list", ":", "return", "''...
Get a publication list.
[ "Get", "a", "publication", "list", "." ]
python
valid
treycucco/bidon
bidon/util/transform.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/transform.py#L73-L86
def get_flattened(dct, names, path_joiner="_"):
    """Flatten a child dicts, whose resulting keys are joined by path_joiner.

    E.G. { "valuation": { "currency": "USD", "amount": "100" } } ->
    { "valuation_currency": "USD", "valuation_amount": "100" }
    """
    flattened = dict()
    for key, val in dct.items():
        if key not in names:
            # keys outside `names` are copied through unchanged
            flattened[key] = val
            continue
        # expand the child dict into joined-path keys
        for path, leaf in flatten_dict(val, (key, )):
            flattened[path_joiner.join(path)] = leaf
    return flattened
[ "def", "get_flattened", "(", "dct", ",", "names", ",", "path_joiner", "=", "\"_\"", ")", ":", "new_dct", "=", "dict", "(", ")", "for", "key", ",", "val", "in", "dct", ".", "items", "(", ")", ":", "if", "key", "in", "names", ":", "child", "=", "{"...
Flatten a child dicts, whose resulting keys are joined by path_joiner. E.G. { "valuation": { "currency": "USD", "amount": "100" } } -> { "valuation_currency": "USD", "valuation_amount": "100" }
[ "Flatten", "a", "child", "dicts", "whose", "resulting", "keys", "are", "joined", "by", "path_joiner", "." ]
python
train
josiahcarlson/rom
rom/util.py
https://github.com/josiahcarlson/rom/blob/8b5607a856341df85df33422accc30ba9294dbdb/rom/util.py#L489-L499
def flush(self, full=False, all=False, force=False):
    '''
    Call ``.save()`` on all modified entities in the session. Use when you
    want to flush changes to Redis, but don't want to lose your local
    session cache.

    See the ``.commit()`` method for arguments and their meanings.
    '''
    self._init()
    entities = list(self.known.values())
    return self.save(*entities, full=full, all=all, force=force)
[ "def", "flush", "(", "self", ",", "full", "=", "False", ",", "all", "=", "False", ",", "force", "=", "False", ")", ":", "self", ".", "_init", "(", ")", "return", "self", ".", "save", "(", "*", "self", ".", "known", ".", "values", "(", ")", ",",...
Call ``.save()`` on all modified entities in the session. Use when you want to flush changes to Redis, but don't want to lose your local session cache. See the ``.commit()`` method for arguments and their meanings.
[ "Call", ".", "save", "()", "on", "all", "modified", "entities", "in", "the", "session", ".", "Use", "when", "you", "want", "to", "flush", "changes", "to", "Redis", "but", "don", "t", "want", "to", "lose", "your", "local", "session", "cache", "." ]
python
test
modin-project/modin
modin/engines/base/frame/partition_manager.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L200-L216
def map_across_blocks(self, map_func):
    """Applies `map_func` to every partition.

    Args:
        map_func: The function to apply.

    Returns:
        A new BaseFrameManager object, the type of object that called this.
    """
    # Preprocess once, then fan the function out over the 2D partition grid.
    prepped = self.preprocess_func(map_func)
    mapped_rows = []
    for row_of_parts in self.partitions:
        mapped_rows.append([part.apply(prepped) for part in row_of_parts])
    return self.__constructor__(np.array(mapped_rows))
[ "def", "map_across_blocks", "(", "self", ",", "map_func", ")", ":", "preprocessed_map_func", "=", "self", ".", "preprocess_func", "(", "map_func", ")", "new_partitions", "=", "np", ".", "array", "(", "[", "[", "part", ".", "apply", "(", "preprocessed_map_func"...
Applies `map_func` to every partition. Args: map_func: The function to apply. Returns: A new BaseFrameManager object, the type of object that called this.
[ "Applies", "map_func", "to", "every", "partition", "." ]
python
train
django-leonardo/django-leonardo
leonardo/utils/settings.py
https://github.com/django-leonardo/django-leonardo/blob/4b933e1792221a13b4028753d5f1d3499b0816d4/leonardo/utils/settings.py#L69-L80
def _is_leonardo_module(whatever): '''check if is leonardo module''' # check if is python module if hasattr(whatever, 'default') \ or hasattr(whatever, 'leonardo_module_conf'): return True # check if is python object for key in dir(whatever): if 'LEONARDO' in key: return True
[ "def", "_is_leonardo_module", "(", "whatever", ")", ":", "# check if is python module", "if", "hasattr", "(", "whatever", ",", "'default'", ")", "or", "hasattr", "(", "whatever", ",", "'leonardo_module_conf'", ")", ":", "return", "True", "# check if is python object",...
check if is leonardo module
[ "check", "if", "is", "leonardo", "module" ]
python
train
SpriteLink/NIPAP
nipap-www/nipapwww/controllers/xhr.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap-www/nipapwww/controllers/xhr.py#L749-L786
def get_current_vrfs(self): """ Return VRF filter list from session variable Before returning list, make a search for all VRFs currently in the list to verify that they still exist. """ # Verify that all currently selected VRFs still exists cur_vrfs = session.get('current_vrfs', {}).items() if len(cur_vrfs) > 0: q = { 'operator': 'equals', 'val1': 'id', 'val2': cur_vrfs[0][0] } if len(cur_vrfs) > 1: for vrf_id, vrf in cur_vrfs[1:]: q = { 'operator': 'or', 'val1': q, 'val2': { 'operator': 'equals', 'val1': 'id', 'val2': vrf_id } } res = VRF.search(q) session['current_vrfs'] = {} for vrf in res['result']: session['current_vrfs'][vrf.id] = { 'id': vrf.id, 'rt': vrf.rt, 'name': vrf.name, 'description': vrf.description } session.save() return json.dumps(session.get('current_vrfs', {}))
[ "def", "get_current_vrfs", "(", "self", ")", ":", "# Verify that all currently selected VRFs still exists", "cur_vrfs", "=", "session", ".", "get", "(", "'current_vrfs'", ",", "{", "}", ")", ".", "items", "(", ")", "if", "len", "(", "cur_vrfs", ")", ">", "0", ...
Return VRF filter list from session variable Before returning list, make a search for all VRFs currently in the list to verify that they still exist.
[ "Return", "VRF", "filter", "list", "from", "session", "variable" ]
python
train
aisthesis/pynance
pynance/opt/core.py
https://github.com/aisthesis/pynance/blob/9eb0d78b60fe2a324ed328d026fedb6dbe8f7f41/pynance/opt/core.py#L69-L96
def info(self):
    """
    Show expiration dates, equity price, quote time.

    Returns
    -------
    self : :class:`~pynance.opt.core.Options`
        Returns a reference to the calling object to allow chaining.

    expiries : :class:`pandas.tseries.index.DatetimeIndex`

    Examples
    --------
    >>> fopt, fexp = pn.opt.get('f').info()
    Expirations:
    ...
    Stock: 16.25
    Quote time: 2015-03-01 16:00
    """
    print("Expirations:")
    for index, expiry in enumerate(self.data.index.levels[1].to_pydatetime()):
        print("{:2d} {}".format(index, expiry.strftime('%Y-%m-%d')))
    print("Stock: {:.2f}".format(self.data.iloc[0].loc['Underlying_Price']))
    print("Quote time: {}".format(self.quotetime().strftime('%Y-%m-%d %H:%M%z')))
    return self, self.exps()
[ "def", "info", "(", "self", ")", ":", "print", "(", "\"Expirations:\"", ")", "_i", "=", "0", "for", "_datetime", "in", "self", ".", "data", ".", "index", ".", "levels", "[", "1", "]", ".", "to_pydatetime", "(", ")", ":", "print", "(", "\"{:2d} {}\"",...
Show expiration dates, equity price, quote time. Returns ------- self : :class:`~pynance.opt.core.Options` Returns a reference to the calling object to allow chaining. expiries : :class:`pandas.tseries.index.DatetimeIndex` Examples -------- >>> fopt, fexp = pn.opt.get('f').info() Expirations: ... Stock: 16.25 Quote time: 2015-03-01 16:00
[ "Show", "expiration", "dates", "equity", "price", "quote", "time", "." ]
python
train
dropbox/stone
stone/ir/api.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/ir/api.py#L323-L337
def get_namespaces_imported_by_route_io(self):
    # type: () -> typing.List[ApiNamespace]
    """
    Returns a list of Namespace objects. A namespace is a member of this
    list if it is imported by the current namespace and has a data type
    from it referenced as an argument, result, or error of a route.
    Namespaces are in ASCII order by name.
    """
    referenced = set()
    for data_type in sorted(self.get_route_io_data_types(),
                            key=lambda dt: dt.name):
        # only foreign namespaces count as imports
        if data_type.namespace != self:
            referenced.add(data_type.namespace)
    return sorted(referenced, key=lambda namespace: namespace.name)
[ "def", "get_namespaces_imported_by_route_io", "(", "self", ")", ":", "# type: () -> typing.List[ApiNamespace]", "namespace_data_types", "=", "sorted", "(", "self", ".", "get_route_io_data_types", "(", ")", ",", "key", "=", "lambda", "dt", ":", "dt", ".", "name", ")"...
Returns a list of Namespace objects. A namespace is a member of this list if it is imported by the current namespace and has a data type from it referenced as an argument, result, or error of a route. Namespaces are in ASCII order by name.
[ "Returns", "a", "list", "of", "Namespace", "objects", ".", "A", "namespace", "is", "a", "member", "of", "this", "list", "if", "it", "is", "imported", "by", "the", "current", "namespace", "and", "has", "a", "data", "type", "from", "it", "referenced", "as"...
python
train
openearth/bmi-python
bmi/wrapper.py
https://github.com/openearth/bmi-python/blob/2f53f24d45515eb0711c2d28ddd6c1582045248f/bmi/wrapper.py#L701-L711
def set_current_time(self, current_time):
    """
    Sets current time of simulation in the wrapped library.

    :param current_time: the new model time value; converted to a C double
        before being passed by reference to the library.

    If the wrapped engine does not export ``set_current_time``, the call
    is logged as a warning and otherwise ignored.
    """
    current_time = c_double(current_time)
    try:
        self.library.set_current_time.argtypes = [POINTER(c_double)]
        self.library.set_current_time.restype = None
        self.library.set_current_time(byref(current_time))
    except AttributeError:
        # logger.warn is a deprecated alias; use warning()
        logger.warning(
            "Tried to set current time but method is not implemented in %s",
            self.engine)
[ "def", "set_current_time", "(", "self", ",", "current_time", ")", ":", "current_time", "=", "c_double", "(", "current_time", ")", "try", ":", "self", ".", "library", ".", "set_current_time", ".", "argtypes", "=", "[", "POINTER", "(", "c_double", ")", "]", ...
sets current time of simulation
[ "sets", "current", "time", "of", "simulation" ]
python
train
hydraplatform/hydra-base
hydra_base/lib/attributes.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/attributes.py#L481-L497
def get_resource_attribute(resource_attr_id, **kwargs):
    """
        Get a specific resource attribute, by ID.

        Raises ResourceNotFoundError if no resource attribute with the
        given ID exists.
    """
    resource_attr_qry = db.DBSession.query(ResourceAttr).filter(
        ResourceAttr.id == resource_attr_id,
    )

    resource_attr = resource_attr_qry.first()

    if resource_attr is None:
        # BUGFIX: the id was previously passed as a second constructor
        # argument ("... %s ...", id), which standard exceptions never
        # %-interpolate; format the message explicitly instead.
        # NOTE(review): confirm ResourceNotFoundError itself does no
        # lazy formatting.
        raise ResourceNotFoundError(
            "Resource attribute %s does not exist" % resource_attr_id)

    return resource_attr
[ "def", "get_resource_attribute", "(", "resource_attr_id", ",", "*", "*", "kwargs", ")", ":", "resource_attr_qry", "=", "db", ".", "DBSession", ".", "query", "(", "ResourceAttr", ")", ".", "filter", "(", "ResourceAttr", ".", "id", "==", "resource_attr_id", ",",...
Get a specific resource attribte, by ID If type_id is Gspecified, only return the resource attributes within the type.
[ "Get", "a", "specific", "resource", "attribte", "by", "ID", "If", "type_id", "is", "Gspecified", "only", "return", "the", "resource", "attributes", "within", "the", "type", "." ]
python
train
tanghaibao/jcvi
jcvi/variation/str.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/str.py#L350-L405
def treds(args):
    """
    %prog treds hli.tred.tsv

    Compile allele_frequency for TREDs results. Write data.tsv, meta.tsv
    and mask.tsv in one go.
    """
    p = OptionParser(treds.__doc__)
    p.add_option("--csv", default=False, action="store_true",
                 help="Also write `meta.csv`")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    tredresults, = args
    df = pd.read_csv(tredresults, sep="\t")

    tredsfile = datafile("TREDs.meta.csv")
    tf = pd.read_csv(tredsfile)

    tds = list(tf["abbreviation"])
    ids = list(tf["id"])
    tags = ["SampleKey"]
    final_columns = ["SampleKey"]
    afs = []
    # `tred_id` was named `id` before — renamed to avoid shadowing the builtin
    for td, tred_id in zip(tds, ids):
        tag1 = "{}.1".format(td)
        tag2 = "{}.2".format(td)
        if tag2 not in df:
            afs.append("{}")
            continue
        tags.append(tag2)
        final_columns.append(tred_id)
        a = np.array(list(df[tag1]) + list(df[tag2]))
        counts = alleles_to_counts(a)
        af = counts_to_af(counts)
        afs.append(af)

    tf["allele_frequency"] = afs

    metafile = "TREDs_{}_SEARCH.meta.tsv".format(timestamp())
    tf.to_csv(metafile, sep="\t", index=False)
    logging.debug("File `{}` written.".format(metafile))
    if opts.csv:
        metacsvfile = metafile.rsplit(".", 1)[0] + ".csv"
        tf.to_csv(metacsvfile, index=False)
        logging.debug("File `{}` written.".format(metacsvfile))

    pp = df[tags]
    pp.columns = final_columns
    # BUGFIX: the original bound this filename to a local called `datafile`,
    # shadowing the datafile() helper used above in the same function.
    data_tsv = "TREDs_{}_SEARCH.data.tsv".format(timestamp())
    pp.to_csv(data_tsv, sep="\t", index=False)
    logging.debug("File `{}` written.".format(data_tsv))

    mask([data_tsv, metafile])
[ "def", "treds", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "treds", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--csv\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Also write `meta.csv`\"",...
%prog treds hli.tred.tsv Compile allele_frequency for TREDs results. Write data.tsv, meta.tsv and mask.tsv in one go.
[ "%prog", "treds", "hli", ".", "tred", ".", "tsv" ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.py#L197-L254
def cache_response(self, request, response, body=None): """ Algorithm for caching requests. This assumes a requests Response object. """ # From httplib2: Don't cache 206's since we aren't going to # handle byte range requests if response.status not in [200, 203, 300, 301]: return response_headers = CaseInsensitiveDict(response.headers) cc_req = self.parse_cache_control(request.headers) cc = self.parse_cache_control(response_headers) cache_url = self.cache_url(request.url) # Delete it from the cache if we happen to have it stored there no_store = cc.get('no-store') or cc_req.get('no-store') if no_store and self.cache.get(cache_url): self.cache.delete(cache_url) # If we've been given an etag, then keep the response if self.cache_etags and 'etag' in response_headers: self.cache.set( cache_url, self.serializer.dumps(request, response, body=body), ) # Add to the cache any 301s. We do this before looking that # the Date headers. elif response.status == 301: self.cache.set( cache_url, self.serializer.dumps(request, response) ) # Add to the cache if the response headers demand it. If there # is no date header then we can't do anything about expiring # the cache. elif 'date' in response_headers: # cache when there is a max-age > 0 if cc and cc.get('max-age'): if int(cc['max-age']) > 0: self.cache.set( cache_url, self.serializer.dumps(request, response, body=body), ) # If the request can expire, it means we should cache it # in the meantime. elif 'expires' in response_headers: if response_headers['expires']: self.cache.set( cache_url, self.serializer.dumps(request, response, body=body), )
[ "def", "cache_response", "(", "self", ",", "request", ",", "response", ",", "body", "=", "None", ")", ":", "# From httplib2: Don't cache 206's since we aren't going to", "# handle byte range requests", "if", "response", ".", "status", "not", "in", "[", "2...
Algorithm for caching requests. This assumes a requests Response object.
[ "Algorithm", "for", "caching", "requests", "." ]
python
test
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/model.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/model.py#L480-L520
def download_all(self, check_before_download=True): '''download_all High-level api: Convert cxml tree to an internal schema tree. This method is recursive. Parameters ---------- check_before_download : `bool` True if checking capabilities.txt file is required. Returns ------- None Nothing returns. ''' # check the content of self.yang_capabilities if check_before_download: if not self.need_download: logger.info('Skip downloading as the content of {} matches ' \ 'device hello message' \ .format(self.yang_capabilities)) return # clean up folder self.dir_yang for root, dirs, files in os.walk(self.dir_yang): for f in files: os.remove(os.path.join(root, f)) # download all self.to_be_downloaded = set(self.device.models_loadable) self.downloaded = set() while self.to_be_downloaded: self.download(self.to_be_downloaded.pop()) # write self.yang_capabilities with open(self.yang_capabilities, 'w') as f: f.write('\n'.join(sorted(list(self.device.server_capabilities))))
[ "def", "download_all", "(", "self", ",", "check_before_download", "=", "True", ")", ":", "# check the content of self.yang_capabilities", "if", "check_before_download", ":", "if", "not", "self", ".", "need_download", ":", "logger", ".", "info", "(", "'Skip downloading...
download_all High-level api: Convert cxml tree to an internal schema tree. This method is recursive. Parameters ---------- check_before_download : `bool` True if checking capabilities.txt file is required. Returns ------- None Nothing returns.
[ "download_all" ]
python
train
bspaans/python-mingus
mingus/midi/sequencer.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/sequencer.py#L129-L145
def play_Note(self, note, channel=1, velocity=100):
    """Play a Note object on a channel with a velocity[0-127].

    You can either specify the velocity and channel here as arguments or
    you can set the Note.velocity and Note.channel attributes, which will
    take presedence over the function arguments.
    """
    # note-level attributes override the call arguments when present
    velocity = getattr(note, 'velocity', velocity)
    channel = getattr(note, 'channel', channel)
    midi_note = int(note) + 12
    self.play_event(midi_note, int(channel), int(velocity))
    self.notify_listeners(self.MSG_PLAY_INT,
                          {'channel': int(channel), 'note': midi_note,
                           'velocity': int(velocity)})
    self.notify_listeners(self.MSG_PLAY_NOTE,
                          {'channel': int(channel), 'note': note,
                           'velocity': int(velocity)})
    return True
[ "def", "play_Note", "(", "self", ",", "note", ",", "channel", "=", "1", ",", "velocity", "=", "100", ")", ":", "if", "hasattr", "(", "note", ",", "'velocity'", ")", ":", "velocity", "=", "note", ".", "velocity", "if", "hasattr", "(", "note", ",", "...
Play a Note object on a channel with a velocity[0-127]. You can either specify the velocity and channel here as arguments or you can set the Note.velocity and Note.channel attributes, which will take presedence over the function arguments.
[ "Play", "a", "Note", "object", "on", "a", "channel", "with", "a", "velocity", "[", "0", "-", "127", "]", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L72-L100
def _merge_wf_inputs(new, out, wf_outputs, to_ignore, parallel, nested_inputs): """Merge inputs for a sub-workflow, adding any not present inputs in out. Skips inputs that are internally generated or generated and ignored, keeping only as inputs those that we do not generate internally. """ internal_generated_ids = [] for vignore in to_ignore: vignore_id = _get_string_vid(vignore) # ignore anything we generate internally, but not those we need to pull in # from the external process if vignore_id not in [v["id"] for v in wf_outputs]: internal_generated_ids.append(vignore_id) ignore_ids = set(internal_generated_ids + [v["id"] for v in wf_outputs]) cur_ids = set([v["id"] for v in out]) remapped_new = [] for v in new: remapped_v = copy.deepcopy(v) outv = copy.deepcopy(v) outv["id"] = get_base_id(v["id"]) outv["source"] = v["id"] if outv["id"] not in cur_ids and outv["id"] not in ignore_ids: if nested_inputs and v["id"] in nested_inputs: outv = _flatten_nested_input(outv) out.append(outv) if remapped_v["id"] in set([v["source"] for v in out]): remapped_v["source"] = get_base_id(remapped_v["id"]) remapped_new.append(remapped_v) return out, remapped_new
[ "def", "_merge_wf_inputs", "(", "new", ",", "out", ",", "wf_outputs", ",", "to_ignore", ",", "parallel", ",", "nested_inputs", ")", ":", "internal_generated_ids", "=", "[", "]", "for", "vignore", "in", "to_ignore", ":", "vignore_id", "=", "_get_string_vid", "(...
Merge inputs for a sub-workflow, adding any not present inputs in out. Skips inputs that are internally generated or generated and ignored, keeping only as inputs those that we do not generate internally.
[ "Merge", "inputs", "for", "a", "sub", "-", "workflow", "adding", "any", "not", "present", "inputs", "in", "out", "." ]
python
train
BernardFW/bernard
src/bernard/engine/triggers.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/triggers.py#L119-L131
async def get_value(self): """ Get the value from the API. Make sure to use a lock in order not to fetch the value twice at the same time. """ cc = self.request.custom_content async with self.lock: if self.content_key not in cc: cc[self.content_key] = await self.call_api() return cc[self.content_key]
[ "async", "def", "get_value", "(", "self", ")", ":", "cc", "=", "self", ".", "request", ".", "custom_content", "async", "with", "self", ".", "lock", ":", "if", "self", ".", "content_key", "not", "in", "cc", ":", "cc", "[", "self", ".", "content_key", ...
Get the value from the API. Make sure to use a lock in order not to fetch the value twice at the same time.
[ "Get", "the", "value", "from", "the", "API", ".", "Make", "sure", "to", "use", "a", "lock", "in", "order", "not", "to", "fetch", "the", "value", "twice", "at", "the", "same", "time", "." ]
python
train
kennethreitz/legit
legit/cli.py
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L262-L278
def do_install(ctx, verbose, fake): """Installs legit git aliases.""" click.echo('The following git aliases will be installed:\n') aliases = cli.list_commands(ctx) output_aliases(aliases) if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake): for alias in aliases: cmd = '!legit ' + alias system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd) verbose_echo(system_command, verbose, fake) if not fake: os.system(system_command) if not fake: click.echo("\nAliases installed.") else: click.echo("\nAliases will not be installed.")
[ "def", "do_install", "(", "ctx", ",", "verbose", ",", "fake", ")", ":", "click", ".", "echo", "(", "'The following git aliases will be installed:\\n'", ")", "aliases", "=", "cli", ".", "list_commands", "(", "ctx", ")", "output_aliases", "(", "aliases", ")", "i...
Installs legit git aliases.
[ "Installs", "legit", "git", "aliases", "." ]
python
train
jf-parent/brome
brome/core/proxy_driver.py
https://github.com/jf-parent/brome/blob/784f45d96b83b703dd2181cb59ca8ea777c2510e/brome/core/proxy_driver.py#L857-L886
def embed(self, title=''): """Start an IPython embed Calling embed won't do anything in a multithread context The stack_depth will be found automatically """ if self.embed_disabled: self.warning_log("Embed is disabled when runned from the grid runner because of the multithreading") # noqa return False from IPython.terminal.embed import InteractiveShellEmbed if BROME_CONFIG['runner']['play_sound_on_ipython_embed']: say(BROME_CONFIG['runner']['sound_on_ipython_embed']) ipshell = InteractiveShellEmbed(banner1=title) frame = currentframe() stack_depth = 1 for i in range(5): frame = frame.f_back stack_depth += 1 if frame.f_code.co_filename not in __file__: break msg = 'Stopped at %s and line %s;' % \ (frame.f_code.co_filename, frame.f_lineno) ipshell(msg, stack_depth=stack_depth)
[ "def", "embed", "(", "self", ",", "title", "=", "''", ")", ":", "if", "self", ".", "embed_disabled", ":", "self", ".", "warning_log", "(", "\"Embed is disabled when runned from the grid runner because of the multithreading\"", ")", "# noqa", "return", "False", "from",...
Start an IPython embed Calling embed won't do anything in a multithread context The stack_depth will be found automatically
[ "Start", "an", "IPython", "embed" ]
python
train
saltstack/salt
salt/utils/gitfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L965-L974
def env_is_exposed(self, tgt_env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' return salt.utils.stringutils.check_whitelist_blacklist( tgt_env, whitelist=self.saltenv_whitelist, blacklist=self.saltenv_blacklist, )
[ "def", "env_is_exposed", "(", "self", ",", "tgt_env", ")", ":", "return", "salt", ".", "utils", ".", "stringutils", ".", "check_whitelist_blacklist", "(", "tgt_env", ",", "whitelist", "=", "self", ".", "saltenv_whitelist", ",", "blacklist", "=", "self", ".", ...
Check if an environment is exposed by comparing it against a whitelist and blacklist.
[ "Check", "if", "an", "environment", "is", "exposed", "by", "comparing", "it", "against", "a", "whitelist", "and", "blacklist", "." ]
python
train
kblin/ncbi-genome-download
ncbi_genome_download/__main__.py
https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/__main__.py#L8-L32
def main(): """Build and parse command line.""" parser = argument_parser(version=__version__) args = parser.parse_args() if args.debug: log_level = logging.DEBUG elif args.verbose: log_level = logging.INFO else: log_level = logging.WARNING logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level) max_retries = args.retries attempts = 0 ret = args_download(args) while ret == 75 and attempts < max_retries: attempts += 1 logging.error( 'Downloading from NCBI failed due to a connection error, retrying. Retries so far: %s', attempts) ret = args_download(args) return ret
[ "def", "main", "(", ")", ":", "parser", "=", "argument_parser", "(", "version", "=", "__version__", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "debug", ":", "log_level", "=", "logging", ".", "DEBUG", "elif", "args", "."...
Build and parse command line.
[ "Build", "and", "parse", "command", "line", "." ]
python
train
mikedh/trimesh
trimesh/path/arc.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/arc.py#L159-L205
def to_threepoint(center, radius, angles=None): """ For 2D arcs, given a center and radius convert them to three points on the arc. Parameters ----------- center : (2,) float Center point on the plane radius : float Radius of arc angles : (2,) float Angles in radians for start and end angle if not specified, will default to (0.0, pi) Returns ---------- three : (3, 2) float Arc control points """ # if no angles provided assume we want a half circle if angles is None: angles = [0.0, np.pi] # force angles to float64 angles = np.asanyarray(angles, dtype=np.float64) if angles.shape != (2,): raise ValueError('angles must be (2,)!') # provide the wrap around if angles[1] < angles[0]: angles[1] += np.pi * 2 center = np.asanyarray(center, dtype=np.float64) if center.shape != (2,): raise ValueError('only valid on 2D arcs!') # turn the angles of [start, end] # into [start, middle, end] angles = np.array([angles[0], angles.mean(), angles[1]], dtype=np.float64) # turn angles into (3,2) points three = np.column_stack((np.cos(angles), np.sin(angles))) * radius three += center return three
[ "def", "to_threepoint", "(", "center", ",", "radius", ",", "angles", "=", "None", ")", ":", "# if no angles provided assume we want a half circle", "if", "angles", "is", "None", ":", "angles", "=", "[", "0.0", ",", "np", ".", "pi", "]", "# force angles to float6...
For 2D arcs, given a center and radius convert them to three points on the arc. Parameters ----------- center : (2,) float Center point on the plane radius : float Radius of arc angles : (2,) float Angles in radians for start and end angle if not specified, will default to (0.0, pi) Returns ---------- three : (3, 2) float Arc control points
[ "For", "2D", "arcs", "given", "a", "center", "and", "radius", "convert", "them", "to", "three", "points", "on", "the", "arc", "." ]
python
train
readbeyond/aeneas
aeneas/executejob.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/executejob.py#L122-L185
def load_job_from_container(self, container_path, config_string=None): """ Load the job from the given :class:`aeneas.container.Container` object. If ``config_string`` is ``None``, the container must contain a configuration file; otherwise use the provided config string (i.e., the wizard case). :param string container_path: the path to the input container :param string config_string: the configuration string (from wizard) :raises: :class:`~aeneas.executejob.ExecuteJobInputError`: if the given container does not contain a valid :class:`~aeneas.job.Job` """ self.log(u"Loading job from container...") # create working directory where the input container # will be decompressed self.working_directory = gf.tmp_directory(root=self.rconf[RuntimeConfiguration.TMP_PATH]) self.log([u"Created working directory '%s'", self.working_directory]) try: self.log(u"Decompressing input container...") input_container = Container(container_path, logger=self.logger) input_container.decompress(self.working_directory) self.log(u"Decompressing input container... done") except Exception as exc: self.clean() self.log_exc(u"Unable to decompress container '%s': %s" % (container_path, exc), None, True, ExecuteJobInputError) try: self.log(u"Creating job from working directory...") working_container = Container( self.working_directory, logger=self.logger ) analyzer = AnalyzeContainer(working_container, logger=self.logger) self.job = analyzer.analyze(config_string=config_string) self.log(u"Creating job from working directory... 
done") except Exception as exc: self.clean() self.log_exc(u"Unable to analyze container '%s': %s" % (container_path, exc), None, True, ExecuteJobInputError) if self.job is None: self.log_exc(u"The container '%s' does not contain a valid Job" % (container_path), None, True, ExecuteJobInputError) try: # set absolute path for text file and audio file # for each task in the job self.log(u"Setting absolute paths for tasks...") for task in self.job.tasks: task.text_file_path_absolute = gf.norm_join( self.working_directory, task.text_file_path ) task.audio_file_path_absolute = gf.norm_join( self.working_directory, task.audio_file_path ) self.log(u"Setting absolute paths for tasks... done") self.log(u"Loading job from container: succeeded") except Exception as exc: self.clean() self.log_exc(u"Error while setting absolute paths for tasks", exc, True, ExecuteJobInputError)
[ "def", "load_job_from_container", "(", "self", ",", "container_path", ",", "config_string", "=", "None", ")", ":", "self", ".", "log", "(", "u\"Loading job from container...\"", ")", "# create working directory where the input container", "# will be decompressed", "self", "...
Load the job from the given :class:`aeneas.container.Container` object. If ``config_string`` is ``None``, the container must contain a configuration file; otherwise use the provided config string (i.e., the wizard case). :param string container_path: the path to the input container :param string config_string: the configuration string (from wizard) :raises: :class:`~aeneas.executejob.ExecuteJobInputError`: if the given container does not contain a valid :class:`~aeneas.job.Job`
[ "Load", "the", "job", "from", "the", "given", ":", "class", ":", "aeneas", ".", "container", ".", "Container", "object", "." ]
python
train
Kortemme-Lab/klab
klab/bio/clustalo.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/clustalo.py#L569-L597
def _determine_representative_chains(self): ''' Quotient the chains to get equivalence classes of chains. These will be used for the actual mapping.''' # todo: This logic should be moved into the FASTA class or a more general module (maybe a fast exists which uses a C/C++ library?) but at present it is easier to write here since we do not need to worry about other PDB IDs. equivalence_fiber = {} matched_chains = set() for chain_id, equivalent_chains in self.identical_sequences.iteritems(): matched_chains.add(chain_id) equivalent_chain_ids = set() for equivalent_chain in equivalent_chains: assert(len(equivalent_chain) == 6) assert((equivalent_chain[:5] == '%s_' % self.pdb_id) or (equivalent_chain[:5] == '%s:' % self.pdb_id)) # ClustalW changes e.g. 1KI1:A to 1KI1_A in its output equivalent_chain_ids.add(equivalent_chain[5]) found = False for equivalent_chain_id in equivalent_chain_ids: if equivalence_fiber.get(equivalent_chain_id): found = True assert(equivalence_fiber[equivalent_chain_id] == equivalent_chain_ids.union(set([chain_id]))) break if not found: equivalence_fiber[chain_id] = set(equivalent_chain_ids) equivalence_fiber[chain_id].add(chain_id) for c in self.chains: if c not in matched_chains: equivalence_fiber[c] = set([c]) self.equivalence_fiber = equivalence_fiber self.representative_chains = equivalence_fiber.keys()
[ "def", "_determine_representative_chains", "(", "self", ")", ":", "# todo: This logic should be moved into the FASTA class or a more general module (maybe a fast exists which uses a C/C++ library?) but at present it is easier to write here since we do not need to worry about other PDB IDs.", "equivale...
Quotient the chains to get equivalence classes of chains. These will be used for the actual mapping.
[ "Quotient", "the", "chains", "to", "get", "equivalence", "classes", "of", "chains", ".", "These", "will", "be", "used", "for", "the", "actual", "mapping", "." ]
python
train