repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
majerteam/sqla_inspect
sqla_inspect/csv.py
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/csv.py#L178-L194
def add_extra_datas(self, extra_datas):
    """
    Fill the trailing columns of the last written row.

    headers     : [col1, col2, col3, col4, col5]
    row         : {col1: a1, col2: a2, col3: a3}
    extra_datas : [a4, a5]
    row becomes : {col1: a1, col2: a2, col3: a3, col4: a4, col5: a5}

    Values are matched positionally against ``self.extra_headers``; if
    extra_datas is longer than extra_headers an IndexError is raised,
    and columns already present in the row are overwritten.

    :param list extra_datas: list of values to set in the last columns
    """
    last_row = self._datas[-1]
    for position, value in enumerate(extra_datas):
        column = self.extra_headers[position]
        last_row[column['label']] = value
[ "def", "add_extra_datas", "(", "self", ",", "extra_datas", ")", ":", "# we will add datas starting from the last index", "for", "index", ",", "data", "in", "enumerate", "(", "extra_datas", ")", ":", "header", "=", "self", ".", "extra_headers", "[", "index", "]", ...
Add extra datas to the last row headers : [col1, col2, col3, col4, col5] row : {col1: a1, col2: a2, col3: a3} extra_datas : [a4, a5] row becomes : {col1: a1, col2: a2, col3: a3, col4: a4, col5: a5} in case of longer extra_datas, the last columns will be overriden :param list extra_datas: list of values to set in the last columns
[ "Add", "extra", "datas", "to", "the", "last", "row", "headers", ":", "[", "col1", "col2", "col3", "col4", "col5", "]", "row", ":", "{", "col1", ":", "a1", "col2", ":", "a2", "col3", ":", "a3", "}", "extra_datas", ":", "[", "a4", "a5", "]" ]
python
train
PlaidWeb/Publ
publ/image/local.py
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/image/local.py#L23-L55
def fix_orientation(image):
    """
    adapted from https://stackoverflow.com/a/30462851/318857

    Apply Image.transpose to ensure 0th row of pixels is at the visual
    top of the image, and 0th column is the visual left-hand side.
    Return the original image if unable to determine the orientation.

    As per CIPA DC-008-2012, the orientation field contains an integer,
    1 through 8. Other values are reserved.

    :param image: a PIL.Image instance
    :returns: the transposed image, or the original image when the
        orientation tag is missing, unreadable, or a reserved value
    """
    exif_orientation_tag = 0x0112
    exif_transpose_sequences = [  # index = EXIF orientation value (0 unused)
        [],
        [],
        [PIL.Image.FLIP_LEFT_RIGHT],
        [PIL.Image.ROTATE_180],
        [PIL.Image.FLIP_TOP_BOTTOM],
        [PIL.Image.FLIP_LEFT_RIGHT, PIL.Image.ROTATE_90],
        [PIL.Image.ROTATE_270],
        [PIL.Image.FLIP_TOP_BOTTOM, PIL.Image.ROTATE_90],
        [PIL.Image.ROTATE_90],
    ]

    try:
        # pylint:disable=protected-access
        orientation = image._getexif()[exif_orientation_tag]
        # BUGFIX: reserved values used to misbehave — values > 8 raised an
        # uncaught IndexError and negative values silently indexed from the
        # end of the sequence list, applying a wrong transform. Per the
        # docstring contract, return the image unchanged instead.
        if not 1 <= orientation <= 8:
            return image
        sequence = exif_transpose_sequences[orientation]
        return functools.reduce(type(image).transpose, sequence, image)
    except (TypeError, AttributeError, KeyError):
        # either no EXIF tags or no orientation tag
        pass
    return image
[ "def", "fix_orientation", "(", "image", ")", ":", "exif_orientation_tag", "=", "0x0112", "exif_transpose_sequences", "=", "[", "[", "]", ",", "[", "]", ",", "[", "PIL", ".", "Image", ".", "FLIP_LEFT_RIGHT", "]", ",", "[", "PIL", ".", "Image", ".", "ROTAT...
adapted from https://stackoverflow.com/a/30462851/318857 Apply Image.transpose to ensure 0th row of pixels is at the visual top of the image, and 0th column is the visual left-hand side. Return the original image if unable to determine the orientation. As per CIPA DC-008-2012, the orientation field contains an integer, 1 through 8. Other values are reserved.
[ "adapted", "from", "https", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "30462851", "/", "318857" ]
python
train
dotzero/tilda-api-python
tilda/client.py
https://github.com/dotzero/tilda-api-python/blob/0ab984e0236cbfb676b0fbddc1ab37202d92e0a8/tilda/client.py#L137-L144
def get_page_full_export(self, page_id):
    """
    Get full page info for export and body html code.

    :param page_id: identifier of the page to export
    :return: a ``TildaPage`` built from the API response, or an empty
        list when the request fails with a ``NetworkError``
    """
    try:
        response = self._request('/getpagefullexport/', {'pageid': page_id})
        return TildaPage(**response)
    except NetworkError:
        return []
[ "def", "get_page_full_export", "(", "self", ",", "page_id", ")", ":", "try", ":", "result", "=", "self", ".", "_request", "(", "'/getpagefullexport/'", ",", "{", "'pageid'", ":", "page_id", "}", ")", "return", "TildaPage", "(", "*", "*", "result", ")", "...
Get full page info for export and body html code
[ "Get", "full", "page", "info", "for", "export", "and", "body", "html", "code" ]
python
train
lemieuxl/pyGenClean
pyGenClean/DupSNPs/duplicated_snps.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/DupSNPs/duplicated_snps.py#L564-L623
def computeFrequency(prefix, outPrefix):
    """Computes the frequency of the SNPs using Plink.

    :param prefix: the prefix of the input files.
    :param outPrefix: the prefix of the output files.

    :type prefix: str
    :type outPrefix: str

    :returns: a :py:class:`dict` containing the frequency of each marker
        (``{snpName: (maf, (a1, a2))}``).

    :raises ProgramError: if the frequency file is missing, lacks one of
        the required columns, or contains an invalid MAF value.

    Start by computing the frequency of all markers using Plink. Then, it
    reads the output file, and saves the frequency and allele information.
    """
    # The plink command
    plinkCommand = ["plink", "--noweb", "--tfile", prefix, "--freq", "--out",
                    outPrefix + ".duplicated_snps"]
    runCommand(plinkCommand)

    # Reading the frequency file
    snpFreq = {}
    try:
        with open(outPrefix + ".duplicated_snps.frq", "r") as inputFile:
            headerIndex = None
            for i, line in enumerate(inputFile):
                row = createRowFromPlinkSpacedOutput(line)

                if i == 0:
                    # This is the header: map column title -> column index.
                    headerIndex = dict([
                        (row[j], j) for j in xrange(len(row))
                    ])

                    # Checking the column titles
                    for columnTitle in ["SNP", "MAF", "A1", "A2"]:
                        if columnTitle not in headerIndex:
                            msg = "%(outPrefix)s.duplicated_snps.frq: no " \
                                  "column called %(columnTitle)s" % locals()
                            raise ProgramError(msg)
                else:
                    # This is data
                    snpName = row[headerIndex["SNP"]]
                    maf = row[headerIndex["MAF"]]
                    a1 = row[headerIndex["A1"]]
                    a2 = row[headerIndex["A2"]]

                    try:
                        # Plink reports "NA" for monomorphic markers.
                        if maf == "NA":
                            maf = 0.0
                        else:
                            maf = float(maf)
                    except ValueError:
                        msg = "%(outPrefix)s.duplicated_snps.frq: %(maf)s: " \
                              "not a valid MAF" % locals()
                        raise ProgramError(msg)

                    snpFreq[snpName] = (maf, (a1, a2))

    except IOError:
        # BUGFIX: this message used to name ".duplicated_snps.freq" while
        # the file actually opened above is ".duplicated_snps.frq".
        msg = "%(outPrefix)s.duplicated_snps.frq: no such file" % locals()
        raise ProgramError(msg)

    return snpFreq
[ "def", "computeFrequency", "(", "prefix", ",", "outPrefix", ")", ":", "# The plink command", "plinkCommand", "=", "[", "\"plink\"", ",", "\"--noweb\"", ",", "\"--tfile\"", ",", "prefix", ",", "\"--freq\"", ",", "\"--out\"", ",", "outPrefix", "+", "\".duplicated_sn...
Computes the frequency of the SNPs using Plink. :param prefix: the prefix of the input files. :param outPrefix: the prefix of the output files. :type prefix: str :type outPrefix: str :returns: a :py:class:`dict` containing the frequency of each marker. Start by computing the frequency of all markers using Plink. Then, it reads the output file, and saves the frequency and allele information.
[ "Computes", "the", "frequency", "of", "the", "SNPs", "using", "Plink", "." ]
python
train
facelessuser/backrefs
backrefs/bre.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/bre.py#L419-L431
def sub(pattern, repl, string, *args, **kwargs):
    """Apply `sub` after applying backrefs.

    Mirrors ``re.sub(pattern, repl, string, count=0, flags=0)``: any
    positional arguments beyond ``string`` are ``count`` then ``flags``.

    :param pattern: search pattern (string, bytes, or compiled pattern).
    :param repl: replacement (string, bytes, compiled replace, or callable).
    :param string: the subject text.
    :returns: the text with replacements applied.
    :raises ValueError: if a compiled replace is a format object.
    """
    # BUGFIX: flags used to be read from args[4], but relative to *args
    # (which starts after pattern/repl/string) count is args[0] and flags
    # is args[1], so positionally passed flags were silently ignored.
    count = args[0] if len(args) > 0 else kwargs.get('count', 0)
    flags = args[1] if len(args) > 1 else kwargs.get('flags', 0)
    is_replace = _is_replace(repl)
    is_string = isinstance(repl, (str, bytes))
    if is_replace and repl.use_format:
        raise ValueError("Compiled replace cannot be a format object!")

    pattern = compile_search(pattern, flags)
    # BUGFIX: flags must not be forwarded here — `pattern` is now compiled
    # and `re` rejects a flags argument alongside a compiled pattern.
    return _re.sub(
        pattern,
        (compile_replace(pattern, repl) if is_replace or is_string else repl),
        string, count
    )
[ "def", "sub", "(", "pattern", ",", "repl", ",", "string", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "flags", "=", "args", "[", "4", "]", "if", "len", "(", "args", ")", ">", "4", "else", "kwargs", ".", "get", "(", "'flags'", ",", "0...
Apply `sub` after applying backrefs.
[ "Apply", "sub", "after", "applying", "backrefs", "." ]
python
train
coursera-dl/coursera-dl
coursera/api.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/api.py#L1349-L1392
def extract_links_from_reference(self, short_id):
    """
    Return a dictionary with supplement files (pdf, csv, zip, ipynb, html
    and so on) extracted from supplement page.

    @return: @see CourseraOnDemand._extract_links_from_text
    """
    logging.debug('Gathering resource URLs for short_id <%s>.', short_id)

    try:
        dom = get_page(self._session, OPENCOURSE_REFERENCE_ITEM_URL,
                       json=True,
                       course_id=self._course_id,
                       short_id=short_id)

        links = {}
        # Supplement content has the following structure:
        #   'linked' -> 'openCourseAssets.v1' -> [{'definition': {'value': ...}}]
        for asset in dom['linked']['openCourseAssets.v1']:
            markup = asset['definition']['value']

            # Supplement lecture types are known to contain both <asset> tags
            # and <a href> tags (depending on the course), so both kinds of
            # links are extracted.
            extend_supplement_links(
                links, self._extract_links_from_text(markup))

            instructions = (IN_MEMORY_MARKER + self._markup_to_html(markup),
                            'resources')
            extend_supplement_links(
                links, {IN_MEMORY_EXTENSION: [instructions]})

        return links
    except requests.exceptions.HTTPError as exception:
        logging.error('Could not download supplement %s: %s',
                      short_id, exception)
        if is_debug_run():
            logging.exception('Could not download supplement %s: %s',
                              short_id, exception)
        return None
[ "def", "extract_links_from_reference", "(", "self", ",", "short_id", ")", ":", "logging", ".", "debug", "(", "'Gathering resource URLs for short_id <%s>.'", ",", "short_id", ")", "try", ":", "dom", "=", "get_page", "(", "self", ".", "_session", ",", "OPENCOURSE_RE...
Return a dictionary with supplement files (pdf, csv, zip, ipynb, html and so on) extracted from supplement page. @return: @see CourseraOnDemand._extract_links_from_text
[ "Return", "a", "dictionary", "with", "supplement", "files", "(", "pdf", "csv", "zip", "ipynb", "html", "and", "so", "on", ")", "extracted", "from", "supplement", "page", "." ]
python
train
hollenstein/maspy
maspy/inference.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/inference.py#L738-L811
def _findRedundantProteins(protToPeps, pepToProts, proteins=None):
    """Returns a set of proteins with redundant peptide evidence.

    After removing the redundant proteins from the "protToPeps" and
    "pepToProts" mapping, all remaining proteins have at least one unique
    peptide. The remaining proteins are a "minimal" set of proteins that
    are able to explain all peptides. However, this is not guaranteed to be
    the optimal solution with the least number of proteins. In addition it
    is possible that multiple solutions with the same number of "minimal"
    proteins exist.

    Procedure for finding the redundant proteins:
    1.  Generate a list of proteins that do not contain any unique
        peptides; a unique peptide has exactly one protein entry in
        "pepToProts".
    2.  Proteins are first sorted in ascending order of the number of
        peptides. Proteins with an equal number of peptides are sorted in
        descending order of their sorted peptide frequencies (= proteins
        per peptide). If two proteins are still equal, they are sorted
        alpha numerical in descending order according to their protein
        names. For example in the case of a tie between proteins "A" and
        "B", protein "B" would be removed.
    3.  Parse this list of sorted non unique proteins; if all its peptides
        have a frequency value greater than 1, mark the protein as
        redundant, remove its peptides from the peptide frequency count,
        and continue with the next entry.
    4.  Return the set of proteins marked as redundant.

    :param pepToProts: dict, for each peptide (=key) contains a set of
        parent proteins (=value). For example {peptide: {protein, ...}, ...}
    :param protToPeps: dict, for each protein (=key) contains a set of
        associated peptides (=value). For example
        {protein: {peptide, ...}, ...}
    :param proteins: iterable, proteins that are tested for being
        redundant. If None all proteins in "protToPeps" are parsed.
    :returns: a set of redundant proteins, i.e. proteins that are not
        necessary to explain all peptides
    """
    if proteins is None:
        proteins = viewkeys(protToPeps)

    # Number of parent proteins per peptide; decremented below whenever a
    # protein is accepted as redundant.
    pepFrequency = _getValueCounts(pepToProts)
    # NOTE(review): computed but never used anywhere in this function.
    protPepCounts = _getValueCounts(protToPeps)

    getCount = operator.itemgetter(1)
    getProt = operator.itemgetter(0)

    #TODO: quick and dirty solution
    #NOTE: add a test for merged proteins
    # Merged protein groups appear as tuples; wrap single protein names in
    # 1-tuples so the whole collection can be sorted homogeneously.
    proteinTuples = list()
    for protein in proteins:
        if isinstance(protein, tuple):
            proteinTuples.append(protein)
        else:
            proteinTuples.append(tuple([protein]))

    # Keep only proteins without any unique peptide (min frequency > 1) and
    # attach the composite sort key described in step 2 of the docstring.
    sort = list()
    for protein in sorted(proteinTuples, reverse=True):
        if len(protein) == 1:
            # Unwrap 1-tuples back to the plain protein name used as key.
            protein = protein[0]

        protPepFreq = [pepFrequency[pep] for pep in protToPeps[protein]]
        if min(protPepFreq) > 1:
            sortValue = (len(protPepFreq)*-1, sorted(protPepFreq, reverse=True))
            sort.append((protein, sortValue))
    sortedProteins = map(getProt, sorted(sort, key=getCount, reverse=True))

    # Greedy pass (step 3): a protein is redundant only if all its peptides
    # are still shared with at least one other remaining protein.
    redundantProteins = set()
    for protein in sortedProteins:
        for pep in protToPeps[protein]:
            if pepFrequency[pep] <= 1:
                break
        else:
            protPepFrequency = Counter(protToPeps[protein])
            pepFrequency.subtract(protPepFrequency)
            redundantProteins.add(protein)
    return redundantProteins
[ "def", "_findRedundantProteins", "(", "protToPeps", ",", "pepToProts", ",", "proteins", "=", "None", ")", ":", "if", "proteins", "is", "None", ":", "proteins", "=", "viewkeys", "(", "protToPeps", ")", "pepFrequency", "=", "_getValueCounts", "(", "pepToProts", ...
Returns a set of proteins with redundant peptide evidence. After removing the redundant proteins from the "protToPeps" and "pepToProts" mapping, all remaining proteins have at least one unique peptide. The remaining proteins are a "minimal" set of proteins that are able to explain all peptides. However, this is not guaranteed to be the optimal solution with the least number of proteins. In addition it is possible that multiple solutions with the same number of "minimal" proteins exist. Procedure for finding the redundant proteins: 1. Generate a list of proteins that do not contain any unique peptides, a unique peptide has exactly one protein entry in "pepToProts". 2. Proteins are first sorted in ascending order of the number of peptides. Proteins with an equal number of peptides are sorted in descending order of their sorted peptide frequencies (= proteins per peptide). If two proteins are still equal, they are sorted alpha numerical in descending order according to their protein names. For example in the case of a tie between proteins "A" and "B", protein "B" would be removed. 3. Parse this list of sorted non unique proteins; If all its peptides have a frequency value of greater 1; mark the protein as redundant; remove its peptides from the peptide frequency count, continue with the next entry. 4. Return the set of proteins marked as redundant. :param pepToProts: dict, for each peptide (=key) contains a set of parent proteins (=value). For Example {peptide: {protein, ...}, ...} :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :param proteins: iterable, proteins that are tested for being redundant. If None all proteins in "protToPeps" are parsed. :returns: a set of redundant proteins, i.e. proteins that are not necessary to explain all peptides
[ "Returns", "a", "set", "of", "proteins", "with", "redundant", "peptide", "evidence", "." ]
python
train
iotile/coretools
iotileemulate/iotile/emulate/reference/controller_features/config_database.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/reference/controller_features/config_database.py#L156-L172
def add_data(self, data):
    """Add data to the currently in progress entry.

    Args:
        data (bytes): The data that we want to add.

    Returns:
        int: An error code, or None when no entry is in progress.
    """
    remaining = self.data_size - self.data_index
    if remaining < len(data):
        return Error.DESTINATION_BUFFER_TOO_SMALL

    # NOTE(review): when no entry is in progress this method returns None
    # rather than an error code — confirm callers expect that.
    if self.in_progress is None:
        return None

    self.in_progress.data += data
    return Error.NO_ERROR
[ "def", "add_data", "(", "self", ",", "data", ")", ":", "if", "self", ".", "data_size", "-", "self", ".", "data_index", "<", "len", "(", "data", ")", ":", "return", "Error", ".", "DESTINATION_BUFFER_TOO_SMALL", "if", "self", ".", "in_progress", "is", "not...
Add data to the currently in progress entry. Args: data (bytes): The data that we want to add. Returns: int: An error code
[ "Add", "data", "to", "the", "currently", "in", "progress", "entry", "." ]
python
train
ianepperson/pyredminews
redmine/redmine.py
https://github.com/ianepperson/pyredminews/blob/b2b0581483632738a3acca3b4e093c181847b813/redmine/redmine.py#L278-L280
def resolve(self, notes=None):
    '''Save all changes and resolve this issue.

    Delegates to set_status with the Redmine "resolved" status id
    (self._redmine.ISSUE_STATUS_ID_RESOLVED).

    :param notes: optional note text to attach with the status change
    '''
    self.set_status(self._redmine.ISSUE_STATUS_ID_RESOLVED, notes=notes)
[ "def", "resolve", "(", "self", ",", "notes", "=", "None", ")", ":", "self", ".", "set_status", "(", "self", ".", "_redmine", ".", "ISSUE_STATUS_ID_RESOLVED", ",", "notes", "=", "notes", ")" ]
Save all changes and resolve this issue
[ "Save", "all", "changes", "and", "resolve", "this", "issue" ]
python
train
openmeteo/pd2hts
pd2hts/__init__.py
https://github.com/openmeteo/pd2hts/blob/b8f982046e2b99680445298b63a488dd76f6e104/pd2hts/__init__.py#L238-L245
def read_minutes_months(self, s):
    """Return a (minutes, months) tuple after parsing a "M,N" string."""
    try:
        minutes, months = [int(part.strip()) for part in s.split(',')]
        return minutes, months
    except Exception:
        # Anything unexpected — non-numeric parts, wrong number of
        # items, a non-string argument — is reported as a parsing error.
        raise ParsingError('Value should be "minutes, months"')
[ "def", "read_minutes_months", "(", "self", ",", "s", ")", ":", "try", ":", "(", "minutes", ",", "months", ")", "=", "[", "int", "(", "x", ".", "strip", "(", ")", ")", "for", "x", "in", "s", ".", "split", "(", "','", ")", "]", "return", "minutes...
Return a (minutes, months) tuple after parsing a "M,N" string.
[ "Return", "a", "(", "minutes", "months", ")", "tuple", "after", "parsing", "a", "M", "N", "string", "." ]
python
train
vintasoftware/django-role-permissions
rolepermissions/permissions.py
https://github.com/vintasoftware/django-role-permissions/blob/28924361e689e994e0c3575e18104a1a5abd8de6/rolepermissions/permissions.py#L41-L56
def available_perm_status(user):
    """
    Get a boolean map of the permissions available to a user
    based on that user's roles.

    :param user: a Django user instance exposing ``user_permissions``.
    :returns: dict mapping permission name -> True when the matching
        Permission object is among the user's granted permissions.
    """
    roles = get_user_roles(user)

    # PERF: evaluate the user's permission queryset once, instead of
    # issuing one "IN queryset" database query per permission name inside
    # the nested loops below. Django model equality/hash is pk-based, so
    # set membership yields the same result.
    granted = set(user.user_permissions.all())

    permission_hash = {}
    for role in roles:
        for permission_name in role.permission_names_list():
            permission = get_permission(permission_name)
            permission_hash[permission_name] = permission in granted
    return permission_hash
[ "def", "available_perm_status", "(", "user", ")", ":", "roles", "=", "get_user_roles", "(", "user", ")", "permission_hash", "=", "{", "}", "for", "role", "in", "roles", ":", "permission_names", "=", "role", ".", "permission_names_list", "(", ")", "for", "per...
Get a boolean map of the permissions available to a user based on that user's roles.
[ "Get", "a", "boolean", "map", "of", "the", "permissions", "available", "to", "a", "user", "based", "on", "that", "user", "s", "roles", "." ]
python
train
linnarsson-lab/loompy
loompy/loompy.py
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L547-L654
def scan(self, *, items: np.ndarray = None, axis: int = None, layers: Iterable = None, key: str = None, batch_size: int = 8 * 64) -> Iterable[Tuple[int, np.ndarray, loompy.LoomView]]:
    """
    Scan across one axis and return batches of rows (columns) as LoomView objects

    Args
    ----
    items: np.ndarray
        the indexes [0, 2, 13, ... ,973] of the rows/cols to include along the axis
        OR: boolean mask array giving the rows/cols to include
    axis: int
        0:rows or 1:cols
    batch_size: int
        the chunks returned at every element of the iterator
    layers: iterable
        if specified it will batch scan only across some of the layers of the loom file
        if layers == None, all layers will be scanned
        if layers == [""] or "", only the default layer will be scanned
    key:
        Name of primary key attribute. If specified, return the values sorted by the key

    Returns
    ------
    Iterable that yields triplets of (ix, indexes, view) where

    ix: int
        first position / how many rows/cols have been yielded already
    indexes: np.ndarray[int]
        the indexes with the same numbering of the input args cells / genes
        (i.e. ``np.arange(len(ds.shape[axis]))``)
        this is ``ix + selection``
    view: LoomView
        a view corresponding to the current chunk
    """
    if axis is None:
        raise ValueError("Axis must be given (0 = rows, 1 = cols)")
    if layers is None:
        layers = self.layers.keys()
    if layers == "":
        layers = [""]
    # A boolean mask is converted to explicit integer indices up front.
    if (items is not None) and (np.issubdtype(items.dtype, np.bool_)):
        items = np.where(items)[0]

    ordering: Union[np.ndarray, slice] = None
    vals: Dict[str, loompy.MemoryLoomLayer] = {}
    if axis == 1:
        # Scanning across columns: rows are optionally reordered by `key`.
        if key is not None:
            ordering = np.argsort(self.ra[key])
        else:
            # keep everything in original order
            ordering = slice(None)
        if items is None:
            items = np.fromiter(range(self.shape[1]), dtype='int')
        cols_per_chunk = batch_size
        ix = 0
        while ix < self.shape[1]:
            cols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)
            selection = items - ix
            # Pick out the cells that are in this batch
            selection = selection[np.where(np.logical_and(selection >= 0, selection < cols_per_chunk))[0]]
            if selection.shape[0] == 0:
                ix += cols_per_chunk
                continue

            # Load the whole chunk from the file, then extract genes and
            # cells using fancy indexing
            for layer in layers:
                temp = self.layers[layer][:, ix:ix + cols_per_chunk]
                temp = temp[ordering, :]
                temp = temp[:, selection]
                vals[layer] = loompy.MemoryLoomLayer(layer, temp)
            lm = loompy.LayerManager(None)
            # NOTE(review): this loop variable shadows the `key` parameter;
            # harmless here because `key` is only read before the while
            # loop, but worth renaming.
            for key, layer in vals.items():
                lm[key] = loompy.MemoryLoomLayer(key, layer)
            view = loompy.LoomView(lm, self.ra[ordering], self.ca[ix + selection], self.row_graphs[ordering], self.col_graphs[ix + selection], filename=self.filename, file_attrs=self.attrs)
            yield (ix, ix + selection, view)

            ix += cols_per_chunk
    elif axis == 0:
        # Scanning across rows: columns are optionally reordered by `key`.
        if key is not None:
            ordering = np.argsort(self.ca[key])
        else:
            # keep everything in original order
            ordering = slice(None)
        if items is None:
            items = np.fromiter(range(self.shape[0]), dtype='int')
        rows_per_chunk = batch_size
        ix = 0
        while ix < self.shape[0]:
            rows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)
            selection = items - ix
            # Pick out the genes that are in this batch
            selection = selection[np.where(np.logical_and(selection >= 0, selection < rows_per_chunk))[0]]
            if selection.shape[0] == 0:
                ix += rows_per_chunk
                continue

            # Load the whole chunk from the file, then extract genes and
            # cells using fancy indexing
            for layer in layers:
                temp = self.layers[layer][ix:ix + rows_per_chunk, :]
                temp = temp[:, ordering]
                temp = temp[selection, :]
                vals[layer] = loompy.MemoryLoomLayer(layer, temp)
            lm = loompy.LayerManager(None)
            # NOTE(review): same `key` shadowing as in the axis == 1 branch.
            for key, layer in vals.items():
                lm[key] = loompy.MemoryLoomLayer(key, layer)
            view = loompy.LoomView(lm, self.ra[ix + selection], self.ca[ordering], self.row_graphs[ix + selection], self.col_graphs[ordering], filename=self.filename, file_attrs=self.attrs)
            yield (ix, ix + selection, view)

            ix += rows_per_chunk
    else:
        raise ValueError("axis must be 0 or 1")
[ "def", "scan", "(", "self", ",", "*", ",", "items", ":", "np", ".", "ndarray", "=", "None", ",", "axis", ":", "int", "=", "None", ",", "layers", ":", "Iterable", "=", "None", ",", "key", ":", "str", "=", "None", ",", "batch_size", ":", "int", "...
Scan across one axis and return batches of rows (columns) as LoomView objects Args ---- items: np.ndarray the indexes [0, 2, 13, ... ,973] of the rows/cols to include along the axis OR: boolean mask array giving the rows/cols to include axis: int 0:rows or 1:cols batch_size: int the chuncks returned at every element of the iterator layers: iterable if specified it will batch scan only across some of the layers of the loom file if layers == None, all layers will be scanned if layers == [""] or "", only the default layer will be scanned key: Name of primary key attribute. If specified, return the values sorted by the key Returns ------ Iterable that yields triplets of (ix, indexes, view) where ix: int first position / how many rows/cols have been yielded alredy indexes: np.ndarray[int] the indexes with the same numbering of the input args cells / genes (i.e. ``np.arange(len(ds.shape[axis]))``) this is ``ix + selection`` view: LoomView a view corresponding to the current chunk
[ "Scan", "across", "one", "axis", "and", "return", "batches", "of", "rows", "(", "columns", ")", "as", "LoomView", "objects" ]
python
train
mardix/Mocha
mocha/render.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/render.py#L104-L118
def json(func):
    """
    Decorator to render as JSON

    When applied to a class, it is propagated to every member function;
    when applied to a function, the return value is wrapped in a JSON
    response.

    :param func: the view function or class to decorate
    :return: the decorated class or wrapped view function
    """
    if inspect.isclass(func):
        apply_function_to_members(func, json)
        return func

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return _build_response(func(*args, **kwargs), jsonify)

    return wrapper
[ "def", "json", "(", "func", ")", ":", "if", "inspect", ".", "isclass", "(", "func", ")", ":", "apply_function_to_members", "(", "func", ",", "json", ")", "return", "func", "else", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "decorated...
Decorator to render as JSON :param func: :return:
[ "Decorator", "to", "render", "as", "JSON", ":", "param", "func", ":", ":", "return", ":" ]
python
train
google/mobly
mobly/controllers/android_device_lib/service_manager.py
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device_lib/service_manager.py#L105-L112
def unregister_all(self):
    """Safely unregisters all active instances.

    Errors occurred here will be recorded but not raised.
    """
    # Snapshot the aliases first: unregister() mutates the underlying
    # mapping, so it must not be iterated directly.
    for alias in list(self._service_objects.keys()):
        self.unregister(alias)
[ "def", "unregister_all", "(", "self", ")", ":", "aliases", "=", "list", "(", "self", ".", "_service_objects", ".", "keys", "(", ")", ")", "for", "alias", "in", "aliases", ":", "self", ".", "unregister", "(", "alias", ")" ]
Safely unregisters all active instances. Errors occurred here will be recorded but not raised.
[ "Safely", "unregisters", "all", "active", "instances", "." ]
python
train
HazyResearch/metal
metal/contrib/info_extraction/utils.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/contrib/info_extraction/utils.py#L1-L76
def mark_entities(tokens, positions, markers=[], style="insert"):
    """Adds special markers around tokens at specific positions (e.g., entities)

    Args:
        tokens: A list of tokens (the sentence)
        positions:
            1) A list of inclusive (start, end) token ranges, one per
               entity, in order (each entity has a single mention), OR
            2) A dict mapping entity index -> list of inclusive ranges
               (entities may have multiple mentions).
        markers: A list of strings (length 2 * number of entities) used as
            begin/end markers; when empty, ``[[BEGINi]]``/``[[ENDi]]`` are
            generated from the entity index.
        style: 'insert' adds the markers as new tokens around each span;
            'concatenate' glues them onto the first/last token of the span.
            'insert' usually suits LSTM input; 'concatenate' suits viewing.

    Returns:
        toks: An extended list of tokens with markers around the mentions

    WARNING: if the marked token set will be used with pretrained
    embeddings, provide markers that will not result in UNK embeddings!

    Example:
        Input: (['The', 'cat', 'sat'], [(1,1)])
        Output: ['The', '[[BEGIN0]]', 'cat', '[[END0]]', 'sat']
    """
    if markers and len(markers) != 2 * len(positions):
        msg = (
            f"Expected len(markers) == 2 * len(positions), "
            f"but {len(markers)} != {2 * len(positions)}."
        )
        raise ValueError(msg)

    # Normalize both accepted shapes of `positions` into a flat list of
    # (span, entity_index) pairs.
    if isinstance(positions, list):
        spans = [(span, entity_idx)
                 for entity_idx, span in enumerate(positions)]
    elif isinstance(positions, dict):
        spans = [(span, entity_idx)
                 for entity_idx, span_list in positions.items()
                 for span in span_list]
    else:
        msg = (
            f"Argument _positions_ must be a list or dict. "
            f"Instead, got {type(positions)}"
        )
        raise ValueError(msg)

    spans.sort()
    marked = list(tokens)
    for offset, ((start, end), entity_idx) in enumerate(spans):
        if markers:
            begin_tok = markers[2 * entity_idx]
            end_tok = markers[2 * entity_idx + 1]
        else:
            begin_tok = f"[[BEGIN{entity_idx}]]"
            end_tok = f"[[END{entity_idx}]]"

        if style == "insert":
            # Each previously processed span has already added two tokens
            # to the left, hence the 2 * offset shifts.
            marked.insert(start + 2 * offset, begin_tok)
            marked.insert(end + 2 * (offset + 1), end_tok)
        elif style == "concatenate":
            marked[start] = begin_tok + marked[start]
            marked[end] = marked[end] + end_tok
        else:
            raise NotImplementedError
    return marked
[ "def", "mark_entities", "(", "tokens", ",", "positions", ",", "markers", "=", "[", "]", ",", "style", "=", "\"insert\"", ")", ":", "if", "markers", "and", "len", "(", "markers", ")", "!=", "2", "*", "len", "(", "positions", ")", ":", "msg", "=", "(...
Adds special markers around tokens at specific positions (e.g., entities) Args: tokens: A list of tokens (the sentence) positions: 1) A list of inclusive ranges (tuples) corresponding to the token ranges of the entities in order. (Assumes each entity has only one corresponding mention.) OR 2) A dict of lists with keys corresponding to mention indices and values corresponding to one or more inclusive ranges corresponding to that mention. (Allows entities to potentially have multiple mentions) markers: A list of strings (length of 2 * the number of entities) to use as markers of the entities. style: Where to apply the markers: 'insert': Insert the markers as new tokens before/after each entity 'concatenate': Prepend/append the markers to the first/last token of each entity If the tokens are going to be input to an LSTM, then it is usually best to use the 'insert' option; 'concatenate' may be better for viewing. Returns: toks: An extended list of tokens with markers around the mentions WARNING: if the marked token set will be used with pretrained embeddings, provide markers that will not result in UNK embeddings! Example: Input: (['The', 'cat', 'sat'], [(1,1)]) Output: ['The', '[[BEGIN0]]', 'cat', '[[END0]]', 'sat']
[ "Adds", "special", "markers", "around", "tokens", "at", "specific", "positions", "(", "e", ".", "g", ".", "entities", ")" ]
python
train
casastorta/python-sar
sar/parser.py
https://github.com/casastorta/python-sar/blob/e6d8bb86524102d677f37e985302fad34e3297c1/sar/parser.py#L39-L62
def load_file(self):
    """
    Loads SAR format logfile in ASCII format (sarXX).

    :return: ``True`` if loading and parsing of file went fine, \
        ``False`` if it failed (at any point)
    """
    # First split the file into per-section pieces.
    pieces = self._split_file()
    if not pieces:
        return False

    # Then parse the pieces into meaningful data.
    parsed = self._parse_file(pieces)
    # NOTE(review): a top-level 'CPU' key is treated as a failed parse —
    # presumably leftover/incomplete data; verify against _parse_file.
    if 'CPU' in parsed:
        return False

    self._sarinfo = parsed
    return True
[ "def", "load_file", "(", "self", ")", ":", "# We first split file into pieces", "searchunks", "=", "self", ".", "_split_file", "(", ")", "if", "searchunks", ":", "# And then we parse pieces into meaningful data", "usage", "=", "self", ".", "_parse_file", "(", "searchu...
Loads SAR format logfile in ASCII format (sarXX). :return: ``True`` if loading and parsing of file went fine, \ ``False`` if it failed (at any point)
[ "Loads", "SAR", "format", "logfile", "in", "ASCII", "format", "(", "sarXX", ")", ".", ":", "return", ":", "True", "if", "loading", "and", "parsing", "of", "file", "went", "fine", "\\", "False", "if", "it", "failed", "(", "at", "any", "point", ")" ]
python
train
openstack/horizon
openstack_auth/utils.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/utils.py#L182-L257
def get_websso_url(request, auth_url, websso_auth): """Return the keystone endpoint for initiating WebSSO. Generate the keystone WebSSO endpoint that will redirect the user to the login page of the federated identity provider. Based on the authentication type selected by the user in the login form, it will construct the keystone WebSSO endpoint. :param request: Django http request object. :type request: django.http.HttpRequest :param auth_url: Keystone endpoint configured in the horizon setting. If WEBSSO_KEYSTONE_URL is defined, its value will be used. Otherwise, the value is derived from: - OPENSTACK_KEYSTONE_URL - AVAILABLE_REGIONS :type auth_url: string :param websso_auth: Authentication type selected by the user from the login form. The value is derived from the horizon setting WEBSSO_CHOICES. :type websso_auth: string Example of horizon WebSSO setting:: WEBSSO_CHOICES = ( ("credentials", "Keystone Credentials"), ("oidc", "OpenID Connect"), ("saml2", "Security Assertion Markup Language"), ("acme_oidc", "ACME - OpenID Connect"), ("acme_saml2", "ACME - SAML2") ) WEBSSO_IDP_MAPPING = { "acme_oidc": ("acme", "oidc"), "acme_saml2": ("acme", "saml2") } } The value of websso_auth will be looked up in the WEBSSO_IDP_MAPPING dictionary, if a match is found it will return a IdP specific WebSSO endpoint using the values found in the mapping. The value in WEBSSO_IDP_MAPPING is expected to be a tuple formatted as (<idp_id>, <protocol_id>). Using the values found, a IdP/protocol specific URL will be constructed:: /auth/OS-FEDERATION/identity_providers/<idp_id> /protocols/<protocol_id>/websso If no value is found from the WEBSSO_IDP_MAPPING dictionary, it will treat the value as the global WebSSO protocol <protocol_id> and construct the WebSSO URL by:: /auth/OS-FEDERATION/websso/<protocol_id> :returns: Keystone WebSSO endpoint. 
:rtype: string """ origin = build_absolute_uri(request, '/auth/websso/') idp_mapping = getattr(settings, 'WEBSSO_IDP_MAPPING', {}) idp_id, protocol_id = idp_mapping.get(websso_auth, (None, websso_auth)) if idp_id: # Use the IDP specific WebSSO endpoint url = ('%s/auth/OS-FEDERATION/identity_providers/%s' '/protocols/%s/websso?origin=%s' % (auth_url, idp_id, protocol_id, origin)) else: # If no IDP mapping found for the identifier, # perform WebSSO by protocol. url = ('%s/auth/OS-FEDERATION/websso/%s?origin=%s' % (auth_url, protocol_id, origin)) return url
[ "def", "get_websso_url", "(", "request", ",", "auth_url", ",", "websso_auth", ")", ":", "origin", "=", "build_absolute_uri", "(", "request", ",", "'/auth/websso/'", ")", "idp_mapping", "=", "getattr", "(", "settings", ",", "'WEBSSO_IDP_MAPPING'", ",", "{", "}", ...
Return the keystone endpoint for initiating WebSSO. Generate the keystone WebSSO endpoint that will redirect the user to the login page of the federated identity provider. Based on the authentication type selected by the user in the login form, it will construct the keystone WebSSO endpoint. :param request: Django http request object. :type request: django.http.HttpRequest :param auth_url: Keystone endpoint configured in the horizon setting. If WEBSSO_KEYSTONE_URL is defined, its value will be used. Otherwise, the value is derived from: - OPENSTACK_KEYSTONE_URL - AVAILABLE_REGIONS :type auth_url: string :param websso_auth: Authentication type selected by the user from the login form. The value is derived from the horizon setting WEBSSO_CHOICES. :type websso_auth: string Example of horizon WebSSO setting:: WEBSSO_CHOICES = ( ("credentials", "Keystone Credentials"), ("oidc", "OpenID Connect"), ("saml2", "Security Assertion Markup Language"), ("acme_oidc", "ACME - OpenID Connect"), ("acme_saml2", "ACME - SAML2") ) WEBSSO_IDP_MAPPING = { "acme_oidc": ("acme", "oidc"), "acme_saml2": ("acme", "saml2") } } The value of websso_auth will be looked up in the WEBSSO_IDP_MAPPING dictionary, if a match is found it will return a IdP specific WebSSO endpoint using the values found in the mapping. The value in WEBSSO_IDP_MAPPING is expected to be a tuple formatted as (<idp_id>, <protocol_id>). Using the values found, a IdP/protocol specific URL will be constructed:: /auth/OS-FEDERATION/identity_providers/<idp_id> /protocols/<protocol_id>/websso If no value is found from the WEBSSO_IDP_MAPPING dictionary, it will treat the value as the global WebSSO protocol <protocol_id> and construct the WebSSO URL by:: /auth/OS-FEDERATION/websso/<protocol_id> :returns: Keystone WebSSO endpoint. :rtype: string
[ "Return", "the", "keystone", "endpoint", "for", "initiating", "WebSSO", "." ]
python
train
gem/oq-engine
openquake/hmtk/plotting/mapping.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/plotting/mapping.py#L177-L184
def savemap(self, filename, filetype='png', papertype="a4"): """ Save the figure """ self.fig.savefig(filename, dpi=self.dpi, format=filetype, papertype=papertype)
[ "def", "savemap", "(", "self", ",", "filename", ",", "filetype", "=", "'png'", ",", "papertype", "=", "\"a4\"", ")", ":", "self", ".", "fig", ".", "savefig", "(", "filename", ",", "dpi", "=", "self", ".", "dpi", ",", "format", "=", "filetype", ",", ...
Save the figure
[ "Save", "the", "figure" ]
python
train
pypa/pipenv
pipenv/vendor/passa/internals/_pip.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/passa/internals/_pip.py#L113-L130
def _convert_hashes(values): """Convert Pipfile.lock hash lines into InstallRequirement option format. The option format uses a str-list mapping. Keys are hash algorithms, and the list contains all values of that algorithm. """ hashes = {} if not values: return hashes for value in values: try: name, value = value.split(":", 1) except ValueError: name = "sha256" if name not in hashes: hashes[name] = [] hashes[name].append(value) return hashes
[ "def", "_convert_hashes", "(", "values", ")", ":", "hashes", "=", "{", "}", "if", "not", "values", ":", "return", "hashes", "for", "value", "in", "values", ":", "try", ":", "name", ",", "value", "=", "value", ".", "split", "(", "\":\"", ",", "1", "...
Convert Pipfile.lock hash lines into InstallRequirement option format. The option format uses a str-list mapping. Keys are hash algorithms, and the list contains all values of that algorithm.
[ "Convert", "Pipfile", ".", "lock", "hash", "lines", "into", "InstallRequirement", "option", "format", "." ]
python
train
nion-software/nionswift
nion/typeshed/API_1_0.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/typeshed/API_1_0.py#L1094-L1108
def create_data_and_metadata(self, data: numpy.ndarray, intensity_calibration: Calibration.Calibration=None, dimensional_calibrations: typing.List[Calibration.Calibration]=None, metadata: dict=None, timestamp: str=None, data_descriptor: DataAndMetadata.DataDescriptor=None) -> DataAndMetadata.DataAndMetadata: """Create a data_and_metadata object from data. :param data: an ndarray of data. :param intensity_calibration: An optional calibration object. :param dimensional_calibrations: An optional list of calibration objects. :param metadata: A dict of metadata. :param timestamp: A datetime object. :param data_descriptor: A data descriptor describing the dimensions. .. versionadded:: 1.0 Scriptable: Yes """ ...
[ "def", "create_data_and_metadata", "(", "self", ",", "data", ":", "numpy", ".", "ndarray", ",", "intensity_calibration", ":", "Calibration", ".", "Calibration", "=", "None", ",", "dimensional_calibrations", ":", "typing", ".", "List", "[", "Calibration", ".", "C...
Create a data_and_metadata object from data. :param data: an ndarray of data. :param intensity_calibration: An optional calibration object. :param dimensional_calibrations: An optional list of calibration objects. :param metadata: A dict of metadata. :param timestamp: A datetime object. :param data_descriptor: A data descriptor describing the dimensions. .. versionadded:: 1.0 Scriptable: Yes
[ "Create", "a", "data_and_metadata", "object", "from", "data", "." ]
python
train
saltstack/salt
salt/modules/glassfish.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glassfish.py#L361-L386
def create_connector_resource(name, server=None, **kwargs): ''' Create a connection resource ''' defaults = { 'description': '', 'enabled': True, 'id': name, 'poolName': '', 'objectType': 'user', 'target': 'server' } # Data = defaults + merge kwargs + poolname data = defaults data.update(kwargs) if not data['poolName']: raise CommandExecutionError('No pool name!') # Fix for lowercase vs camelCase naming differences for key, value in list(data.items()): del data[key] data[key.lower()] = value return _create_element(name, 'resources/connector-resource', data, server)
[ "def", "create_connector_resource", "(", "name", ",", "server", "=", "None", ",", "*", "*", "kwargs", ")", ":", "defaults", "=", "{", "'description'", ":", "''", ",", "'enabled'", ":", "True", ",", "'id'", ":", "name", ",", "'poolName'", ":", "''", ","...
Create a connection resource
[ "Create", "a", "connection", "resource" ]
python
train
jjgomera/iapws
iapws/humidAir.py
https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/humidAir.py#L729-L761
def _eq(self, T, P): """Procedure for calculate the composition in saturation state Parameters ---------- T : float Temperature [K] P : float Pressure [MPa] Returns ------- Asat : float Saturation mass fraction of dry air in humid air [kg/kg] """ if T <= 273.16: ice = _Ice(T, P) gw = ice["g"] else: water = IAPWS95(T=T, P=P) gw = water.g def f(parr): rho, a = parr if a > 1: a = 1 fa = self._fav(T, rho, a) muw = fa["fir"]+rho*fa["fird"]-a*fa["fira"] return gw-muw, rho**2*fa["fird"]/1000-P rinput = fsolve(f, [1, 0.95], full_output=True) Asat = rinput[0][1] return Asat
[ "def", "_eq", "(", "self", ",", "T", ",", "P", ")", ":", "if", "T", "<=", "273.16", ":", "ice", "=", "_Ice", "(", "T", ",", "P", ")", "gw", "=", "ice", "[", "\"g\"", "]", "else", ":", "water", "=", "IAPWS95", "(", "T", "=", "T", ",", "P",...
Procedure for calculate the composition in saturation state Parameters ---------- T : float Temperature [K] P : float Pressure [MPa] Returns ------- Asat : float Saturation mass fraction of dry air in humid air [kg/kg]
[ "Procedure", "for", "calculate", "the", "composition", "in", "saturation", "state" ]
python
train
toomore/goristock
grs/all_portf.py
https://github.com/toomore/goristock/blob/e61f57f11a626cfbc4afbf66337fd9d1c51e3e71/grs/all_portf.py#L110-L112
def B4PB(self): ''' 判斷是否為四大買點 ''' return self.ckMinsGLI and (self.B1 or self.B2 or self.B3 or self.B4)
[ "def", "B4PB", "(", "self", ")", ":", "return", "self", ".", "ckMinsGLI", "and", "(", "self", ".", "B1", "or", "self", ".", "B2", "or", "self", ".", "B3", "or", "self", ".", "B4", ")" ]
判斷是否為四大買點
[ "判斷是否為四大買點" ]
python
train
diux-dev/ncluster
ncluster/aws_util.py
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L415-L420
def validate_aws_name(name): """Validate resource name using AWS name restrictions from # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions""" assert len(name) <= 127 # disallow unicode characters to avoid pain assert name == name.encode('ascii').decode('ascii') assert aws_name_regexp.match(name)
[ "def", "validate_aws_name", "(", "name", ")", ":", "assert", "len", "(", "name", ")", "<=", "127", "# disallow unicode characters to avoid pain", "assert", "name", "==", "name", ".", "encode", "(", "'ascii'", ")", ".", "decode", "(", "'ascii'", ")", "assert", ...
Validate resource name using AWS name restrictions from # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
[ "Validate", "resource", "name", "using", "AWS", "name", "restrictions", "from", "#", "http", ":", "//", "docs", ".", "aws", ".", "amazon", ".", "com", "/", "AWSEC2", "/", "latest", "/", "UserGuide", "/", "Using_Tags", ".", "html#tag", "-", "restrictions" ]
python
train
ray-project/ray
python/ray/actor.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/actor.py#L869-L884
def get_checkpoints_for_actor(actor_id): """Get the available checkpoints for the given actor ID, return a list sorted by checkpoint timestamp in descending order. """ checkpoint_info = ray.worker.global_state.actor_checkpoint_info(actor_id) if checkpoint_info is None: return [] checkpoints = [ Checkpoint(checkpoint_id, timestamp) for checkpoint_id, timestamp in zip(checkpoint_info["CheckpointIds"], checkpoint_info["Timestamps"]) ] return sorted( checkpoints, key=lambda checkpoint: checkpoint.timestamp, reverse=True, )
[ "def", "get_checkpoints_for_actor", "(", "actor_id", ")", ":", "checkpoint_info", "=", "ray", ".", "worker", ".", "global_state", ".", "actor_checkpoint_info", "(", "actor_id", ")", "if", "checkpoint_info", "is", "None", ":", "return", "[", "]", "checkpoints", "...
Get the available checkpoints for the given actor ID, return a list sorted by checkpoint timestamp in descending order.
[ "Get", "the", "available", "checkpoints", "for", "the", "given", "actor", "ID", "return", "a", "list", "sorted", "by", "checkpoint", "timestamp", "in", "descending", "order", "." ]
python
train
spyder-ide/spyder
spyder/plugins/console/widgets/shell.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/console/widgets/shell.py#L768-L777
def _key_question(self, text): """Action for '?'""" if self.get_current_line_to_cursor(): last_obj = self.get_last_obj() if last_obj and not last_obj.isdigit(): self.show_object_info(last_obj) self.insert_text(text) # In case calltip and completion are shown at the same time: if self.is_completion_widget_visible(): self.completion_text += '?'
[ "def", "_key_question", "(", "self", ",", "text", ")", ":", "if", "self", ".", "get_current_line_to_cursor", "(", ")", ":", "last_obj", "=", "self", ".", "get_last_obj", "(", ")", "if", "last_obj", "and", "not", "last_obj", ".", "isdigit", "(", ")", ":",...
Action for '?
[ "Action", "for", "?" ]
python
train
davidmogar/cucco
cucco/batch.py
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L200-L208
def on_modified(self, event): """Function called everytime a new file is modified. Args: event: Event to process. """ self._logger.debug('Detected modify event on watched path: %s', event.src_path) self._process_event(event)
[ "def", "on_modified", "(", "self", ",", "event", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'Detected modify event on watched path: %s'", ",", "event", ".", "src_path", ")", "self", ".", "_process_event", "(", "event", ")" ]
Function called everytime a new file is modified. Args: event: Event to process.
[ "Function", "called", "everytime", "a", "new", "file", "is", "modified", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/provenance/system.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/system.py#L77-L99
def _torque_info(queue): """Return machine information for a torque job scheduler using pbsnodes. To identify which host to use it tries to parse available hosts from qstat -Qf `acl_hosts`. If found, it uses these and gets the first node from pbsnodes matching to the list. If no attached hosts are available, it uses the first host found from pbsnodes. """ nodes = _torque_queue_nodes(queue) pbs_out = subprocess.check_output(["pbsnodes"]).decode() info = {} for i, line in enumerate(pbs_out.split("\n")): if i == 0 and len(nodes) == 0: info["name"] = line.strip() elif line.startswith(nodes): info["name"] = line.strip() elif info.get("name"): if line.strip().startswith("np = "): info["cores"] = int(line.replace("np = ", "").strip()) elif line.strip().startswith("status = "): mem = [x for x in pbs_out.split(",") if x.startswith("physmem=")][0] info["memory"] = float(mem.split("=")[1].rstrip("kb")) / 1048576.0 return [info]
[ "def", "_torque_info", "(", "queue", ")", ":", "nodes", "=", "_torque_queue_nodes", "(", "queue", ")", "pbs_out", "=", "subprocess", ".", "check_output", "(", "[", "\"pbsnodes\"", "]", ")", ".", "decode", "(", ")", "info", "=", "{", "}", "for", "i", ",...
Return machine information for a torque job scheduler using pbsnodes. To identify which host to use it tries to parse available hosts from qstat -Qf `acl_hosts`. If found, it uses these and gets the first node from pbsnodes matching to the list. If no attached hosts are available, it uses the first host found from pbsnodes.
[ "Return", "machine", "information", "for", "a", "torque", "job", "scheduler", "using", "pbsnodes", "." ]
python
train
clalancette/pycdlib
pycdlib/udf.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/udf.py#L2383-L2412
def new(self): # type: () -> None ''' A method to create a new UDF Logical Volume Integrity Descriptor. Parameters: None. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Integrity Descriptor already initialized') self.desc_tag = UDFTag() self.desc_tag.new(9) # FIXME: we should let the user set serial_number self.recording_date = UDFTimestamp() self.recording_date.new() self.length_impl_use = 46 self.free_space_table = 0 # FIXME: let the user set this self.size_table = 3 self.logical_volume_contents_use = UDFLogicalVolumeHeaderDescriptor() self.logical_volume_contents_use.new() self.logical_volume_impl_use = UDFLogicalVolumeImplementationUse() self.logical_volume_impl_use.new() self._initialized = True
[ "def", "new", "(", "self", ")", ":", "# type: () -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'UDF Logical Volume Integrity Descriptor already initialized'", ")", "self", ".", "desc_tag", "=", "UDFTag"...
A method to create a new UDF Logical Volume Integrity Descriptor. Parameters: None. Returns: Nothing.
[ "A", "method", "to", "create", "a", "new", "UDF", "Logical", "Volume", "Integrity", "Descriptor", "." ]
python
train
monarch-initiative/dipper
dipper/sources/KEGG.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L137-L173
def parse(self, limit=None): """ :param limit: :return: """ if limit is not None: LOG.info("Only parsing first %s rows fo each file", str(limit)) LOG.info("Parsing files...") if self.test_only: self.test_mode = True self._process_diseases(limit) self._process_genes(limit) self._process_genes_kegg2ncbi(limit) self._process_omim2gene(limit) self._process_omim2disease(limit) self._process_kegg_disease2gene(limit) self._process_pathways(limit) self._process_pathway_pubmed(limit) # self._process_pathway_pathway(limit) self._process_pathway_disease(limit) self._process_pathway_ko(limit) self._process_ortholog_classes(limit) # TODO add in when refactoring for #141 # for f in ['hsa_orthologs', 'mmu_orthologs', 'rno_orthologs', # 'dme_orthologs','dre_orthologs','cel_orthologs']: # file = '/'.join((self.rawdir, self.files[f]['file'])) # self._process_orthologs(file, limit) # DONE # LOG.info("Finished parsing") return
[ "def", "parse", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "limit", "is", "not", "None", ":", "LOG", ".", "info", "(", "\"Only parsing first %s rows fo each file\"", ",", "str", "(", "limit", ")", ")", "LOG", ".", "info", "(", "\"Parsing fil...
:param limit: :return:
[ ":", "param", "limit", ":", ":", "return", ":" ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/__init__.py#L236-L257
def _set_network(self, v, load=False): """ Setter method for network, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/network (list) If this variable is read-only (config: false) in the source YANG file, then _set_network is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_network() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("network_ipv4_address",network.network, yang_name="network", rest_name="network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='network-ipv4-address', extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}), is_container='list', yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """network must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("network_ipv4_address",network.network, yang_name="network", rest_name="network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='network-ipv4-address', extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, 
u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}), is_container='list', yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)""", }) self.__network = t if hasattr(self, '_set'): self._set()
[ "def", "_set_network", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base",...
Setter method for network, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/network (list) If this variable is read-only (config: false) in the source YANG file, then _set_network is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_network() directly.
[ "Setter", "method", "for", "network", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "router", "/", "router_bgp", "/", "address_family", "/", "ipv4", "/", "ipv4_unicast", "/", "default_vrf", "/", "network", "(", "list", ")", "If", "this", "var...
python
train
F5Networks/f5-common-python
f5/multi_device/device_group.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/multi_device/device_group.py#L104-L116
def _set_attributes(self, **kwargs): '''Set instance attributes based on kwargs :param kwargs: dict -- kwargs to set as attributes ''' try: self.devices = kwargs['devices'][:] self.name = kwargs['device_group_name'] self.type = kwargs['device_group_type'] self.partition = kwargs['device_group_partition'] except KeyError as ex: raise MissingRequiredDeviceGroupParameter(ex)
[ "def", "_set_attributes", "(", "self", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "devices", "=", "kwargs", "[", "'devices'", "]", "[", ":", "]", "self", ".", "name", "=", "kwargs", "[", "'device_group_name'", "]", "self", ".", "type...
Set instance attributes based on kwargs :param kwargs: dict -- kwargs to set as attributes
[ "Set", "instance", "attributes", "based", "on", "kwargs" ]
python
train
sendgrid/sendgrid-python
sendgrid/helpers/mail/mail.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/mail.py#L714-L733
def add_content(self, content, mime_type=None): """Add content to the email :param contents: Content to be added to the email :type contents: Content :param mime_type: Override the mime type :type mime_type: MimeType, str """ if isinstance(content, str): content = Content(mime_type, content) # Content of mime type text/plain must always come first if content.mime_type == "text/plain": self._contents = self._ensure_insert(content, self._contents) else: if self._contents: index = len(self._contents) else: index = 0 self._contents = self._ensure_append( content, self._contents, index=index)
[ "def", "add_content", "(", "self", ",", "content", ",", "mime_type", "=", "None", ")", ":", "if", "isinstance", "(", "content", ",", "str", ")", ":", "content", "=", "Content", "(", "mime_type", ",", "content", ")", "# Content of mime type text/plain must alwa...
Add content to the email :param contents: Content to be added to the email :type contents: Content :param mime_type: Override the mime type :type mime_type: MimeType, str
[ "Add", "content", "to", "the", "email" ]
python
train
apache/incubator-mxnet
python/mxnet/ndarray/ndarray.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2050-L2094
def copyto(self, other): """Copies the value of this array to another array. If ``other`` is a ``NDArray`` object, then ``other.shape`` and ``self.shape`` should be the same. This function copies the value from ``self`` to ``other``. If ``other`` is a context, a new ``NDArray`` will be first created on the target context, and the value of ``self`` is copied. Parameters ---------- other : NDArray or Context The destination array or context. Returns ------- NDArray, CSRNDArray or RowSparseNDArray The copied array. If ``other`` is an ``NDArray``, then the return value and ``other`` will point to the same ``NDArray``. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.zeros((2,3), mx.gpu(0)) >>> z = x.copyto(y) >>> z is y True >>> y.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.copyto(mx.gpu(0)) <NDArray 2x3 @gpu(0)> """ if isinstance(other, NDArray): if other.handle is self.handle: warnings.warn('You are attempting to copy an array to itself', RuntimeWarning) return False return _internal._copyto(self, out=other) elif isinstance(other, Context): hret = NDArray(_new_alloc_handle(self.shape, other, True, self.dtype)) return _internal._copyto(self, out=hret) else: raise TypeError('copyto does not support type ' + str(type(other)))
[ "def", "copyto", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "NDArray", ")", ":", "if", "other", ".", "handle", "is", "self", ".", "handle", ":", "warnings", ".", "warn", "(", "'You are attempting to copy an array to itself'",...
Copies the value of this array to another array. If ``other`` is a ``NDArray`` object, then ``other.shape`` and ``self.shape`` should be the same. This function copies the value from ``self`` to ``other``. If ``other`` is a context, a new ``NDArray`` will be first created on the target context, and the value of ``self`` is copied. Parameters ---------- other : NDArray or Context The destination array or context. Returns ------- NDArray, CSRNDArray or RowSparseNDArray The copied array. If ``other`` is an ``NDArray``, then the return value and ``other`` will point to the same ``NDArray``. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.zeros((2,3), mx.gpu(0)) >>> z = x.copyto(y) >>> z is y True >>> y.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.copyto(mx.gpu(0)) <NDArray 2x3 @gpu(0)>
[ "Copies", "the", "value", "of", "this", "array", "to", "another", "array", "." ]
python
train
pyviz/holoviews
holoviews/core/io.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/io.py#L124-L129
def _filename(self_or_cls, filename): "Add the file extension if not already present" if not filename.endswith(self_or_cls.file_ext): return '%s.%s' % (filename, self_or_cls.file_ext) else: return filename
[ "def", "_filename", "(", "self_or_cls", ",", "filename", ")", ":", "if", "not", "filename", ".", "endswith", "(", "self_or_cls", ".", "file_ext", ")", ":", "return", "'%s.%s'", "%", "(", "filename", ",", "self_or_cls", ".", "file_ext", ")", "else", ":", ...
Add the file extension if not already present
[ "Add", "the", "file", "extension", "if", "not", "already", "present" ]
python
train
StagPython/StagPy
stagpy/stagyyparsers.py
https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/stagyyparsers.py#L576-L582
def _get_field(xdmf_file, data_item): """Extract field from data item.""" shp = _get_dim(data_item) h5file, group = data_item.text.strip().split(':/', 1) icore = int(group.split('_')[-2]) - 1 fld = _read_group_h5(xdmf_file.parent / h5file, group).reshape(shp) return icore, fld
[ "def", "_get_field", "(", "xdmf_file", ",", "data_item", ")", ":", "shp", "=", "_get_dim", "(", "data_item", ")", "h5file", ",", "group", "=", "data_item", ".", "text", ".", "strip", "(", ")", ".", "split", "(", "':/'", ",", "1", ")", "icore", "=", ...
Extract field from data item.
[ "Extract", "field", "from", "data", "item", "." ]
python
train
casacore/python-casacore
casacore/images/image.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/image.py#L237-L239
def attrget(self, groupname, attrname, rownr): """Get the value of an attribute in the given row in a group.""" return self._attrget(groupname, attrname, rownr)
[ "def", "attrget", "(", "self", ",", "groupname", ",", "attrname", ",", "rownr", ")", ":", "return", "self", ".", "_attrget", "(", "groupname", ",", "attrname", ",", "rownr", ")" ]
Get the value of an attribute in the given row in a group.
[ "Get", "the", "value", "of", "an", "attribute", "in", "the", "given", "row", "in", "a", "group", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L3908-L3936
def break_at(self, pid, address, action = None): """ Sets a code breakpoint at the given process and address. If instead of an address you pass a label, the breakpoint may be deferred until the DLL it points to is loaded. @see: L{stalk_at}, L{dont_break_at} @type pid: int @param pid: Process global ID. @type address: int or str @param address: Memory address of code instruction to break at. It can be an integer value for the actual address or a string with a label to be resolved. @type action: function @param action: (Optional) Action callback function. See L{define_code_breakpoint} for more details. @rtype: bool @return: C{True} if the breakpoint was set immediately, or C{False} if it was deferred. """ bp = self.__set_break(pid, address, action, oneshot = False) return bp is not None
[ "def", "break_at", "(", "self", ",", "pid", ",", "address", ",", "action", "=", "None", ")", ":", "bp", "=", "self", ".", "__set_break", "(", "pid", ",", "address", ",", "action", ",", "oneshot", "=", "False", ")", "return", "bp", "is", "not", "Non...
Sets a code breakpoint at the given process and address. If instead of an address you pass a label, the breakpoint may be deferred until the DLL it points to is loaded. @see: L{stalk_at}, L{dont_break_at} @type pid: int @param pid: Process global ID. @type address: int or str @param address: Memory address of code instruction to break at. It can be an integer value for the actual address or a string with a label to be resolved. @type action: function @param action: (Optional) Action callback function. See L{define_code_breakpoint} for more details. @rtype: bool @return: C{True} if the breakpoint was set immediately, or C{False} if it was deferred.
[ "Sets", "a", "code", "breakpoint", "at", "the", "given", "process", "and", "address", "." ]
python
train
mpg-age-bioinformatics/AGEpy
AGEpy/rbiom.py
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/rbiom.py#L16-L26
def RdatabasesBM(host=rbiomart_host): """ Lists BioMart databases through a RPY2 connection. :param host: address of the host server, default='www.ensembl.org' :returns: nothing """ biomaRt = importr("biomaRt") print(biomaRt.listMarts(host=host))
[ "def", "RdatabasesBM", "(", "host", "=", "rbiomart_host", ")", ":", "biomaRt", "=", "importr", "(", "\"biomaRt\"", ")", "print", "(", "biomaRt", ".", "listMarts", "(", "host", "=", "host", ")", ")" ]
Lists BioMart databases through a RPY2 connection. :param host: address of the host server, default='www.ensembl.org' :returns: nothing
[ "Lists", "BioMart", "databases", "through", "a", "RPY2", "connection", "." ]
python
train
Netuitive/netuitive-client-python
netuitive/client.py
https://github.com/Netuitive/netuitive-client-python/blob/16426ade6a5dc0888ce978c97b02663a9713fc16/netuitive/client.py#L159-L195
def post_check(self, check): """ :param check: Check to post to Metricly :type check: object """ if self.disabled is True: logging.error('Posting has been disabled. ' 'See previous errors for details.') return(False) url = self.checkurl + '/' \ + check.name + '/' \ + check.elementId + '/' \ + str(check.ttl) headers = {'User-Agent': self.agent} try: request = urllib2.Request( url, data='', headers=headers) resp = self._repeat_request(request, self.connection_timeout) logging.debug("Response code: %d", resp.getcode()) resp.close() return(True) except urllib2.HTTPError as e: logging.debug("Response code: %d", e.code) if e.code in self.kill_codes: self.disabled = True logging.exception('Posting has been disabled.' 'See previous errors for details.') else: logging.exception( 'HTTPError posting payload to api ingest endpoint' + ' (%s): %s', url, e)
[ "def", "post_check", "(", "self", ",", "check", ")", ":", "if", "self", ".", "disabled", "is", "True", ":", "logging", ".", "error", "(", "'Posting has been disabled. '", "'See previous errors for details.'", ")", "return", "(", "False", ")", "url", "=", "self...
:param check: Check to post to Metricly :type check: object
[ ":", "param", "check", ":", "Check", "to", "post", "to", "Metricly", ":", "type", "check", ":", "object" ]
python
train
gvanderheide/discreteMarkovChain
discreteMarkovChain/markovChain.py
https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L341-L349
def convertToRateMatrix(self, Q): """ Converts the initial matrix to a rate matrix. We make all rows in Q sum to zero by subtracting the row sums from the diagonal. """ rowSums = Q.sum(axis=1).getA1() idxRange = np.arange(Q.shape[0]) Qdiag = coo_matrix((rowSums,(idxRange,idxRange)),shape=Q.shape).tocsr() return Q-Qdiag
[ "def", "convertToRateMatrix", "(", "self", ",", "Q", ")", ":", "rowSums", "=", "Q", ".", "sum", "(", "axis", "=", "1", ")", ".", "getA1", "(", ")", "idxRange", "=", "np", ".", "arange", "(", "Q", ".", "shape", "[", "0", "]", ")", "Qdiag", "=", ...
Converts the initial matrix to a rate matrix. We make all rows in Q sum to zero by subtracting the row sums from the diagonal.
[ "Converts", "the", "initial", "matrix", "to", "a", "rate", "matrix", ".", "We", "make", "all", "rows", "in", "Q", "sum", "to", "zero", "by", "subtracting", "the", "row", "sums", "from", "the", "diagonal", "." ]
python
train
ianmiell/shutit
shutit_class.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L4646-L4658
def is_ready(self, shutit_module_obj): """Returns true if this module is ready to be built. Caches the result (as it's assumed not to change during the build). """ shutit_global.shutit_global_object.yield_to_draw() if shutit_module_obj.module_id in self.get_current_shutit_pexpect_session_environment().modules_ready: self.log('is_ready: returning True from cache',level=logging.DEBUG) return True ready = shutit_module_obj.check_ready(self) if ready: self.get_current_shutit_pexpect_session_environment().modules_ready.append(shutit_module_obj.module_id) return True return False
[ "def", "is_ready", "(", "self", ",", "shutit_module_obj", ")", ":", "shutit_global", ".", "shutit_global_object", ".", "yield_to_draw", "(", ")", "if", "shutit_module_obj", ".", "module_id", "in", "self", ".", "get_current_shutit_pexpect_session_environment", "(", ")"...
Returns true if this module is ready to be built. Caches the result (as it's assumed not to change during the build).
[ "Returns", "true", "if", "this", "module", "is", "ready", "to", "be", "built", ".", "Caches", "the", "result", "(", "as", "it", "s", "assumed", "not", "to", "change", "during", "the", "build", ")", "." ]
python
train
myint/rstcheck
rstcheck.py
https://github.com/myint/rstcheck/blob/2f975906b75f3b88d501ef3b13d213815cf7079a/rstcheck.py#L736-L741
def visit_paragraph(self, node): """Check syntax of reStructuredText.""" find = re.search(r'\[[^\]]+\]\([^\)]+\)', node.rawsource) if find is not None: self.document.reporter.warning( '(rst) Link is formatted in Markdown style.', base_node=node)
[ "def", "visit_paragraph", "(", "self", ",", "node", ")", ":", "find", "=", "re", ".", "search", "(", "r'\\[[^\\]]+\\]\\([^\\)]+\\)'", ",", "node", ".", "rawsource", ")", "if", "find", "is", "not", "None", ":", "self", ".", "document", ".", "reporter", "....
Check syntax of reStructuredText.
[ "Check", "syntax", "of", "reStructuredText", "." ]
python
train
Yubico/python-pyhsm
pyhsm/util.py
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/util.py#L38-L55
def key_handle_to_int(this): """ Turn "123" into 123 and "KSM1" into 827151179 (0x314d534b, 'K' = 0x4b, S = '0x53', M = 0x4d). YHSM is little endian, so this makes the bytes KSM1 appear in the most human readable form in packet traces. """ try: num = int(this) return num except ValueError: if this[:2] == "0x": return int(this, 16) if (len(this) == 4): num = struct.unpack('<I', this)[0] return num raise pyhsm.exception.YHSM_Error("Could not parse key_handle '%s'" % (this))
[ "def", "key_handle_to_int", "(", "this", ")", ":", "try", ":", "num", "=", "int", "(", "this", ")", "return", "num", "except", "ValueError", ":", "if", "this", "[", ":", "2", "]", "==", "\"0x\"", ":", "return", "int", "(", "this", ",", "16", ")", ...
Turn "123" into 123 and "KSM1" into 827151179 (0x314d534b, 'K' = 0x4b, S = '0x53', M = 0x4d). YHSM is little endian, so this makes the bytes KSM1 appear in the most human readable form in packet traces.
[ "Turn", "123", "into", "123", "and", "KSM1", "into", "827151179", "(", "0x314d534b", "K", "=", "0x4b", "S", "=", "0x53", "M", "=", "0x4d", ")", "." ]
python
train
sony/nnabla
python/src/nnabla/utils/image_utils/pil_utils.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/image_utils/pil_utils.py#L74-L115
def imread(path, grayscale=False, size=None, interpolate="bilinear", channel_first=False, as_uint16=False, num_channels=-1): """ Read image by PIL module. Notice that PIL only supports uint8 for RGB (not uint16). So this imread function returns only uint8 array for both RGB and gray-scale. (Currently ignore "I" mode for gray-scale (32bit integer).) Args: path (str or 'file object'): File path or object to read. grayscale (bool): size (tupple of int): (width, height). If None, output img shape depends on the files to read. channel_first (bool): This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channel). interpolate (str): must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. as_uint16 (bool): If you specify this argument, you can use only False for pil backend. num_channels (int): channel size of output array. Default is -1 which preserves raw image shape. Returns: numpy.ndarray """ if as_uint16: raise ValueError("pillow only supports uint8 for RGB image." " If you want to load image as uint16," " install pypng or cv2 and" " nnabla.utils.image_utils automatically change backend to use these module.") _imread_before(grayscale, num_channels) pil_img = Image.open(path, mode="r") img = pil_image_to_ndarray(pil_img, grayscale, num_channels) return _imread_after(img, size, interpolate, channel_first, imresize)
[ "def", "imread", "(", "path", ",", "grayscale", "=", "False", ",", "size", "=", "None", ",", "interpolate", "=", "\"bilinear\"", ",", "channel_first", "=", "False", ",", "as_uint16", "=", "False", ",", "num_channels", "=", "-", "1", ")", ":", "if", "as...
Read image by PIL module. Notice that PIL only supports uint8 for RGB (not uint16). So this imread function returns only uint8 array for both RGB and gray-scale. (Currently ignore "I" mode for gray-scale (32bit integer).) Args: path (str or 'file object'): File path or object to read. grayscale (bool): size (tupple of int): (width, height). If None, output img shape depends on the files to read. channel_first (bool): This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channel). interpolate (str): must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. as_uint16 (bool): If you specify this argument, you can use only False for pil backend. num_channels (int): channel size of output array. Default is -1 which preserves raw image shape. Returns: numpy.ndarray
[ "Read", "image", "by", "PIL", "module", ".", "Notice", "that", "PIL", "only", "supports", "uint8", "for", "RGB", "(", "not", "uint16", ")", ".", "So", "this", "imread", "function", "returns", "only", "uint8", "array", "for", "both", "RGB", "and", "gray",...
python
train
PredixDev/predixpy
predix/security/uaa.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/security/uaa.py#L226-L239
def logout(self): """ Log currently authenticated user out, invalidating any existing tokens. """ # Remove token from local cache # MAINT: need to expire token on server data = self._read_uaa_cache() if self.uri in data: for client in data[self.uri]: if client['id'] == self.client['id']: data[self.uri].remove(client) with open(self._cache_path, 'w') as output: output.write(json.dumps(data, sort_keys=True, indent=4))
[ "def", "logout", "(", "self", ")", ":", "# Remove token from local cache", "# MAINT: need to expire token on server", "data", "=", "self", ".", "_read_uaa_cache", "(", ")", "if", "self", ".", "uri", "in", "data", ":", "for", "client", "in", "data", "[", "self", ...
Log currently authenticated user out, invalidating any existing tokens.
[ "Log", "currently", "authenticated", "user", "out", "invalidating", "any", "existing", "tokens", "." ]
python
train
limix/glimix-core
glimix_core/lmm/_lmm.py
https://github.com/limix/glimix-core/blob/cddd0994591d100499cc41c1f480ddd575e7a980/glimix_core/lmm/_lmm.py#L530-L547
def _lml_arbitrary_scale(self): """ Log of the marginal likelihood for arbitrary scale. Returns ------- lml : float Log of the marginal likelihood. """ s = self.scale D = self._D n = len(self._y) lml = -self._df * log2pi - n * log(s) lml -= sum(npsum(log(d)) for d in D) d = (mTQ - yTQ for (mTQ, yTQ) in zip(self._mTQ, self._yTQ)) lml -= sum((i / j) @ i for (i, j) in zip(d, D)) / s return lml / 2
[ "def", "_lml_arbitrary_scale", "(", "self", ")", ":", "s", "=", "self", ".", "scale", "D", "=", "self", ".", "_D", "n", "=", "len", "(", "self", ".", "_y", ")", "lml", "=", "-", "self", ".", "_df", "*", "log2pi", "-", "n", "*", "log", "(", "s...
Log of the marginal likelihood for arbitrary scale. Returns ------- lml : float Log of the marginal likelihood.
[ "Log", "of", "the", "marginal", "likelihood", "for", "arbitrary", "scale", "." ]
python
valid
J535D165/recordlinkage
recordlinkage/algorithms/nb_sklearn.py
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/nb_sklearn.py#L458-L490
def _init_parameters_random(self, X_bin): """Initialise parameters for unsupervised learning. """ _, n_features = X_bin.shape # The parameter class_log_prior_ has shape (2,). The values represent # 'match' and 'non-match'. rand_vals = np.random.rand(2) class_prior = rand_vals / np.sum(rand_vals) # make empty array of feature log probs # dimensions 2xn_features feature_prob = np.zeros((2, n_features)) feat_i = 0 for i, bin in enumerate(self._binarizers): bin_len = bin.classes_.shape[0] rand_vals_0 = np.random.rand(bin_len) feature_prob[0, feat_i:feat_i + bin_len] = \ rand_vals_0 / np.sum(rand_vals_0) rand_vals_1 = np.random.rand(bin_len) feature_prob[1, feat_i:feat_i + bin_len] = \ rand_vals_1 / np.sum(rand_vals_1) feat_i += bin_len return np.log(class_prior), np.log(feature_prob)
[ "def", "_init_parameters_random", "(", "self", ",", "X_bin", ")", ":", "_", ",", "n_features", "=", "X_bin", ".", "shape", "# The parameter class_log_prior_ has shape (2,). The values represent", "# 'match' and 'non-match'.", "rand_vals", "=", "np", ".", "random", ".", ...
Initialise parameters for unsupervised learning.
[ "Initialise", "parameters", "for", "unsupervised", "learning", "." ]
python
train
blink1073/oct2py
oct2py/core.py
https://github.com/blink1073/oct2py/blob/bfc69d2168ae3d98258f95bbc55a858c21836b58/oct2py/core.py#L505-L521
def restart(self): """Restart an Octave session in a clean state """ if self._engine: self._engine.repl.terminate() executable = self._executable if executable: os.environ['OCTAVE_EXECUTABLE'] = executable if 'OCTAVE_EXECUTABLE' not in os.environ and 'OCTAVE' in os.environ: os.environ['OCTAVE_EXECUTABLE'] = os.environ['OCTAVE'] self._engine = OctaveEngine(stdin_handler=self._handle_stdin, logger=self.logger) # Add local Octave scripts. self._engine.eval('addpath("%s");' % HERE.replace(osp.sep, '/'))
[ "def", "restart", "(", "self", ")", ":", "if", "self", ".", "_engine", ":", "self", ".", "_engine", ".", "repl", ".", "terminate", "(", ")", "executable", "=", "self", ".", "_executable", "if", "executable", ":", "os", ".", "environ", "[", "'OCTAVE_EXE...
Restart an Octave session in a clean state
[ "Restart", "an", "Octave", "session", "in", "a", "clean", "state" ]
python
valid
TheHive-Project/Cortex-Analyzers
analyzers/BackscatterIO/backscatter-io.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/BackscatterIO/backscatter-io.py#L52-L97
def summary(self, raw): """Use the Backscatter.io summary data to create a view.""" taxonomies = list() level = 'info' namespace = 'Backscatter.io' if self.service == 'observations': summary = raw.get('results', dict()).get('summary', dict()) taxonomies = taxonomies + [ self.build_taxonomy(level, namespace, 'Observations', summary.get('observations_count', 0)), self.build_taxonomy(level, namespace, 'IP Addresses', summary.get('ip_address_count', 0)), self.build_taxonomy(level, namespace, 'Networks', summary.get('network_count', 0)), self.build_taxonomy(level, namespace, 'AS', summary.get('autonomous_system_count', 0)), self.build_taxonomy(level, namespace, 'Ports', summary.get('port_count', 0)), self.build_taxonomy(level, namespace, 'Protocols', summary.get('protocol_count', 0)) ] elif self.service == 'enrichment': summary = raw.get('results', dict()) if self.data_type == 'ip': taxonomies = taxonomies + [ self.build_taxonomy(level, namespace, 'Network', summary.get('network')), self.build_taxonomy(level, namespace, 'Network Broadcast', summary.get('network_broadcast')), self.build_taxonomy(level, namespace, 'Network Size', summary.get('network_size')), self.build_taxonomy(level, namespace, 'Country', summary.get('country_name')), self.build_taxonomy(level, namespace, 'AS Number', summary.get('as_num')), self.build_taxonomy(level, namespace, 'AS Name', summary.get('as_name')), ] elif self.data_type == 'network': taxonomies = taxonomies + [ self.build_taxonomy(level, namespace, 'Network Size', summary.get('network_size')) ] elif self.data_type == 'autonomous-system': taxonomies = taxonomies + [ self.build_taxonomy(level, namespace, 'Prefix Count', summary.get('prefix_count')), self.build_taxonomy(level, namespace, 'AS Number', summary.get('as_num')), self.build_taxonomy(level, namespace, 'AS Name', summary.get('as_name')) ] elif self.data_type == 'port': for result in raw.get('results', list()): display = "%s (%s)" % (result.get('service'), result.get('protocol')) 
taxonomies.append(self.build_taxonomy(level, namespace, 'Service', display)) else: pass else: pass return {"taxonomies": taxonomies}
[ "def", "summary", "(", "self", ",", "raw", ")", ":", "taxonomies", "=", "list", "(", ")", "level", "=", "'info'", "namespace", "=", "'Backscatter.io'", "if", "self", ".", "service", "==", "'observations'", ":", "summary", "=", "raw", ".", "get", "(", "...
Use the Backscatter.io summary data to create a view.
[ "Use", "the", "Backscatter", ".", "io", "summary", "data", "to", "create", "a", "view", "." ]
python
train
hyperledger/sawtooth-core
cli/sawtooth_cli/format_utils.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/format_utils.py#L62-L88
def print_terminal_table(headers, data_list, parse_row_fn): """Uses a set of headers, raw data, and a row parsing function, to print data to the terminal in a table of rows and columns. Args: headers (tuple of strings): The headers for each column of data data_list (list of dicts): Raw response data from the validator parse_row_fn (function): Parses a dict of data into a tuple of columns Expected args: data (dict): A single response object from the validator Expected return: cols (tuple): The properties to display in each column """ data_iter = iter(data_list) try: example = next(data_iter) example_row = parse_row_fn(example) data_iter = itertools.chain([example], data_iter) except StopIteration: example_row = [''] * len(headers) format_string = format_terminal_row(headers, example_row) top_row = format_string.format(*headers) print(top_row[0:-3] if top_row.endswith('...') else top_row) for data in data_iter: print(format_string.format(*parse_row_fn(data)))
[ "def", "print_terminal_table", "(", "headers", ",", "data_list", ",", "parse_row_fn", ")", ":", "data_iter", "=", "iter", "(", "data_list", ")", "try", ":", "example", "=", "next", "(", "data_iter", ")", "example_row", "=", "parse_row_fn", "(", "example", ")...
Uses a set of headers, raw data, and a row parsing function, to print data to the terminal in a table of rows and columns. Args: headers (tuple of strings): The headers for each column of data data_list (list of dicts): Raw response data from the validator parse_row_fn (function): Parses a dict of data into a tuple of columns Expected args: data (dict): A single response object from the validator Expected return: cols (tuple): The properties to display in each column
[ "Uses", "a", "set", "of", "headers", "raw", "data", "and", "a", "row", "parsing", "function", "to", "print", "data", "to", "the", "terminal", "in", "a", "table", "of", "rows", "and", "columns", "." ]
python
train
bitcraze/crazyflie-lib-python
cflib/crazyflie/high_level_commander.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/high_level_commander.py#L88-L101
def land(self, absolute_height_m, duration_s, group_mask=ALL_GROUPS): """ vertical land from current x-y position to given height :param absolute_height_m: absolut (m) :param duration_s: time it should take until target height is reached (s) :param group_mask: mask for which CFs this should apply to """ self._send_packet(struct.pack('<BBff', self.COMMAND_LAND, group_mask, absolute_height_m, duration_s))
[ "def", "land", "(", "self", ",", "absolute_height_m", ",", "duration_s", ",", "group_mask", "=", "ALL_GROUPS", ")", ":", "self", ".", "_send_packet", "(", "struct", ".", "pack", "(", "'<BBff'", ",", "self", ".", "COMMAND_LAND", ",", "group_mask", ",", "abs...
vertical land from current x-y position to given height :param absolute_height_m: absolut (m) :param duration_s: time it should take until target height is reached (s) :param group_mask: mask for which CFs this should apply to
[ "vertical", "land", "from", "current", "x", "-", "y", "position", "to", "given", "height" ]
python
train
ynop/audiomate
audiomate/corpus/subset/utils.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/subset/utils.py#L75-L173
def get_identifiers_splitted_by_weights(identifiers={}, proportions={}): """ Divide the given identifiers based on the given proportions. But instead of randomly split the identifiers it is based on category weights. Every identifier has a weight for any number of categories. The target is, to split the identifiers in a way, so the sum of category k within part x is proportional to the sum of category x over all parts according to the given proportions. This is done by greedily insert the identifiers step by step in a part which has free space (weight). If there are no fitting parts anymore, the one with the least weight exceed is used. Args: identifiers (dict): A dictionary containing the weights for each identifier (key). Per item a dictionary of weights per category is given. proportions (dict): Dict of proportions, with a identifier as key. Returns: dict: Dictionary containing a list of identifiers per part with the same key as the proportions dict. Example:: >>> identifiers = { >>> 'a': {'music': 2, 'speech': 1}, >>> 'b': {'music': 5, 'speech': 2}, >>> 'c': {'music': 2, 'speech': 4}, >>> 'd': {'music': 1, 'speech': 4}, >>> 'e': {'music': 3, 'speech': 4} >>> } >>> proportions = { >>> "train" : 0.6, >>> "dev" : 0.2, >>> "test" : 0.2 >>> } >>> get_identifiers_splitted_by_weights(identifiers, proportions) { 'train': ['a', 'b', 'd'], 'dev': ['c'], 'test': ['e'] } """ # Get total weight per category sum_per_category = collections.defaultdict(int) for identifier, cat_weights in identifiers.items(): for category, weight in cat_weights.items(): sum_per_category[category] += weight target_weights_per_part = collections.defaultdict(dict) # Get target weight for each part and category for category, total_weight in sum_per_category.items(): abs_proportions = absolute_proportions(proportions, total_weight) for idx, proportion in abs_proportions.items(): target_weights_per_part[idx][category] = proportion # Distribute items greedily part_ids = 
sorted(list(proportions.keys())) current_weights_per_part = {idx: collections.defaultdict(int) for idx in part_ids} result = collections.defaultdict(list) for identifier in sorted(identifiers.keys()): cat_weights = identifiers[identifier] target_part = None current_part = 0 weight_over_target = collections.defaultdict(int) # Search for fitting part while target_part is None and current_part < len(part_ids): free_space = True part_id = part_ids[current_part] part_weights = current_weights_per_part[part_id] for category, weight in cat_weights.items(): target_weight = target_weights_per_part[part_id][category] current_weight = part_weights[category] weight_diff = current_weight + weight - target_weight weight_over_target[part_id] += weight_diff if weight_diff > 0: free_space = False # If weight doesn't exceed target, place identifier in part if free_space: target_part = part_id current_part += 1 # If not found fitting part, select the part with the least overweight if target_part is None: target_part = sorted(weight_over_target.items(), key=lambda x: x[1])[0][0] result[target_part].append(identifier) for category, weight in cat_weights.items(): current_weights_per_part[target_part][category] += weight return result
[ "def", "get_identifiers_splitted_by_weights", "(", "identifiers", "=", "{", "}", ",", "proportions", "=", "{", "}", ")", ":", "# Get total weight per category", "sum_per_category", "=", "collections", ".", "defaultdict", "(", "int", ")", "for", "identifier", ",", ...
Divide the given identifiers based on the given proportions. But instead of randomly split the identifiers it is based on category weights. Every identifier has a weight for any number of categories. The target is, to split the identifiers in a way, so the sum of category k within part x is proportional to the sum of category x over all parts according to the given proportions. This is done by greedily insert the identifiers step by step in a part which has free space (weight). If there are no fitting parts anymore, the one with the least weight exceed is used. Args: identifiers (dict): A dictionary containing the weights for each identifier (key). Per item a dictionary of weights per category is given. proportions (dict): Dict of proportions, with a identifier as key. Returns: dict: Dictionary containing a list of identifiers per part with the same key as the proportions dict. Example:: >>> identifiers = { >>> 'a': {'music': 2, 'speech': 1}, >>> 'b': {'music': 5, 'speech': 2}, >>> 'c': {'music': 2, 'speech': 4}, >>> 'd': {'music': 1, 'speech': 4}, >>> 'e': {'music': 3, 'speech': 4} >>> } >>> proportions = { >>> "train" : 0.6, >>> "dev" : 0.2, >>> "test" : 0.2 >>> } >>> get_identifiers_splitted_by_weights(identifiers, proportions) { 'train': ['a', 'b', 'd'], 'dev': ['c'], 'test': ['e'] }
[ "Divide", "the", "given", "identifiers", "based", "on", "the", "given", "proportions", ".", "But", "instead", "of", "randomly", "split", "the", "identifiers", "it", "is", "based", "on", "category", "weights", ".", "Every", "identifier", "has", "a", "weight", ...
python
train
Tanganelli/CoAPthon3
coapthon/messages/message.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/messages/message.py#L149-L159
def options(self, value): """ Set the options of the CoAP message. :type value: list :param value: list of options """ if value is None: value = [] assert isinstance(value, list) self._options = value
[ "def", "options", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "value", "=", "[", "]", "assert", "isinstance", "(", "value", ",", "list", ")", "self", ".", "_options", "=", "value" ]
Set the options of the CoAP message. :type value: list :param value: list of options
[ "Set", "the", "options", "of", "the", "CoAP", "message", "." ]
python
train
totalgood/twip
docs/notebooks/shakescorpus.py
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/docs/notebooks/shakescorpus.py#L52-L62
def generate_lines(input_file, start=0, stop=float('inf')): """Generate (yield) lines in a gzipped file (*.txt.gz) one line at a time""" with gzip.GzipFile(input_file, 'rU') as f: for i, line in enumerate(f): if i < start: continue if i >= stop: break yield line.rstrip()
[ "def", "generate_lines", "(", "input_file", ",", "start", "=", "0", ",", "stop", "=", "float", "(", "'inf'", ")", ")", ":", "with", "gzip", ".", "GzipFile", "(", "input_file", ",", "'rU'", ")", "as", "f", ":", "for", "i", ",", "line", "in", "enumer...
Generate (yield) lines in a gzipped file (*.txt.gz) one line at a time
[ "Generate", "(", "yield", ")", "lines", "in", "a", "gzipped", "file", "(", "*", ".", "txt", ".", "gz", ")", "one", "line", "at", "a", "time" ]
python
train
wuher/devil
devil/docs/resource.py
https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/docs/resource.py#L97-L104
def _get_method_doc(self): """ Return method documentations. """ ret = {} for method_name in self.methods: method = getattr(self, method_name, None) if method: ret[method_name] = method.__doc__ return ret
[ "def", "_get_method_doc", "(", "self", ")", ":", "ret", "=", "{", "}", "for", "method_name", "in", "self", ".", "methods", ":", "method", "=", "getattr", "(", "self", ",", "method_name", ",", "None", ")", "if", "method", ":", "ret", "[", "method_name",...
Return method documentations.
[ "Return", "method", "documentations", "." ]
python
train
brunato/lograptor
lograptor/timedate.py
https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/timedate.py#L181-L203
def strftimegen(start_dt, end_dt): """ Return a generator function for datetime format strings. The generator produce a day-by-day sequence starting from the first datetime to the second datetime argument. """ if start_dt > end_dt: raise ValueError("the start datetime is after the end datetime: (%r,%r)" % (start_dt, end_dt)) def iterftime(string): date_subs = [i for i in DATE_FORMATS if i[1].search(string) is not None] if not date_subs: yield string else: dt = start_dt date_path = string while end_dt >= dt: for item in date_subs: date_path = item[1].sub(dt.strftime(item[0]), date_path) yield date_path dt = dt + datetime.timedelta(days=1) return iterftime
[ "def", "strftimegen", "(", "start_dt", ",", "end_dt", ")", ":", "if", "start_dt", ">", "end_dt", ":", "raise", "ValueError", "(", "\"the start datetime is after the end datetime: (%r,%r)\"", "%", "(", "start_dt", ",", "end_dt", ")", ")", "def", "iterftime", "(", ...
Return a generator function for datetime format strings. The generator produce a day-by-day sequence starting from the first datetime to the second datetime argument.
[ "Return", "a", "generator", "function", "for", "datetime", "format", "strings", ".", "The", "generator", "produce", "a", "day", "-", "by", "-", "day", "sequence", "starting", "from", "the", "first", "datetime", "to", "the", "second", "datetime", "argument", ...
python
train
librosa/librosa
librosa/util/utils.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L209-L233
def valid_intervals(intervals): '''Ensure that an array is a valid representation of time intervals: - intervals.ndim == 2 - intervals.shape[1] == 2 - intervals[i, 0] <= intervals[i, 1] for all i Parameters ---------- intervals : np.ndarray [shape=(n, 2)] set of time intervals Returns ------- valid : bool True if `intervals` passes validation. ''' if intervals.ndim != 2 or intervals.shape[-1] != 2: raise ParameterError('intervals must have shape (n, 2)') if np.any(intervals[:, 0] > intervals[:, 1]): raise ParameterError('intervals={} must have non-negative durations'.format(intervals)) return True
[ "def", "valid_intervals", "(", "intervals", ")", ":", "if", "intervals", ".", "ndim", "!=", "2", "or", "intervals", ".", "shape", "[", "-", "1", "]", "!=", "2", ":", "raise", "ParameterError", "(", "'intervals must have shape (n, 2)'", ")", "if", "np", "."...
Ensure that an array is a valid representation of time intervals: - intervals.ndim == 2 - intervals.shape[1] == 2 - intervals[i, 0] <= intervals[i, 1] for all i Parameters ---------- intervals : np.ndarray [shape=(n, 2)] set of time intervals Returns ------- valid : bool True if `intervals` passes validation.
[ "Ensure", "that", "an", "array", "is", "a", "valid", "representation", "of", "time", "intervals", ":" ]
python
test
chriskiehl/Gooey
gooey/python_bindings/gooey_decorator.py
https://github.com/chriskiehl/Gooey/blob/e598573c6519b953e0ccfc1f3663f827f8cd7e22/gooey/python_bindings/gooey_decorator.py#L22-L99
def Gooey(f=None, advanced=True, language='english', auto_start=False, # TODO: add this to the docs. Used to be `show_config=True` target=None, program_name=None, program_description=None, default_size=(610, 530), use_legacy_titles=True, required_cols=2, optional_cols=2, dump_build_config=False, load_build_config=None, monospace_display=False, # TODO: add this to the docs image_dir='::gooey/default', language_dir=getResourcePath('languages'), progress_regex=None, # TODO: add this to the docs progress_expr=None, # TODO: add this to the docs disable_progress_bar_animation=False, disable_stop_button=False, group_by_type=True, header_height=80, navigation='SIDEBAR', # TODO: add this to the docs tabbed_groups=False, **kwargs): ''' Decorator for client code's main function. Serializes argparse data to JSON for use with the Gooey front end ''' params = merge(locals(), locals()['kwargs']) def build(payload): def run_gooey(self, args=None, namespace=None): source_path = sys.argv[0] build_spec = None if load_build_config: try: build_spec = json.load(open(load_build_config, "r")) except Exception as e: print( 'Exception loading Build Config from {0}: {1}'.format(load_build_config, e)) sys.exit(1) if not build_spec: build_spec = config_generator.create_from_parser( self, source_path, payload_name=payload.__name__, **params) if dump_build_config: config_path = os.path.join(os.getcwd(), 'gooey_config.json') print('Writing Build Config to: {}'.format(config_path)) with open(config_path, 'w') as f: f.write(json.dumps(build_spec, indent=2)) application.run(build_spec) def inner2(*args, **kwargs): ArgumentParser.original_parse_args = ArgumentParser.parse_args ArgumentParser.parse_args = run_gooey return payload(*args, **kwargs) inner2.__name__ = payload.__name__ return inner2 def run_without_gooey(func): return lambda: func() if IGNORE_COMMAND in sys.argv: sys.argv.remove(IGNORE_COMMAND) if callable(f): return run_without_gooey(f) return run_without_gooey if callable(f): return 
build(f) return build
[ "def", "Gooey", "(", "f", "=", "None", ",", "advanced", "=", "True", ",", "language", "=", "'english'", ",", "auto_start", "=", "False", ",", "# TODO: add this to the docs. Used to be `show_config=True`\r", "target", "=", "None", ",", "program_name", "=", "None", ...
Decorator for client code's main function. Serializes argparse data to JSON for use with the Gooey front end
[ "Decorator", "for", "client", "code", "s", "main", "function", ".", "Serializes", "argparse", "data", "to", "JSON", "for", "use", "with", "the", "Gooey", "front", "end" ]
python
train
ethereum/eth-abi
eth_abi/codec.py
https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L155-L179
def decode_abi(self, types: Iterable[TypeStr], data: Decodable) -> Tuple[Any, ...]: """ Decodes the binary value ``data`` as a sequence of values of the ABI types in ``types`` via the head-tail mechanism into a tuple of equivalent python values. :param types: An iterable of string representations of the ABI types that will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')`` :param data: The binary value to be decoded. :returns: A tuple of equivalent python values for the ABI values represented in ``data``. """ if not is_bytes(data): raise TypeError("The `data` value must be of bytes type. Got {0}".format(type(data))) decoders = [ self._registry.get_decoder(type_str) for type_str in types ] decoder = TupleDecoder(decoders=decoders) stream = ContextFramesBytesIO(data) return decoder(stream)
[ "def", "decode_abi", "(", "self", ",", "types", ":", "Iterable", "[", "TypeStr", "]", ",", "data", ":", "Decodable", ")", "->", "Tuple", "[", "Any", ",", "...", "]", ":", "if", "not", "is_bytes", "(", "data", ")", ":", "raise", "TypeError", "(", "\...
Decodes the binary value ``data`` as a sequence of values of the ABI types in ``types`` via the head-tail mechanism into a tuple of equivalent python values. :param types: An iterable of string representations of the ABI types that will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')`` :param data: The binary value to be decoded. :returns: A tuple of equivalent python values for the ABI values represented in ``data``.
[ "Decodes", "the", "binary", "value", "data", "as", "a", "sequence", "of", "values", "of", "the", "ABI", "types", "in", "types", "via", "the", "head", "-", "tail", "mechanism", "into", "a", "tuple", "of", "equivalent", "python", "values", "." ]
python
train
sashs/filebytes
filebytes/pe.py
https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L603-L629
def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection): """Parses the ImportDataDirectory and returns a list of ImportDescriptorData""" if not importSection: return raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection)) offset = 0 import_descriptors = [] while True: import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset) if import_descriptor.OriginalFirstThunk == 0: break else: nameOffset = to_offset(import_descriptor.Name, importSection) checkOffset(nameOffset, importSection) dllName = get_str(importSection.raw, nameOffset) import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection) import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection) import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table)) offset += sizeof(IMAGE_IMPORT_DESCRIPTOR) return import_descriptors
[ "def", "_parseDataDirectoryImport", "(", "self", ",", "dataDirectoryEntry", ",", "importSection", ")", ":", "if", "not", "importSection", ":", "return", "raw_bytes", "=", "(", "c_ubyte", "*", "dataDirectoryEntry", ".", "Size", ")", ".", "from_buffer", "(", "impo...
Parses the ImportDataDirectory and returns a list of ImportDescriptorData
[ "Parses", "the", "ImportDataDirectory", "and", "returns", "a", "list", "of", "ImportDescriptorData" ]
python
train
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L410-L443
def parseBEDString(line, scoreType=int, dropAfter=None): """ Parse a string in BED format and return a GenomicInterval object. :param line: the string to be parsed :param dropAfter: an int indicating that any fields after and including this field should be ignored as they don't conform to the BED format. By default, None, meaning we use all fields. Index from zero. :return: GenomicInterval object built from the BED string representation """ peices = line.split("\t") if dropAfter is not None: peices = peices[0:dropAfter] if len(peices) < 3: raise GenomicIntervalError("BED elements must have at least chrom, " + "start and end; found only " + str(len(peices)) + " in " + line) chrom = peices[0] start = peices[1] end = peices[2] name = None score = None strand = None if len(peices) >= 4 is not None: name = peices[3] if len(peices) >= 5 is not None: score = peices[4] if len(peices) >= 6 is not None: strand = peices[5] return GenomicInterval(chrom, start, end, name, score, strand, scoreType)
[ "def", "parseBEDString", "(", "line", ",", "scoreType", "=", "int", ",", "dropAfter", "=", "None", ")", ":", "peices", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "dropAfter", "is", "not", "None", ":", "peices", "=", "peices", "[", "0", ":"...
Parse a string in BED format and return a GenomicInterval object. :param line: the string to be parsed :param dropAfter: an int indicating that any fields after and including this field should be ignored as they don't conform to the BED format. By default, None, meaning we use all fields. Index from zero. :return: GenomicInterval object built from the BED string representation
[ "Parse", "a", "string", "in", "BED", "format", "and", "return", "a", "GenomicInterval", "object", "." ]
python
train
google-research/batch-ppo
agents/tools/loop.py
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L108-L152
def run(self, sess, saver, max_step=None): """Run the loop schedule for a specified number of steps. Call the operation of the current phase until the global step reaches the specified maximum step. Phases are repeated over and over in the order they were added. Args: sess: Session to use to run the phase operation. saver: Saver used for checkpointing. max_step: Run the operations until the step reaches this limit. Yields: Reported mean scores. """ global_step = sess.run(self._step) steps_made = 1 while True: if max_step and global_step >= max_step: break phase, epoch, steps_in = self._find_current_phase(global_step) phase_step = epoch * phase.steps + steps_in if steps_in % phase.steps < steps_made: message = '\n' + ('-' * 50) + '\n' message += 'Phase {} (phase step {}, global step {}).' tf.logging.info(message.format(phase.name, phase_step, global_step)) # Populate book keeping tensors. phase.feed[self._reset] = (steps_in < steps_made) phase.feed[self._log] = ( phase.writer and self._is_every_steps(phase_step, phase.batch, phase.log_every)) phase.feed[self._report] = ( self._is_every_steps(phase_step, phase.batch, phase.report_every)) summary, mean_score, global_step, steps_made = sess.run( phase.op, phase.feed) if self._is_every_steps(phase_step, phase.batch, phase.checkpoint_every): self._store_checkpoint(sess, saver, global_step) if self._is_every_steps(phase_step, phase.batch, phase.report_every): yield mean_score if summary and phase.writer: # We want smaller phases to catch up at the beginnig of each epoch so # that their graphs are aligned. longest_phase = max(phase.steps for phase in self._phases) summary_step = epoch * longest_phase + steps_in phase.writer.add_summary(summary, summary_step)
[ "def", "run", "(", "self", ",", "sess", ",", "saver", ",", "max_step", "=", "None", ")", ":", "global_step", "=", "sess", ".", "run", "(", "self", ".", "_step", ")", "steps_made", "=", "1", "while", "True", ":", "if", "max_step", "and", "global_step"...
Run the loop schedule for a specified number of steps. Call the operation of the current phase until the global step reaches the specified maximum step. Phases are repeated over and over in the order they were added. Args: sess: Session to use to run the phase operation. saver: Saver used for checkpointing. max_step: Run the operations until the step reaches this limit. Yields: Reported mean scores.
[ "Run", "the", "loop", "schedule", "for", "a", "specified", "number", "of", "steps", "." ]
python
train
dustinmm80/healthy
checks.py
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/checks.py#L33-L46
def check_license(package_info, *args): """ Does the package have a license classifier? :param package_info: package_info dictionary :return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied) """ classifiers = package_info.get('classifiers') reason = "No License" result = False if len([c for c in classifiers if c.startswith('License ::')]) > 0: result = True return result, reason, HAS_LICENSE
[ "def", "check_license", "(", "package_info", ",", "*", "args", ")", ":", "classifiers", "=", "package_info", ".", "get", "(", "'classifiers'", ")", "reason", "=", "\"No License\"", "result", "=", "False", "if", "len", "(", "[", "c", "for", "c", "in", "cl...
Does the package have a license classifier? :param package_info: package_info dictionary :return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
[ "Does", "the", "package", "have", "a", "license", "classifier?", ":", "param", "package_info", ":", "package_info", "dictionary", ":", "return", ":", "Tuple", "(", "is", "the", "condition", "True", "or", "False?", "reason", "if", "it", "is", "False", "else",...
python
train
google/grr
grr/server/grr_response_server/databases/db.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/db.py#L1248-L1265
def IterateAllClientSnapshots(self, min_last_ping=None, batch_size=50000): """Iterates over all available clients and yields client snapshot objects. Args: min_last_ping: If provided, only snapshots for clients with last-ping timestamps newer than (or equal to) the given value will be returned. batch_size: Always reads <batch_size> snapshots at a time. Yields: An rdfvalues.objects.ClientSnapshot object for each client in the db. """ all_client_ids = self.ReadAllClientIDs(min_last_ping=min_last_ping) for batch in collection.Batch(all_client_ids, batch_size): res = self.MultiReadClientSnapshot(batch) for snapshot in itervalues(res): if snapshot: yield snapshot
[ "def", "IterateAllClientSnapshots", "(", "self", ",", "min_last_ping", "=", "None", ",", "batch_size", "=", "50000", ")", ":", "all_client_ids", "=", "self", ".", "ReadAllClientIDs", "(", "min_last_ping", "=", "min_last_ping", ")", "for", "batch", "in", "collect...
Iterates over all available clients and yields client snapshot objects. Args: min_last_ping: If provided, only snapshots for clients with last-ping timestamps newer than (or equal to) the given value will be returned. batch_size: Always reads <batch_size> snapshots at a time. Yields: An rdfvalues.objects.ClientSnapshot object for each client in the db.
[ "Iterates", "over", "all", "available", "clients", "and", "yields", "client", "snapshot", "objects", "." ]
python
train
mitsei/dlkit
dlkit/json_/commenting/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/commenting/objects.py#L569-L580
def get_book(self): """Gets the ``Book`` at this node. return: (osid.commenting.Book) - the book represented by this node *compliance: mandatory -- This method must be implemented.* """ if self._lookup_session is None: mgr = get_provider_manager('COMMENTING', runtime=self._runtime, proxy=self._proxy) self._lookup_session = mgr.get_book_lookup_session(proxy=getattr(self, "_proxy", None)) return self._lookup_session.get_book(Id(self._my_map['id']))
[ "def", "get_book", "(", "self", ")", ":", "if", "self", ".", "_lookup_session", "is", "None", ":", "mgr", "=", "get_provider_manager", "(", "'COMMENTING'", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")", "self...
Gets the ``Book`` at this node. return: (osid.commenting.Book) - the book represented by this node *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "Book", "at", "this", "node", "." ]
python
train
JarryShaw/PyPCAPKit
src/const/ipv4/tos_pre.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/const/ipv4/tos_pre.py#L22-L28
def get(key, default=-1): """Backport support for original codes.""" if isinstance(key, int): return TOS_PRE(key) if key not in TOS_PRE._member_map_: extend_enum(TOS_PRE, key, default) return TOS_PRE[key]
[ "def", "get", "(", "key", ",", "default", "=", "-", "1", ")", ":", "if", "isinstance", "(", "key", ",", "int", ")", ":", "return", "TOS_PRE", "(", "key", ")", "if", "key", "not", "in", "TOS_PRE", ".", "_member_map_", ":", "extend_enum", "(", "TOS_P...
Backport support for original codes.
[ "Backport", "support", "for", "original", "codes", "." ]
python
train
neon-jungle/wagtailmodelchooser
wagtailmodelchooser/utils.py
https://github.com/neon-jungle/wagtailmodelchooser/blob/8dd1e33dd61418a726ff3acf67a956626c8b7ba1/wagtailmodelchooser/utils.py#L32-L42
def signature_matches(func, args=(), kwargs={}): """ Work out if a function is callable with some args or not. """ try: sig = inspect.signature(func) sig.bind(*args, **kwargs) except TypeError: return False else: return True
[ "def", "signature_matches", "(", "func", ",", "args", "=", "(", ")", ",", "kwargs", "=", "{", "}", ")", ":", "try", ":", "sig", "=", "inspect", ".", "signature", "(", "func", ")", "sig", ".", "bind", "(", "*", "args", ",", "*", "*", "kwargs", "...
Work out if a function is callable with some args or not.
[ "Work", "out", "if", "a", "function", "is", "callable", "with", "some", "args", "or", "not", "." ]
python
valid
saltstack/salt
salt/utils/hashutils.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/hashutils.py#L38-L49
def base64_b64decode(instr): ''' Decode a base64-encoded string using the "modern" Python interface. ''' decoded = base64.b64decode(salt.utils.stringutils.to_bytes(instr)) try: return salt.utils.stringutils.to_unicode( decoded, encoding='utf8' if salt.utils.platform.is_windows() else None ) except UnicodeDecodeError: return decoded
[ "def", "base64_b64decode", "(", "instr", ")", ":", "decoded", "=", "base64", ".", "b64decode", "(", "salt", ".", "utils", ".", "stringutils", ".", "to_bytes", "(", "instr", ")", ")", "try", ":", "return", "salt", ".", "utils", ".", "stringutils", ".", ...
Decode a base64-encoded string using the "modern" Python interface.
[ "Decode", "a", "base64", "-", "encoded", "string", "using", "the", "modern", "Python", "interface", "." ]
python
train
s0lst1c3/grey_harvest
grey_harvest.py
https://github.com/s0lst1c3/grey_harvest/blob/811e5787ce7e613bc489b8e5e475eaa8790f4d66/grey_harvest.py#L111-L156
def _extract_proxies(self, ajax_endpoint): ''' request the xml object ''' proxy_xml = requests.get(ajax_endpoint) print(proxy_xml.content) root = etree.XML(proxy_xml.content) quote = root.xpath('quote')[0] ''' extract the raw text from the body of the quote tag ''' raw_text = quote.text ''' eliminate the stuff we don't need ''' proxy_data = raw_text.split('You will definitely love it! Give it a try!</td></tr>')[1] ''' get rid of the </table> at the end of proxy_data ''' proxy_data = proxy_data[:-len('</table>')] ''' split proxy_data into rows ''' table_rows = proxy_data.split('<tr>') ''' convert each row into a Proxy object ''' for row in table_rows: ''' get rid of the </tr> at the end of each row ''' row = row[:-len('</tr>')] ''' split each row into a list of items ''' items = row.split('<td>') ''' sometimes we get weird lists containing only an empty string ''' if len(items) != 7: continue ''' we'll use this to remove the </td> from the end of each item ''' tdlen = len('</td>') ''' create proxy dict ''' proxy = Proxy( ip=items[1][:-tdlen], port=int(items[2][:-tdlen]), https=bool(items[3][:-tdlen]), latency=int(items[4][:-tdlen]), last_checked=items[5][:-tdlen], country=items[6][:-tdlen], ) yield proxy
[ "def", "_extract_proxies", "(", "self", ",", "ajax_endpoint", ")", ":", "proxy_xml", "=", "requests", ".", "get", "(", "ajax_endpoint", ")", "print", "(", "proxy_xml", ".", "content", ")", "root", "=", "etree", ".", "XML", "(", "proxy_xml", ".", "content",...
request the xml object
[ "request", "the", "xml", "object" ]
python
train
klen/muffin-peewee
muffin_peewee/debugtoolbar.py
https://github.com/klen/muffin-peewee/blob/8e893e3ea1dfc82fbcfc6efe784308c8d4e2852e/muffin_peewee/debugtoolbar.py#L44-L47
def wrap_handler(self, handler, context_switcher): """Enable/Disable handler.""" context_switcher.add_context_in(lambda: LOGGER.addHandler(self.handler)) context_switcher.add_context_out(lambda: LOGGER.removeHandler(self.handler))
[ "def", "wrap_handler", "(", "self", ",", "handler", ",", "context_switcher", ")", ":", "context_switcher", ".", "add_context_in", "(", "lambda", ":", "LOGGER", ".", "addHandler", "(", "self", ".", "handler", ")", ")", "context_switcher", ".", "add_context_out", ...
Enable/Disable handler.
[ "Enable", "/", "Disable", "handler", "." ]
python
valid
estnltk/estnltk
estnltk/disambiguator.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L306-L322
def __remove_redundant_proper_names(self, docs, lemma_set): """ Eemaldame yleliigsed pärisnimeanalüüsid etteantud sõnalemmade loendi (hulga) põhjal; """ for doc in docs: for word in doc[WORDS]: # Vaatame vaid s6nu, millele on pakutud rohkem kui yks analyys: if len(word[ANALYSIS]) > 1: # 1) Leiame analyysid, mis tuleks loendi järgi eemaldada toDelete = [] for analysis in word[ANALYSIS]: if analysis[POSTAG] == 'H' and analysis[ROOT] in lemma_set: toDelete.append( analysis ) # 2) Eemaldame yleliigsed analyysid if toDelete: for analysis in toDelete: word[ANALYSIS].remove(analysis)
[ "def", "__remove_redundant_proper_names", "(", "self", ",", "docs", ",", "lemma_set", ")", ":", "for", "doc", "in", "docs", ":", "for", "word", "in", "doc", "[", "WORDS", "]", ":", "# Vaatame vaid s6nu, millele on pakutud rohkem kui yks analyys:", "if", "len", "("...
Eemaldame yleliigsed pärisnimeanalüüsid etteantud sõnalemmade loendi (hulga) põhjal;
[ "Eemaldame", "yleliigsed", "pärisnimeanalüüsid", "etteantud", "sõnalemmade", "loendi", "(", "hulga", ")", "põhjal", ";" ]
python
train
Qiskit/qiskit-terra
qiskit/tools/events/progressbar.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/tools/events/progressbar.py#L60-L68
def start(self, iterations): """Start the progress bar. Parameters: iterations (int): Number of iterations. """ self.touched = True self.iter = int(iterations) self.t_start = time.time()
[ "def", "start", "(", "self", ",", "iterations", ")", ":", "self", ".", "touched", "=", "True", "self", ".", "iter", "=", "int", "(", "iterations", ")", "self", ".", "t_start", "=", "time", ".", "time", "(", ")" ]
Start the progress bar. Parameters: iterations (int): Number of iterations.
[ "Start", "the", "progress", "bar", "." ]
python
test
EpistasisLab/tpot
tpot/builtins/one_hot_encoder.py
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/one_hot_encoder.py#L399-L479
def _transform(self, X): """Asssume X contains only categorical features. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. """ X = self._matrix_adjust(X) X = check_array(X, accept_sparse='csc', force_all_finite=False, dtype=int) if X.min() < 0: raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape indices = self.feature_indices_ if n_features != indices.shape[0] - 1: raise ValueError("X has different shape than during fitting." " Expected %d, got %d." % (indices.shape[0] - 1, n_features)) # Replace all indicators which were below `minimum_fraction` in the # training set by 'other' if self.minimum_fraction is not None: for column in range(X.shape[1]): if sparse.issparse(X): indptr_start = X.indptr[column] indptr_end = X.indptr[column + 1] unique = np.unique(X.data[indptr_start:indptr_end]) else: unique = np.unique(X[:, column]) for unique_value in unique: if unique_value not in self.do_not_replace_by_other_[column]: if sparse.issparse(X): indptr_start = X.indptr[column] indptr_end = X.indptr[column + 1] X.data[indptr_start:indptr_end][ X.data[indptr_start:indptr_end] == unique_value] = SPARSE_ENCODINGS['OTHER'] else: X[:, column][X[:, column] == unique_value] = SPARSE_ENCODINGS['OTHER'] if sparse.issparse(X): n_values_check = X.max(axis=0).toarray().flatten() + 1 else: n_values_check = np.max(X, axis=0) + 1 # Replace all indicators which are out of bounds by 'other' (index 0) if (n_values_check > self.n_values_).any(): # raise ValueError("Feature out of bounds. 
Try setting n_values.") for i, n_value_check in enumerate(n_values_check): if (n_value_check - 1) >= self.n_values_[i]: if sparse.issparse(X): indptr_start = X.indptr[i] indptr_end = X.indptr[i+1] X.data[indptr_start:indptr_end][X.data[indptr_start:indptr_end] >= self.n_values_[i]] = 0 else: X[:, i][X[:, i] >= self.n_values_[i]] = 0 if sparse.issparse(X): row_indices = X.indices column_indices = [] for i in range(len(X.indptr) - 1): nbr = X.indptr[i + 1] - X.indptr[i] column_indices_ = [indices[i]] * nbr column_indices_ += X.data[X.indptr[i]:X.indptr[i + 1]] column_indices.extend(column_indices_) data = np.ones(X.data.size) else: column_indices = (X + indices[:-1]).ravel() row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features) data = np.ones(n_samples * n_features) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsc() out = out[:, self.active_features_] return out.tocsr() if self.sparse else out.toarray()
[ "def", "_transform", "(", "self", ",", "X", ")", ":", "X", "=", "self", ".", "_matrix_adjust", "(", "X", ")", "X", "=", "check_array", "(", "X", ",", "accept_sparse", "=", "'csc'", ",", "force_all_finite", "=", "False", ",", "dtype", "=", "int", ")",...
Asssume X contains only categorical features. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix.
[ "Asssume", "X", "contains", "only", "categorical", "features", "." ]
python
train
spookey/photon
photon/tools/ping.py
https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/tools/ping.py#L95-L144
def probe(self, hosts): ''' .. seealso:: :attr:`probe` ''' def __send_probe(host): ping = self.m( '', cmdd=dict( cmd=' '.join([ self.__ping_cmd, self.__num, self.__net_if, self.__packetsize, host ]) ), critical=False, verbose=False ) up = True if ping.get('returncode') == 0 else False self.__probe_results[host] = {'up': up} if up: p = ping.get('out') loss = _search(rxlss, p) ms = _findall(rxmst, p) rtt = _search(rxrtt, p) if loss: loss = loss.group('loss') self.__probe_results[host].update(dict( ms=ms, loss=loss, rtt=rtt.groupdict() )) hosts = to_list(hosts) pool_size = ( len(hosts) if len(hosts) <= self.__max_pool_size else self.__max_pool_size ) pool = _Pool(pool_size) pool.map(__send_probe, hosts) pool.close() pool.join()
[ "def", "probe", "(", "self", ",", "hosts", ")", ":", "def", "__send_probe", "(", "host", ")", ":", "ping", "=", "self", ".", "m", "(", "''", ",", "cmdd", "=", "dict", "(", "cmd", "=", "' '", ".", "join", "(", "[", "self", ".", "__ping_cmd", ","...
.. seealso:: :attr:`probe`
[ "..", "seealso", "::", ":", "attr", ":", "probe" ]
python
train
duniter/duniter-python-api
duniterpy/api/endpoint.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L270-L283
def conn_handler(self, session: ClientSession, proxy: str = None) -> ConnectionHandler: """ Return connection handler instance for the endpoint :param session: AIOHTTP client session instance :param proxy: Proxy url :return: """ if self.server: return ConnectionHandler("https", "wss", self.server, self.port, self.path, session, proxy) elif self.ipv6: return ConnectionHandler("https", "wss", "[{0}]".format(self.ipv6), self.port, self.path, session, proxy) return ConnectionHandler("https", "wss", self.ipv4, self.port, self.path, session, proxy)
[ "def", "conn_handler", "(", "self", ",", "session", ":", "ClientSession", ",", "proxy", ":", "str", "=", "None", ")", "->", "ConnectionHandler", ":", "if", "self", ".", "server", ":", "return", "ConnectionHandler", "(", "\"https\"", ",", "\"wss\"", ",", "s...
Return connection handler instance for the endpoint :param session: AIOHTTP client session instance :param proxy: Proxy url :return:
[ "Return", "connection", "handler", "instance", "for", "the", "endpoint" ]
python
train
numenta/htmresearch
htmresearch/support/csv_helper.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/csv_helper.py#L119-L144
def readDir(dirPath, numLabels, modify=False): """ Reads in data from a directory of CSV files; assumes the directory only contains CSV files. @param dirPath (str) Path to the directory. @param numLabels (int) Number of columns of category labels. @param modify (bool) Map the unix friendly category names to the actual names. 0 -> /, _ -> " " @return samplesDict (defaultdict) Keys are CSV names, values are OrderedDicts, where the keys/values are as specified in readCSV(). """ samplesDict = defaultdict(list) for _, _, files in os.walk(dirPath): for f in files: basename, extension = os.path.splitext(os.path.basename(f)) if "." in basename and extension == ".csv": category = basename.split(".")[-1] if modify: category = category.replace("0", "/") category = category.replace("_", " ") samplesDict[category] = readCSV( os.path.join(dirPath, f), numLabels=numLabels) return samplesDict
[ "def", "readDir", "(", "dirPath", ",", "numLabels", ",", "modify", "=", "False", ")", ":", "samplesDict", "=", "defaultdict", "(", "list", ")", "for", "_", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "dirPath", ")", ":", "for", "f", "in",...
Reads in data from a directory of CSV files; assumes the directory only contains CSV files. @param dirPath (str) Path to the directory. @param numLabels (int) Number of columns of category labels. @param modify (bool) Map the unix friendly category names to the actual names. 0 -> /, _ -> " " @return samplesDict (defaultdict) Keys are CSV names, values are OrderedDicts, where the keys/values are as specified in readCSV().
[ "Reads", "in", "data", "from", "a", "directory", "of", "CSV", "files", ";", "assumes", "the", "directory", "only", "contains", "CSV", "files", "." ]
python
train
stanfordnlp/stanza
stanza/research/iterators.py
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/iterators.py#L138-L157
def sized_imap(func, iterable, strict=False): ''' Return an iterable whose elements are the result of applying the callable `func` to each element of `iterable`. If `iterable` has a `len()`, then the iterable returned by this function will have the same `len()`. Otherwise calling `len()` on the returned iterable will raise `TypeError`. :param func: The function to apply to each element of `iterable`. :param iterable: An iterable whose objects will be mapped. :param bool strict: If `True` and `iterable` does not support `len()`, raise an exception immediately instead of returning an iterable that does not support `len()`. ''' try: length = len(iterable) except TypeError: if strict: raise else: return imap(func, iterable) return SizedGenerator(lambda: imap(func, iterable), length=length)
[ "def", "sized_imap", "(", "func", ",", "iterable", ",", "strict", "=", "False", ")", ":", "try", ":", "length", "=", "len", "(", "iterable", ")", "except", "TypeError", ":", "if", "strict", ":", "raise", "else", ":", "return", "imap", "(", "func", ",...
Return an iterable whose elements are the result of applying the callable `func` to each element of `iterable`. If `iterable` has a `len()`, then the iterable returned by this function will have the same `len()`. Otherwise calling `len()` on the returned iterable will raise `TypeError`. :param func: The function to apply to each element of `iterable`. :param iterable: An iterable whose objects will be mapped. :param bool strict: If `True` and `iterable` does not support `len()`, raise an exception immediately instead of returning an iterable that does not support `len()`.
[ "Return", "an", "iterable", "whose", "elements", "are", "the", "result", "of", "applying", "the", "callable", "func", "to", "each", "element", "of", "iterable", ".", "If", "iterable", "has", "a", "len", "()", "then", "the", "iterable", "returned", "by", "t...
python
train
quantmind/pulsar
pulsar/utils/context.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/context.py#L40-L48
def set(self, key, value): """Set a value in the task context """ task = Task.current_task() try: context = task._context except AttributeError: task._context = context = {} context[key] = value
[ "def", "set", "(", "self", ",", "key", ",", "value", ")", ":", "task", "=", "Task", ".", "current_task", "(", ")", "try", ":", "context", "=", "task", ".", "_context", "except", "AttributeError", ":", "task", ".", "_context", "=", "context", "=", "{"...
Set a value in the task context
[ "Set", "a", "value", "in", "the", "task", "context" ]
python
train
Contraz/demosys-py
demosys/loaders/scene/gltf.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/loaders/scene/gltf.py#L340-L348
def buffers_exist(self): """Checks if the bin files referenced exist""" for buff in self.buffers: if not buff.is_separate_file: continue path = self.path.parent / buff.uri if not os.path.exists(path): raise FileNotFoundError("Buffer {} referenced in {} not found".format(path, self.path))
[ "def", "buffers_exist", "(", "self", ")", ":", "for", "buff", "in", "self", ".", "buffers", ":", "if", "not", "buff", ".", "is_separate_file", ":", "continue", "path", "=", "self", ".", "path", ".", "parent", "/", "buff", ".", "uri", "if", "not", "os...
Checks if the bin files referenced exist
[ "Checks", "if", "the", "bin", "files", "referenced", "exist" ]
python
valid
martinmcbride/pysound
pysound/oscillators.py
https://github.com/martinmcbride/pysound/blob/253c8f712ad475318350e5a8ba21f6fefd7a3de2/pysound/oscillators.py#L90-L100
def sine_wave(params, frequency=400, amplitude=1, offset=0): ''' Generate a sine wave Convenience function, table_wave generates a sine wave by default :param params: buffer parameters, controls length of signal created :param frequency: wave frequency (array or value) :param amplitude: wave amplitude (array or value) :param offset: offset of wave mean from zero (array or value) :return: array of resulting signal ''' return table_wave(params, frequency, amplitude, offset)
[ "def", "sine_wave", "(", "params", ",", "frequency", "=", "400", ",", "amplitude", "=", "1", ",", "offset", "=", "0", ")", ":", "return", "table_wave", "(", "params", ",", "frequency", ",", "amplitude", ",", "offset", ")" ]
Generate a sine wave Convenience function, table_wave generates a sine wave by default :param params: buffer parameters, controls length of signal created :param frequency: wave frequency (array or value) :param amplitude: wave amplitude (array or value) :param offset: offset of wave mean from zero (array or value) :return: array of resulting signal
[ "Generate", "a", "sine", "wave", "Convenience", "function", "table_wave", "generates", "a", "sine", "wave", "by", "default", ":", "param", "params", ":", "buffer", "parameters", "controls", "length", "of", "signal", "created", ":", "param", "frequency", ":", "...
python
train
angr/angr
angr/analyses/ddg.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/ddg.py#L1299-L1314
def _data_graph_add_edge(self, src, dst, **edge_labels): """ Add an edge in the data dependence graph. :param ProgramVariable src: Source node. :param ProgramVariable dst: Destination node. :param edge_labels: All labels associated with the edge. :return: None """ if src in self._data_graph and dst in self._data_graph[src]: return self._data_graph.add_edge(src, dst, **edge_labels) self._simplified_data_graph = None
[ "def", "_data_graph_add_edge", "(", "self", ",", "src", ",", "dst", ",", "*", "*", "edge_labels", ")", ":", "if", "src", "in", "self", ".", "_data_graph", "and", "dst", "in", "self", ".", "_data_graph", "[", "src", "]", ":", "return", "self", ".", "_...
Add an edge in the data dependence graph. :param ProgramVariable src: Source node. :param ProgramVariable dst: Destination node. :param edge_labels: All labels associated with the edge. :return: None
[ "Add", "an", "edge", "in", "the", "data", "dependence", "graph", "." ]
python
train
datakortet/dkfileutils
dkfileutils/path.py
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/path.py#L329-L333
def list(self, filterfn=lambda x: True): """Return all direct descendands of directory `self` for which `filterfn` returns True. """ return [self / p for p in self.listdir() if filterfn(self / p)]
[ "def", "list", "(", "self", ",", "filterfn", "=", "lambda", "x", ":", "True", ")", ":", "return", "[", "self", "/", "p", "for", "p", "in", "self", ".", "listdir", "(", ")", "if", "filterfn", "(", "self", "/", "p", ")", "]" ]
Return all direct descendands of directory `self` for which `filterfn` returns True.
[ "Return", "all", "direct", "descendands", "of", "directory", "self", "for", "which", "filterfn", "returns", "True", "." ]
python
train
jtwhite79/pyemu
pyemu/en.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/en.py#L110-L155
def draw(self,cov,num_reals=1,names=None): """ draw random realizations from a multivariate Gaussian distribution Parameters ---------- cov: pyemu.Cov covariance structure to draw from num_reals: int number of realizations to generate names : list list of columns names to draw for. If None, values all names are drawn """ real_names = np.arange(num_reals,dtype=np.int64) # make sure everything is cool WRT ordering if names is not None: vals = self.mean_values.loc[names] cov = cov.get(names) elif self.names != cov.row_names: names = get_common_elements(self.names, cov.row_names) vals = self.mean_values.loc[names] cov = cov.get(names) else: vals = self.mean_values names = self.names # generate random numbers if cov.isdiagonal: #much faster val_array = np.array([np.random.normal(mu,std,size=num_reals) for\ mu,std in zip(vals,np.sqrt(cov.x))]).transpose() else: val_array = np.random.multivariate_normal(vals, cov.as_2d,num_reals) self.loc[:,:] = np.NaN self.dropna(inplace=True) # this sucks - can only set by enlargement one row at a time for rname,vals in zip(real_names,val_array): self.loc[rname, names] = vals # set NaNs to mean_values idx = pd.isnull(self.loc[rname,:]) self.loc[rname,idx] = self.mean_values[idx]
[ "def", "draw", "(", "self", ",", "cov", ",", "num_reals", "=", "1", ",", "names", "=", "None", ")", ":", "real_names", "=", "np", ".", "arange", "(", "num_reals", ",", "dtype", "=", "np", ".", "int64", ")", "# make sure everything is cool WRT ordering", ...
draw random realizations from a multivariate Gaussian distribution Parameters ---------- cov: pyemu.Cov covariance structure to draw from num_reals: int number of realizations to generate names : list list of columns names to draw for. If None, values all names are drawn
[ "draw", "random", "realizations", "from", "a", "multivariate", "Gaussian", "distribution" ]
python
train
gpagliuca/pyfas
build/lib/pyfas/tab.py
https://github.com/gpagliuca/pyfas/blob/5daa1199bd124d315d02bef0ad3888a8f58355b2/build/lib/pyfas/tab.py#L134-L152
def _export_all_fixed(self): """ Exports all the properties for a fixed-type tab file """ array_ts = [] array_ps = [] for array_t, array_p in it.product(self.metadata["t_array"][0], self.metadata["p_array"][0]): array_ts.append(array_t) array_ps.append(array_p/1e5) array_ts_tot = [array_ts for t in self.data.index] array_ps_tot = [array_ps for t in self.data.index] values = [] for idx in self.data.index: values.append(self._partial_extraction_fixed(idx+1)) self.data["Temperature"] = array_ts_tot self.data["Pressure"] = array_ps_tot self.data["values"] = values
[ "def", "_export_all_fixed", "(", "self", ")", ":", "array_ts", "=", "[", "]", "array_ps", "=", "[", "]", "for", "array_t", ",", "array_p", "in", "it", ".", "product", "(", "self", ".", "metadata", "[", "\"t_array\"", "]", "[", "0", "]", ",", "self", ...
Exports all the properties for a fixed-type tab file
[ "Exports", "all", "the", "properties", "for", "a", "fixed", "-", "type", "tab", "file" ]
python
train
nerdvegas/rez
src/rez/solver.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1167-L1366
def solve(self): """Attempt to solve the phase.""" if self.status != SolverStatus.pending: return self scopes = self.scopes[:] failure_reason = None extractions = {} changed_scopes_i = self.changed_scopes_i.copy() def _create_phase(status=None): phase = copy.copy(self) phase.scopes = scopes phase.failure_reason = failure_reason phase.extractions = extractions phase.changed_scopes_i = set() if status is None: phase.status = (SolverStatus.solved if phase._is_solved() else SolverStatus.exhausted) else: phase.status = status return phase # iteratively reduce until no more reductions possible while True: prev_num_scopes = len(scopes) widened_scopes_i = set() # iteratively extract until no more extractions possible while True: self.pr.subheader("EXTRACTING:") extracted_requests = [] # perform all possible extractions with self.solver.timed(self.solver.extraction_time): for i in range(len(scopes)): while True: scope_, extracted_request = scopes[i].extract() if extracted_request: extracted_requests.append(extracted_request) k = (scopes[i].package_name, extracted_request.name) extractions[k] = extracted_request self.solver.extractions_count += 1 scopes[i] = scope_ else: break if not extracted_requests: break # simplify extractions (there may be overlaps) self.pr.subheader("MERGE-EXTRACTIONS:") extracted_requests = RequirementList(extracted_requests) if extracted_requests.conflict: # extractions are in conflict req1, req2 = extracted_requests.conflict conflict = DependencyConflict(req1, req2) failure_reason = DependencyConflicts([conflict]) return _create_phase(SolverStatus.failed) elif self.pr: self.pr("merged extractions: %s", extracted_requests) # intersect extracted requests with current scopes self.pr.subheader("INTERSECTING:") req_fams = [] with self.solver.timed(self.solver.intersection_test_time): for i, scope in enumerate(scopes): extracted_req = extracted_requests.get(scope.package_name) if extracted_req is None: continue # perform the intersection scope_ = 
scope.intersect(extracted_req.range) req_fams.append(extracted_req.name) if scope_ is None: # the scope conflicted with the extraction conflict = DependencyConflict( extracted_req, scope.package_request) failure_reason = DependencyConflicts([conflict]) return _create_phase(SolverStatus.failed) if scope_ is not scope: # the scope was narrowed because it intersected # with an extraction scopes[i] = scope_ changed_scopes_i.add(i) self.solver.intersections_count += 1 # if the intersection caused a conflict scope to turn # into a non-conflict scope, then it has to be reduced # against all other scopes. # # In the very common case, if a scope changes then it # has been narrowed, so there is no need to reduce it # against other unchanged scopes. In this case however, # the scope actually widens! For eg, '~foo-1' may be # intersected with 'foo' to become 'foo-1', which might # then reduce against existing scopes. # if scope.is_conflict and not scope_.is_conflict: widened_scopes_i.add(i) # add new scopes new_extracted_reqs = [ x for x in extracted_requests.requirements if x.name not in req_fams] if new_extracted_reqs: self.pr.subheader("ADDING:") #n = len(scopes) for req in new_extracted_reqs: scope = _PackageScope(req, solver=self.solver) scopes.append(scope) if self.pr: self.pr("added %s", scope) num_scopes = len(scopes) # no further reductions to do if (num_scopes == prev_num_scopes) \ and not changed_scopes_i \ and not widened_scopes_i: break # iteratively reduce until no more reductions possible self.pr.subheader("REDUCING:") if not self.solver.optimised: # force reductions across all scopes changed_scopes_i = set(range(num_scopes)) prev_num_scopes = num_scopes # create set of pending reductions from the list of changed scopes # and list of added scopes. We use a sorted set because the solver # must be deterministic, ie its behavior must always be the same for # a given solve. A normal set does not guarantee order. 
# # Each item is an (x, y) tuple, where scope[x] will reduce by # scope[y].package_request. # pending_reducts = SortedSet() all_scopes_i = range(num_scopes) added_scopes_i = range(prev_num_scopes, num_scopes) for x in range(prev_num_scopes): # existing scopes must reduce against changed scopes for y in changed_scopes_i: if x != y: pending_reducts.add((x, y)) # existing scopes must reduce against newly added scopes for y in added_scopes_i: pending_reducts.add((x, y)) # newly added scopes must reduce against all other scopes for x in added_scopes_i: for y in all_scopes_i: if x != y: pending_reducts.add((x, y)) # 'widened' scopes (see earlier comment in this func) must reduce # against all other scopes for x in widened_scopes_i: for y in all_scopes_i: if x != y: pending_reducts.add((x, y)) # iteratively reduce until there are no more pending reductions. # Note that if a scope is reduced, then other scopes need to reduce # against it once again. with self.solver.timed(self.solver.reduction_test_time): while pending_reducts: x, y = pending_reducts.pop() new_scope, reductions = scopes[x].reduce_by( scopes[y].package_request) if new_scope is None: failure_reason = TotalReduction(reductions) return _create_phase(SolverStatus.failed) elif new_scope is not scopes[x]: scopes[x] = new_scope # other scopes need to reduce against x again for j in all_scopes_i: if j != x: pending_reducts.add((j, x)) changed_scopes_i = set() return _create_phase()
[ "def", "solve", "(", "self", ")", ":", "if", "self", ".", "status", "!=", "SolverStatus", ".", "pending", ":", "return", "self", "scopes", "=", "self", ".", "scopes", "[", ":", "]", "failure_reason", "=", "None", "extractions", "=", "{", "}", "changed_...
Attempt to solve the phase.
[ "Attempt", "to", "solve", "the", "phase", "." ]
python
train
pypa/pipenv
pipenv/vendor/jinja2/filters.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/filters.py#L536-L574
def do_indent( s, width=4, first=False, blank=False, indentfirst=None ): """Return a copy of the string with each line indented by 4 spaces. The first line and blank lines are not indented by default. :param width: Number of spaces to indent by. :param first: Don't skip indenting the first line. :param blank: Don't skip indenting empty lines. .. versionchanged:: 2.10 Blank lines are not indented by default. Rename the ``indentfirst`` argument to ``first``. """ if indentfirst is not None: warnings.warn(DeprecationWarning( 'The "indentfirst" argument is renamed to "first".' ), stacklevel=2) first = indentfirst s += u'\n' # this quirk is necessary for splitlines method indention = u' ' * width if blank: rv = (u'\n' + indention).join(s.splitlines()) else: lines = s.splitlines() rv = lines.pop(0) if lines: rv += u'\n' + u'\n'.join( indention + line if line else line for line in lines ) if first: rv = indention + rv return rv
[ "def", "do_indent", "(", "s", ",", "width", "=", "4", ",", "first", "=", "False", ",", "blank", "=", "False", ",", "indentfirst", "=", "None", ")", ":", "if", "indentfirst", "is", "not", "None", ":", "warnings", ".", "warn", "(", "DeprecationWarning", ...
Return a copy of the string with each line indented by 4 spaces. The first line and blank lines are not indented by default. :param width: Number of spaces to indent by. :param first: Don't skip indenting the first line. :param blank: Don't skip indenting empty lines. .. versionchanged:: 2.10 Blank lines are not indented by default. Rename the ``indentfirst`` argument to ``first``.
[ "Return", "a", "copy", "of", "the", "string", "with", "each", "line", "indented", "by", "4", "spaces", ".", "The", "first", "line", "and", "blank", "lines", "are", "not", "indented", "by", "default", "." ]
python
train
ktbyers/netmiko
netmiko/_textfsm/_texttable.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/_textfsm/_texttable.py#L213-L228
def _SetColour(self, value_list): """Sets row's colour attributes to a list of values in terminal.SGR.""" if value_list is None: self._color = None return colors = [] for color in value_list: if color in terminal.SGR: colors.append(color) elif color in terminal.FG_COLOR_WORDS: colors += terminal.FG_COLOR_WORDS[color] elif color in terminal.BG_COLOR_WORDS: colors += terminal.BG_COLOR_WORDS[color] else: raise ValueError("Invalid colour specification.") self._color = list(set(colors))
[ "def", "_SetColour", "(", "self", ",", "value_list", ")", ":", "if", "value_list", "is", "None", ":", "self", ".", "_color", "=", "None", "return", "colors", "=", "[", "]", "for", "color", "in", "value_list", ":", "if", "color", "in", "terminal", ".", ...
Sets row's colour attributes to a list of values in terminal.SGR.
[ "Sets", "row", "s", "colour", "attributes", "to", "a", "list", "of", "values", "in", "terminal", ".", "SGR", "." ]
python
train
dlintott/gns3-converter
gns3converter/node.py
https://github.com/dlintott/gns3-converter/blob/acbc55da51de86388dc5b5f6da55809b3c86b7ca/gns3converter/node.py#L376-L395
def calc_link(self, src_id, src_port, src_port_name, destination): """ Add a link item for processing later :param int src_id: Source node ID :param int src_port: Source port ID :param str src_port_name: Source port name :param dict destination: Destination """ if destination['device'] == 'NIO': destination['port'] = destination['port'].lower() link = {'source_node_id': src_id, 'source_port_id': src_port, 'source_port_name': src_port_name, 'source_dev': self.node['properties']['name'], 'dest_dev': destination['device'], 'dest_port': destination['port']} self.links.append(link)
[ "def", "calc_link", "(", "self", ",", "src_id", ",", "src_port", ",", "src_port_name", ",", "destination", ")", ":", "if", "destination", "[", "'device'", "]", "==", "'NIO'", ":", "destination", "[", "'port'", "]", "=", "destination", "[", "'port'", "]", ...
Add a link item for processing later :param int src_id: Source node ID :param int src_port: Source port ID :param str src_port_name: Source port name :param dict destination: Destination
[ "Add", "a", "link", "item", "for", "processing", "later" ]
python
train
liip/taxi
taxi/commands/alias.py
https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/commands/alias.py#L50-L63
def add(ctx, alias, mapping, backend): """ Add a new alias to your configuration file. """ if not backend: backends_list = ctx.obj['settings'].get_backends() if len(backends_list) > 1: raise click.UsageError( "You're using more than 1 backend. Please set the backend to " "add the alias to with the --backend option (choices are %s)" % ", ".join(dict(backends_list).keys()) ) add_mapping(ctx, alias, mapping, backend)
[ "def", "add", "(", "ctx", ",", "alias", ",", "mapping", ",", "backend", ")", ":", "if", "not", "backend", ":", "backends_list", "=", "ctx", ".", "obj", "[", "'settings'", "]", ".", "get_backends", "(", ")", "if", "len", "(", "backends_list", ")", ">"...
Add a new alias to your configuration file.
[ "Add", "a", "new", "alias", "to", "your", "configuration", "file", "." ]
python
train
saltstack/salt
salt/modules/win_system.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L1297-L1332
def get_pending_domain_join(): ''' Determine whether there is a pending domain join action that requires a reboot. .. versionadded:: 2016.11.0 Returns: bool: ``True`` if there is a pending domain join action, otherwise ``False`` CLI Example: .. code-block:: bash salt '*' system.get_pending_domain_join ''' base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon' avoid_key = r'{0}\AvoidSpnSet'.format(base_key) join_key = r'{0}\JoinDomain'.format(base_key) # If either the avoid_key or join_key is present, # then there is a reboot pending. if __utils__['reg.key_exists']('HKLM', avoid_key): log.debug('Key exists: %s', avoid_key) return True else: log.debug('Key does not exist: %s', avoid_key) if __utils__['reg.key_exists']('HKLM', join_key): log.debug('Key exists: %s', join_key) return True else: log.debug('Key does not exist: %s', join_key) return False
[ "def", "get_pending_domain_join", "(", ")", ":", "base_key", "=", "r'SYSTEM\\CurrentControlSet\\Services\\Netlogon'", "avoid_key", "=", "r'{0}\\AvoidSpnSet'", ".", "format", "(", "base_key", ")", "join_key", "=", "r'{0}\\JoinDomain'", ".", "format", "(", "base_key", ")"...
Determine whether there is a pending domain join action that requires a reboot. .. versionadded:: 2016.11.0 Returns: bool: ``True`` if there is a pending domain join action, otherwise ``False`` CLI Example: .. code-block:: bash salt '*' system.get_pending_domain_join
[ "Determine", "whether", "there", "is", "a", "pending", "domain", "join", "action", "that", "requires", "a", "reboot", "." ]
python
train
juicer/juicer
juicer/utils/texttable.py
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/utils/texttable.py#L373-L397
def _compute_cols_width(self): """Return an array with the width of each column If a specific width has been specified, exit. If the total of the columns width exceed the table desired width, another width will be computed to fit, and cells will be wrapped. """ if hasattr(self, "_width"): return maxi = [] if self._header: maxi = [ self._len_cell(x) for x in self._header ] for row in self._rows: for cell,i in zip(row, range(len(row))): try: maxi[i] = max(maxi[i], self._len_cell(cell)) except (TypeError, IndexError): maxi.append(self._len_cell(cell)) items = len(maxi) length = reduce(lambda x,y: x+y, maxi) if self._max_width and length + items*3 + 1 > self._max_width: maxi = [(self._max_width - items*3 -1) / items \ for n in range(items)] self._width = maxi
[ "def", "_compute_cols_width", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "\"_width\"", ")", ":", "return", "maxi", "=", "[", "]", "if", "self", ".", "_header", ":", "maxi", "=", "[", "self", ".", "_len_cell", "(", "x", ")", "for", "x...
Return an array with the width of each column If a specific width has been specified, exit. If the total of the columns width exceed the table desired width, another width will be computed to fit, and cells will be wrapped.
[ "Return", "an", "array", "with", "the", "width", "of", "each", "column" ]
python
train
pycontribs/jira
jira/resources.py
https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/resources.py#L573-L580
def delete(self, deleteSubtasks=False): """Delete this issue from the server. :param deleteSubtasks: if the issue has subtasks, this argument must be set to true for the call to succeed. :type deleteSubtasks: bool """ super(Issue, self).delete(params={'deleteSubtasks': deleteSubtasks})
[ "def", "delete", "(", "self", ",", "deleteSubtasks", "=", "False", ")", ":", "super", "(", "Issue", ",", "self", ")", ".", "delete", "(", "params", "=", "{", "'deleteSubtasks'", ":", "deleteSubtasks", "}", ")" ]
Delete this issue from the server. :param deleteSubtasks: if the issue has subtasks, this argument must be set to true for the call to succeed. :type deleteSubtasks: bool
[ "Delete", "this", "issue", "from", "the", "server", "." ]
python
train
d0ugal/python-rfxcom
rfxcom/protocol/elec.py
https://github.com/d0ugal/python-rfxcom/blob/2eb87f85e5f5a04d00f32f25e0f010edfefbde0d/rfxcom/protocol/elec.py#L77-L129
def parse(self, data): """Parse a 18 bytes packet in the Electricity format and return a dictionary containing the data extracted. An example of a return value would be: .. code-block:: python { 'count': 3, 'current_watts': 692, 'id': "0x2EB2", 'packet_length': 17, 'packet_type': 90, 'packet_type_name': 'Energy usage sensors', 'sequence_number': 0, 'packet_subtype': 1, 'packet_subtype_name': "CM119/160", 'total_watts': 920825.1947099693, 'signal_level': 9, 'battery_level': 6, } :param data: bytearray to be parsed :type data: bytearray :return: Data dictionary containing the parsed values :rtype: dict """ self.validate_packet(data) TOTAL_DIVISOR = 223.666 id_ = self.dump_hex(data[4:6]) count = data[6] instant = data[7:11] total = data[11:16] current_watts = self._bytes_to_uint_32(instant) total_watts = self._bytes_to_uint_48(total) / TOTAL_DIVISOR sensor_specific = { 'count': count, 'current_watts': current_watts, 'id': id_, 'total_watts': total_watts } results = self.parse_header_part(data) results.update(RfxPacketUtils.parse_signal_and_battery(data[17])) results.update(sensor_specific) return results
[ "def", "parse", "(", "self", ",", "data", ")", ":", "self", ".", "validate_packet", "(", "data", ")", "TOTAL_DIVISOR", "=", "223.666", "id_", "=", "self", ".", "dump_hex", "(", "data", "[", "4", ":", "6", "]", ")", "count", "=", "data", "[", "6", ...
Parse a 18 bytes packet in the Electricity format and return a dictionary containing the data extracted. An example of a return value would be: .. code-block:: python { 'count': 3, 'current_watts': 692, 'id': "0x2EB2", 'packet_length': 17, 'packet_type': 90, 'packet_type_name': 'Energy usage sensors', 'sequence_number': 0, 'packet_subtype': 1, 'packet_subtype_name': "CM119/160", 'total_watts': 920825.1947099693, 'signal_level': 9, 'battery_level': 6, } :param data: bytearray to be parsed :type data: bytearray :return: Data dictionary containing the parsed values :rtype: dict
[ "Parse", "a", "18", "bytes", "packet", "in", "the", "Electricity", "format", "and", "return", "a", "dictionary", "containing", "the", "data", "extracted", ".", "An", "example", "of", "a", "return", "value", "would", "be", ":" ]
python
train
adamziel/python_translate
python_translate/loaders.py
https://github.com/adamziel/python_translate/blob/0aee83f434bd2d1b95767bcd63adb7ac7036c7df/python_translate/loaders.py#L79-L92
def read_file(self, path): """ Reads a file into memory and returns it's contents @type path: str @param path: path to load """ self.assert_valid_path(path) with open(path, 'rb') as file: contents = file.read().decode('UTF-8') return contents
[ "def", "read_file", "(", "self", ",", "path", ")", ":", "self", ".", "assert_valid_path", "(", "path", ")", "with", "open", "(", "path", ",", "'rb'", ")", "as", "file", ":", "contents", "=", "file", ".", "read", "(", ")", ".", "decode", "(", "'UTF-...
Reads a file into memory and returns it's contents @type path: str @param path: path to load
[ "Reads", "a", "file", "into", "memory", "and", "returns", "it", "s", "contents" ]
python
train
nedbat/django_coverage_plugin
django_coverage_plugin/plugin.py
https://github.com/nedbat/django_coverage_plugin/blob/0072737c0ea5a1ca6b9f046af4947de191f13804/django_coverage_plugin/plugin.py#L397-L418
def dump_frame(frame, label=""): """Dump interesting information about this frame.""" locals = dict(frame.f_locals) self = locals.get('self', None) context = locals.get('context', None) if "__builtins__" in locals: del locals["__builtins__"] if label: label = " ( %s ) " % label print("-- frame --%s---------------------" % label) print("{}:{}:{}".format( os.path.basename(frame.f_code.co_filename), frame.f_lineno, type(self), )) print(locals) if self: print("self:", self.__dict__) if context: print("context:", context.__dict__) print("\\--")
[ "def", "dump_frame", "(", "frame", ",", "label", "=", "\"\"", ")", ":", "locals", "=", "dict", "(", "frame", ".", "f_locals", ")", "self", "=", "locals", ".", "get", "(", "'self'", ",", "None", ")", "context", "=", "locals", ".", "get", "(", "'cont...
Dump interesting information about this frame.
[ "Dump", "interesting", "information", "about", "this", "frame", "." ]
python
train
oanda/v20-python
src/v20/order.py
https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/order.py#L3730-L3855
def list( self, accountID, **kwargs ): """ Get a list of Orders for an Account Args: accountID: Account Identifier ids: List of Order IDs to retrieve state: The state to filter the requested Orders by instrument: The instrument to filter the requested orders by count: The maximum number of Orders to return beforeID: The maximum Order ID to return. If not provided the most recent Orders in the Account are returned Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'GET', '/v3/accounts/{accountID}/orders' ) request.set_path_param( 'accountID', accountID ) request.set_param( 'ids', kwargs.get('ids') ) request.set_param( 'state', kwargs.get('state') ) request.set_param( 'instrument', kwargs.get('instrument') ) request.set_param( 'count', kwargs.get('count') ) request.set_param( 'beforeID', kwargs.get('beforeID') ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('orders') is not None: parsed_body['orders'] = [ self.ctx.order.Order.from_dict(d, self.ctx) for d in jbody.get('orders') ] if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') elif str(response.status) == "400": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ 
jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
[ "def", "list", "(", "self", ",", "accountID", ",", "*", "*", "kwargs", ")", ":", "request", "=", "Request", "(", "'GET'", ",", "'/v3/accounts/{accountID}/orders'", ")", "request", ".", "set_path_param", "(", "'accountID'", ",", "accountID", ")", "request", "...
Get a list of Orders for an Account Args: accountID: Account Identifier ids: List of Order IDs to retrieve state: The state to filter the requested Orders by instrument: The instrument to filter the requested orders by count: The maximum number of Orders to return beforeID: The maximum Order ID to return. If not provided the most recent Orders in the Account are returned Returns: v20.response.Response containing the results from submitting the request
[ "Get", "a", "list", "of", "Orders", "for", "an", "Account" ]
python
train
fermiPy/fermipy
fermipy/merge_utils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/merge_utils.py#L39-L50
def update_image(hdu_in, hdu=None): """ 'Update' an image HDU This checks hdu exists and creates it from hdu_in if it does not. If hdu does exist, this adds the data in hdu_in to hdu """ if hdu is None: hdu = fits.ImageHDU( data=hdu_in.data, header=hdu_in.header, name=hdu_in.name) else: hdu.data += hdu_in.data return hdu
[ "def", "update_image", "(", "hdu_in", ",", "hdu", "=", "None", ")", ":", "if", "hdu", "is", "None", ":", "hdu", "=", "fits", ".", "ImageHDU", "(", "data", "=", "hdu_in", ".", "data", ",", "header", "=", "hdu_in", ".", "header", ",", "name", "=", ...
'Update' an image HDU This checks hdu exists and creates it from hdu_in if it does not. If hdu does exist, this adds the data in hdu_in to hdu
[ "Update", "an", "image", "HDU" ]
python
train
numenta/nupic
examples/tm/tm_overlapping_sequences.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/tm/tm_overlapping_sequences.py#L113-L176
def buildOverlappedSequences( numSequences = 2, seqLen = 5, sharedElements = [3,4], numOnBitsPerPattern = 3, patternOverlap = 0, seqOverlap = 0, **kwargs ): """ Create training sequences that share some elements in the middle. Parameters: ----------------------------------------------------- numSequences: Number of unique training sequences to generate seqLen: Overall length of each sequence sharedElements: Which element indices of each sequence are shared. These will be in the range between 0 and seqLen-1 numOnBitsPerPattern: Number of ON bits in each TM input pattern patternOverlap: Max number of bits of overlap between any 2 patterns retval: (numCols, trainingSequences) numCols - width of the patterns trainingSequences - a list of training sequences """ # Total number of patterns used to build the sequences numSharedElements = len(sharedElements) numUniqueElements = seqLen - numSharedElements numPatterns = numSharedElements + numUniqueElements * numSequences # Create the table of patterns patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap) # Total number of columns required numCols = len(patterns[0]) # ----------------------------------------------------------------------- # Create the training sequences trainingSequences = [] uniquePatternIndices = range(numSharedElements, numPatterns) for _ in xrange(numSequences): sequence = [] # pattern indices [0 ... numSharedElements-1] are reserved for the shared # middle sharedPatternIndices = range(numSharedElements) # Build up the sequence for j in xrange(seqLen): if j in sharedElements: patIdx = sharedPatternIndices.pop(0) else: patIdx = uniquePatternIndices.pop(0) sequence.append(patterns[patIdx]) trainingSequences.append(sequence) if VERBOSITY >= 3: print "\nTraining sequences" printAllTrainingSequences(trainingSequences) return (numCols, trainingSequences)
[ "def", "buildOverlappedSequences", "(", "numSequences", "=", "2", ",", "seqLen", "=", "5", ",", "sharedElements", "=", "[", "3", ",", "4", "]", ",", "numOnBitsPerPattern", "=", "3", ",", "patternOverlap", "=", "0", ",", "seqOverlap", "=", "0", ",", "*", ...
Create training sequences that share some elements in the middle. Parameters: ----------------------------------------------------- numSequences: Number of unique training sequences to generate seqLen: Overall length of each sequence sharedElements: Which element indices of each sequence are shared. These will be in the range between 0 and seqLen-1 numOnBitsPerPattern: Number of ON bits in each TM input pattern patternOverlap: Max number of bits of overlap between any 2 patterns retval: (numCols, trainingSequences) numCols - width of the patterns trainingSequences - a list of training sequences
[ "Create", "training", "sequences", "that", "share", "some", "elements", "in", "the", "middle", "." ]
python
valid
rigetti/pyquil
pyquil/latex/latex_generation.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/latex/latex_generation.py#L358-L379
def _cz_gate(self, lines): """ Return the TikZ code for an n-controlled Z-gate. :param lines: List of all qubits involved. :type: list[int] """ line = lines[0] delta_pos = self._gate_offset(Z) gate_width = self._gate_width(Z) gate_str = self._phase(line, self.pos[line]) for ctrl in lines[1:]: gate_str += self._phase(ctrl, self.pos[line]) gate_str += self._line(ctrl, line) new_pos = self.pos[line] + delta_pos + gate_width for i in lines: self.op_count[i] += 1 for i in range(min(lines), max(lines) + 1): self.pos[i] = new_pos return gate_str
[ "def", "_cz_gate", "(", "self", ",", "lines", ")", ":", "line", "=", "lines", "[", "0", "]", "delta_pos", "=", "self", ".", "_gate_offset", "(", "Z", ")", "gate_width", "=", "self", ".", "_gate_width", "(", "Z", ")", "gate_str", "=", "self", ".", "...
Return the TikZ code for an n-controlled Z-gate. :param lines: List of all qubits involved. :type: list[int]
[ "Return", "the", "TikZ", "code", "for", "an", "n", "-", "controlled", "Z", "-", "gate", "." ]
python
train