repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
droope/droopescan
dscan/common/update_api.py
https://github.com/droope/droopescan/blob/424c48a0f9d12b4536dbef5a786f0fbd4ce9519a/dscan/common/update_api.py#L159-L175
def hashes_get(versions_file, base_path):
    """
    Gets hashes for currently checked out version.
    @param versions_file: a common.VersionsFile instance to check against.
    @param base_path: where to look for files. e.g.
        './.update-workspace/silverstripe/'
    @return: checksums {'file1': 'hash1'}
    """
    checksums = {}
    for filename in versions_file.files_get_all():
        try:
            checksums[filename] = functions.md5_file(base_path + filename)
        except IOError:
            # Not all files exist for all versions.
            continue
    return checksums
[ "def", "hashes_get", "(", "versions_file", ",", "base_path", ")", ":", "files", "=", "versions_file", ".", "files_get_all", "(", ")", "result", "=", "{", "}", "for", "f", "in", "files", ":", "try", ":", "result", "[", "f", "]", "=", "functions", ".", ...
Gets hashes for currently checked out version. @param versions_file: a common.VersionsFile instance to check against. @param base_path: where to look for files. e.g. './.update-workspace/silverstripe/' @return: checksums {'file1': 'hash1'}
[ "Gets", "hashes", "for", "currently", "checked", "out", "version", "." ]
python
train
mikedh/trimesh
trimesh/path/packing.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/packing.py#L178-L222
def pack_paths(paths, sheet_size=None):
    """
    Pack a list of Path2D objects into a rectangle.

    Parameters
    ------------
    paths : (n,) Path2D
      Geometry to be packed
    sheet_size : None or (2,) float
      Size of the rectangular sheet to pack into; sorted so the
      longer edge comes first. If None, no sheet constraint.

    Returns
    ------------
    packed : trimesh.path.Path2D
      Object containing input geometry
    inserted : (m,) int
      Indexes of paths inserted into result
    """
    from .util import concatenate

    if sheet_size is not None:
        # orient the sheet as (long edge, short edge)
        sheet_size = np.sort(sheet_size)[::-1]

    # how many copies of each path were requested (defaults to one)
    quantity = [path.metadata.get('quantity', 1) for path in paths]

    # pack using exterior polygon (will OBB)
    polygons = [i.polygons_closed[i.root[0]] for i in paths]

    # pack the polygons using rectangular bin packing
    inserted, transforms = multipack(polygons=polygons,
                                     quantity=quantity,
                                     sheet_size=sheet_size)

    multi = []
    for i, T in zip(inserted, transforms):
        multi.append(paths[i].copy())
        multi[-1].apply_transform(T)
    # append all packed paths into a single Path object
    packed = concatenate(multi)

    return packed, inserted
[ "def", "pack_paths", "(", "paths", ",", "sheet_size", "=", "None", ")", ":", "from", ".", "util", "import", "concatenate", "if", "sheet_size", "is", "not", "None", ":", "sheet_size", "=", "np", ".", "sort", "(", "sheet_size", ")", "[", ":", ":", "-", ...
Pack a list of Path2D objects into a rectangle. Parameters ------------ paths: (n,) Path2D Geometry to be packed Returns ------------ packed : trimesh.path.Path2D Object containing input geometry inserted : (m,) int Indexes of paths inserted into result
[ "Pack", "a", "list", "of", "Path2D", "objects", "into", "a", "rectangle", "." ]
python
train
vatlab/SoS
src/sos/utils.py
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/utils.py#L1498-L1504
def sample_lines(lines, n):
    '''Draw a sample of n lines from `lines`, largely evenly.

    If there are at most n lines, all of them are returned joined;
    otherwise one line is taken from the middle of each of n
    equal-sized strata.
    '''
    if len(lines) <= n:
        return ''.join(lines)
    m = len(lines)
    # stratum width is m // n; offset m // (2 * n) lands near its middle
    return ''.join([lines[x * m // n + m // (2 * n)] for x in range(n)])
[ "def", "sample_lines", "(", "lines", ",", "n", ")", ":", "if", "len", "(", "lines", ")", "<=", "n", ":", "return", "''", ".", "join", "(", "lines", ")", "else", ":", "m", "=", "len", "(", "lines", ")", "return", "''", ".", "join", "(", "[", "...
Draw a sample of n lines from filename, largely evenly.
[ "Draw", "a", "sample", "of", "n", "lines", "from", "filename", "largely", "evenly", "." ]
python
train
jleclanche/fireplace
fireplace/game.py
https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/game.py#L203-L223
def trigger_actions(self, source, actions):
    """
    Performs a list of `actions` from `source`.
    This should seldom be called directly - use `queue_actions` instead.
    """
    results = []
    for action in actions:
        if not isinstance(action, EventListener):
            results.append(action.trigger(source))
            continue
        # Queuing an EventListener registers it as a one-time event
        # This allows registering events from eg. play actions
        self.log("Registering event listener %r on %r", action, self)
        action.once = True
        # FIXME: Figure out a cleaner way to get the event listener target
        if source.type == CardType.SPELL:
            listener = source.controller
        else:
            listener = source
        listener._events.append(action)
    return results
[ "def", "trigger_actions", "(", "self", ",", "source", ",", "actions", ")", ":", "ret", "=", "[", "]", "for", "action", "in", "actions", ":", "if", "isinstance", "(", "action", ",", "EventListener", ")", ":", "# Queuing an EventListener registers it as a one-time...
Performs a list of `actions` from `source`. This should seldom be called directly - use `queue_actions` instead.
[ "Performs", "a", "list", "of", "actions", "from", "source", ".", "This", "should", "seldom", "be", "called", "directly", "-", "use", "queue_actions", "instead", "." ]
python
train
tgbugs/pyontutils
nifstd/nifstd_tools/parcellation/coco.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/nifstd/nifstd_tools/parcellation/coco.py#L164-L171
def datagetter(cls):
    """ example datagetter function, make any local modifications here """
    with open('myfile', 'rt') as f:
        rows = [r for r in csv.reader(f)]

    # placeholder transform: replace each row with its column indices
    def dothing(row):
        return [i for i, v in enumerate(row)]

    rows = [dothing(row) for row in rows]
    raise NotImplementedError('You need to implement this yourlself!')
    return rows
[ "def", "datagetter", "(", "cls", ")", ":", "with", "open", "(", "'myfile'", ",", "'rt'", ")", "as", "f", ":", "rows", "=", "[", "r", "for", "r", "in", "csv", ".", "reader", "(", "f", ")", "]", "dothing", "=", "lambda", "_", ":", "[", "i", "fo...
example datagetter function, make any local modifications here
[ "example", "datagetter", "function", "make", "any", "local", "modifications", "here" ]
python
train
PrefPy/prefpy
prefpy/mechanism.py
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L475-L511
def computeStrongestPaths(self, profile, pairwisePreferences):
    """
    Returns a two-dimensional dictionary that associates every pair of
    candidates, cand1 and cand2, with the strongest path from cand1 to cand2.

    :ivar Profile profile: A Profile object that represents an election
        profile.
    :ivar dict<int,dict<int,int>> pairwisePreferences: A two-dimensional
        dictionary that associates every pair of candidates, cand1 and cand2,
        with number of voters who prefer cand1 to cand2.
    """
    cands = profile.candMap.keys()
    numCands = len(cands)

    # Seed each ordered pair with its direct pairwise margin, or 0 when the
    # first candidate does not beat the second head-to-head.
    strongestPaths = {cand: dict() for cand in cands}
    for i in range(1, numCands + 1):
        for j in range(1, numCands + 1):
            if i == j:
                continue
            if pairwisePreferences[i][j] > pairwisePreferences[j][i]:
                strongestPaths[i][j] = pairwisePreferences[i][j]
            else:
                strongestPaths[i][j] = 0

    # Floyd-Warshall-style relaxation: widen each path j -> k through i.
    for i in range(1, numCands + 1):
        for j in range(1, numCands + 1):
            if i == j:
                continue
            for k in range(1, numCands + 1):
                if k == i or k == j:
                    continue
                strongestPaths[j][k] = max(
                    strongestPaths[j][k],
                    min(strongestPaths[j][i], strongestPaths[i][k]))

    return strongestPaths
[ "def", "computeStrongestPaths", "(", "self", ",", "profile", ",", "pairwisePreferences", ")", ":", "cands", "=", "profile", ".", "candMap", ".", "keys", "(", ")", "numCands", "=", "len", "(", "cands", ")", "# Initialize the two-dimensional dictionary that will hold ...
Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and cand2, with the strongest path from cand1 to cand2. :ivar Profile profile: A Profile object that represents an election profile. :ivar dict<int,dict<int,int>> pairwisePreferences: A two-dimensional dictionary that associates every pair of candidates, cand1 and cand2, with number of voters who prefer cand1 to cand2.
[ "Returns", "a", "two", "-", "dimensional", "dictionary", "that", "associates", "every", "pair", "of", "candidates", "cand1", "and", "cand2", "with", "the", "strongest", "path", "from", "cand1", "to", "cand2", "." ]
python
train
googleapis/google-cloud-python
datastore/google/cloud/datastore/batch.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/batch.py#L260-L276
def commit(self):
    """Commits the batch.

    This is called automatically upon exiting a with statement,
    however it can be called explicitly if you don't want to use a
    context manager.

    :raises: :class:`~exceptions.ValueError` if the batch is not
             in progress.
    """
    if self._status != self._IN_PROGRESS:
        raise ValueError("Batch must be in progress to commit()")

    try:
        self._commit()
    finally:
        # Transition to FINISHED even when the commit itself raised.
        self._status = self._FINISHED
[ "def", "commit", "(", "self", ")", ":", "if", "self", ".", "_status", "!=", "self", ".", "_IN_PROGRESS", ":", "raise", "ValueError", "(", "\"Batch must be in progress to commit()\"", ")", "try", ":", "self", ".", "_commit", "(", ")", "finally", ":", "self", ...
Commits the batch. This is called automatically upon exiting a with statement, however it can be called explicitly if you don't want to use a context manager. :raises: :class:`~exceptions.ValueError` if the batch is not in progress.
[ "Commits", "the", "batch", "." ]
python
train
twilio/twilio-python
twilio/rest/serverless/v1/service/asset/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/serverless/v1/service/asset/__init__.py#L120-L137
def create(self, friendly_name):
    """
    Create a new AssetInstance

    :param unicode friendly_name: The friendly_name

    :returns: Newly created AssetInstance
    :rtype: twilio.rest.serverless.v1.service.asset.AssetInstance
    """
    payload = self._version.create(
        'POST',
        self._uri,
        data=values.of({'FriendlyName': friendly_name, }),
    )

    return AssetInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
    )
[ "def", "create", "(", "self", ",", "friendly_name", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'FriendlyName'", ":", "friendly_name", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "create", "(", "'POST'", ",", "self", ".", ...
Create a new AssetInstance :param unicode friendly_name: The friendly_name :returns: Newly created AssetInstance :rtype: twilio.rest.serverless.v1.service.asset.AssetInstance
[ "Create", "a", "new", "AssetInstance" ]
python
train
allenai/allennlp
allennlp/nn/util.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L919-L941
def logsumexp(tensor: torch.Tensor, dim: int = -1, keepdim: bool = False) -> torch.Tensor:
    """
    A numerically stable computation of logsumexp. This is mathematically
    equivalent to `tensor.exp().sum(dim, keepdim=keepdim).log()`.  This
    function is typically used for summing log probabilities.

    Parameters
    ----------
    tensor : torch.FloatTensor, required.
        A tensor of arbitrary size.
    dim : int, optional (default = -1)
        The dimension of the tensor to apply the logsumexp to.
    keepdim: bool, optional (default = False)
        Whether to retain a dimension of size one at the dimension we reduce over.

    Returns
    -------
    torch.Tensor
        ``log(sum(exp(tensor)))`` reduced over ``dim``.
    """
    # Subtract the per-slice maximum before exponentiating so large inputs
    # cannot overflow; add it back outside the log.
    max_score, _ = tensor.max(dim, keepdim=keepdim)
    if keepdim:
        stable_vec = tensor - max_score
    else:
        stable_vec = tensor - max_score.unsqueeze(dim)
    return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
[ "def", "logsumexp", "(", "tensor", ":", "torch", ".", "Tensor", ",", "dim", ":", "int", "=", "-", "1", ",", "keepdim", ":", "bool", "=", "False", ")", "->", "torch", ".", "Tensor", ":", "max_score", ",", "_", "=", "tensor", ".", "max", "(", "dim"...
A numerically stable computation of logsumexp. This is mathematically equivalent to `tensor.exp().sum(dim, keep=keepdim).log()`. This function is typically used for summing log probabilities. Parameters ---------- tensor : torch.FloatTensor, required. A tensor of arbitrary size. dim : int, optional (default = -1) The dimension of the tensor to apply the logsumexp to. keepdim: bool, optional (default = False) Whether to retain a dimension of size one at the dimension we reduce over.
[ "A", "numerically", "stable", "computation", "of", "logsumexp", ".", "This", "is", "mathematically", "equivalent", "to", "tensor", ".", "exp", "()", ".", "sum", "(", "dim", "keep", "=", "keepdim", ")", ".", "log", "()", ".", "This", "function", "is", "ty...
python
train
saimn/sigal
sigal/plugins/watermark.py
https://github.com/saimn/sigal/blob/912ca39991355d358dc85fd55c7aeabdd7acc386/sigal/plugins/watermark.py#L55-L80
def watermark(im, mark, position, opacity=1):
    """Adds a watermark to an image."""
    if opacity < 1:
        mark = reduce_opacity(mark, opacity)
    if im.mode != 'RGBA':
        im = im.convert('RGBA')

    # create a transparent layer the size of the image and draw the
    # watermark in that layer.
    layer = Image.new('RGBA', im.size, (0, 0, 0, 0))
    im_w, im_h = im.size

    if position == 'tile':
        for y in range(0, im_h, mark.size[1]):
            for x in range(0, im_w, mark.size[0]):
                layer.paste(mark, (x, y))
    elif position == 'scale':
        # scale, but preserve the aspect ratio
        ratio = min(float(im_w) / mark.size[0], float(im_h) / mark.size[1])
        w = int(mark.size[0] * ratio)
        h = int(mark.size[1] * ratio)
        mark = mark.resize((w, h))
        layer.paste(mark, (int((im_w - w) / 2), int((im_h - h) / 2)))
    else:
        layer.paste(mark, position)

    # composite the watermark with the layer
    return Image.composite(layer, im, layer)
[ "def", "watermark", "(", "im", ",", "mark", ",", "position", ",", "opacity", "=", "1", ")", ":", "if", "opacity", "<", "1", ":", "mark", "=", "reduce_opacity", "(", "mark", ",", "opacity", ")", "if", "im", ".", "mode", "!=", "'RGBA'", ":", "im", ...
Adds a watermark to an image.
[ "Adds", "a", "watermark", "to", "an", "image", "." ]
python
valid
QuantEcon/QuantEcon.py
quantecon/game_theory/support_enumeration.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/game_theory/support_enumeration.py#L112-L180
def _indiff_mixed_action(payoff_matrix, own_supp, opp_supp, A, out):
    """
    Given a player's payoff matrix `payoff_matrix`, an array `own_supp` of
    this player's actions, and an array `opp_supp` of the opponent's
    actions, each of length k, compute the opponent's mixed action whose
    support equals `opp_supp` and for which the player is indifferent among
    the actions in `own_supp`, if any such exists. Return `True` if such a
    mixed action exists and actions in `own_supp` are indeed best responses
    to it, in which case the outcome is stored in `out`; `False` otherwise.
    Array `A` is used in intermediate steps.

    Parameters
    ----------
    payoff_matrix : ndarray(ndim=2)
        The player's payoff matrix, of shape (m, n).

    own_supp : ndarray(int, ndim=1)
        Array containing the player's action indices, of length k.

    opp_supp : ndarray(int, ndim=1)
        Array containing the opponent's action indices, of length k.

    A : ndarray(float, ndim=2)
        Array used in intermediate steps, of shape (k+1, k+1).

    out : ndarray(float, ndim=1)
        Array of length k+1 to store the k nonzero values of the desired
        mixed action in `out[:-1]` (and the payoff value in `out[-1]`).

    Returns
    -------
    bool
        `True` if a desired mixed action exists and `False` otherwise.

    """
    m = payoff_matrix.shape[0]
    k = len(own_supp)

    for i in range(k):
        for j in range(k):
            A[j, i] = payoff_matrix[own_supp[i], opp_supp[j]]  # transpose
    A[:-1, -1] = 1
    A[-1, :-1] = -1
    A[-1, -1] = 0
    out[:-1] = 0
    out[-1] = 1

    solve_status = _numba_linalg_solve(A, out)
    if solve_status != 0:  # A: singular
        return False
    for i in range(k):
        if out[i] <= 0:
            return False
    payoff_val = out[-1]

    if k == m:
        return True

    # Reject if any action outside the support earns strictly more.
    in_support = np.zeros(m, np.bool_)
    in_support[own_supp] = True
    for i in range(m):
        if in_support[i]:
            continue
        ext_payoff = 0
        for j in range(k):
            ext_payoff += payoff_matrix[i, opp_supp[j]] * out[j]
        if ext_payoff > payoff_val:
            return False
    return True
[ "def", "_indiff_mixed_action", "(", "payoff_matrix", ",", "own_supp", ",", "opp_supp", ",", "A", ",", "out", ")", ":", "m", "=", "payoff_matrix", ".", "shape", "[", "0", "]", "k", "=", "len", "(", "own_supp", ")", "for", "i", "in", "range", "(", "k",...
Given a player's payoff matrix `payoff_matrix`, an array `own_supp` of this player's actions, and an array `opp_supp` of the opponent's actions, each of length k, compute the opponent's mixed action whose support equals `opp_supp` and for which the player is indifferent among the actions in `own_supp`, if any such exists. Return `True` if such a mixed action exists and actions in `own_supp` are indeed best responses to it, in which case the outcome is stored in `out`; `False` otherwise. Array `A` is used in intermediate steps. Parameters ---------- payoff_matrix : ndarray(ndim=2) The player's payoff matrix, of shape (m, n). own_supp : ndarray(int, ndim=1) Array containing the player's action indices, of length k. opp_supp : ndarray(int, ndim=1) Array containing the opponent's action indices, of length k. A : ndarray(float, ndim=2) Array used in intermediate steps, of shape (k+1, k+1). out : ndarray(float, ndim=1) Array of length k+1 to store the k nonzero values of the desired mixed action in `out[:-1]` (and the payoff value in `out[-1]`). Returns ------- bool `True` if a desired mixed action exists and `False` otherwise.
[ "Given", "a", "player", "s", "payoff", "matrix", "payoff_matrix", "an", "array", "own_supp", "of", "this", "player", "s", "actions", "and", "an", "array", "opp_supp", "of", "the", "opponent", "s", "actions", "each", "of", "length", "k", "compute", "the", "...
python
train
spacetelescope/drizzlepac
drizzlepac/hlautils/catalog_utils.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/hlautils/catalog_utils.py#L20-L60
def randomSelectFromCSV(tableName, numEntries, seedValue):
    """Function to extract random entries (lines) from a CSV file

    Parameters
    ==========
    tableName: str
        Filename of the input master CSV file containing individual
        images or association names, as well as observational
        information regarding the images

    numEntries : int
        Number of entries/rows to extract from the master input CSV file

    seedValue : int
        Value used to initialize the random number generator for the
        selection of random entries

    Returns
    =======
    outputTable : object
        Astropy Table object
    """
    # Initialize the random number generator
    seed(seedValue)

    # Get the contents of the table
    dataTable = Table.read(tableName, format='ascii.csv')
    numRows = len(dataTable)

    # Draw a random subset of row indices with no duplicate selections
    subset = sample(list(range(numRows)), numEntries)

    # Extract the chosen rows and return them as an Astropy Table object
    outputTable = dataTable[subset]
    return outputTable
[ "def", "randomSelectFromCSV", "(", "tableName", ",", "numEntries", ",", "seedValue", ")", ":", "# Initialize the random number generator", "seed", "(", "seedValue", ")", "# Get the contents of the table", "dataTable", "=", "Table", ".", "read", "(", "tableName", ",", ...
Function to extract random entries (lines) from a CSV file Parameters ========== tableName: str Filename of the input master CSV file containing individual images or association names, as well as observational information regarding the images numEntries : int Number of entries/rows to extract from the master input CSV file seedValue : int Value used to initialize the random number generator for the selection of random entries Returns ======= outputTable : object Astropy Table object
[ "Function", "to", "extract", "random", "entries", "(", "lines", ")", "from", "a", "CSV", "file" ]
python
train
bcbio/bcbio-nextgen
bcbio/structural/shared.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/shared.py#L209-L222
def get_sv_chroms(items, exclude_file):
    """Retrieve chromosomes to process on, avoiding extra skipped chromosomes.
    """
    # Regions excluded starting at position 0 may cover a whole chromosome.
    exclude_regions = {}
    for region in pybedtools.BedTool(exclude_file):
        if int(region.start) == 0:
            exclude_regions[region.chrom] = int(region.end)

    bam_file = dd.get_align_bam(items[0]) or dd.get_work_bam(items[0])
    out = []
    with pysam.Samfile(bam_file) as pysam_work_bam:
        for chrom, length in zip(pysam_work_bam.references,
                                 pysam_work_bam.lengths):
            # Keep a chromosome unless the exclusion spans its full length.
            if exclude_regions.get(chrom, 0) < length:
                out.append(chrom)
    return out
[ "def", "get_sv_chroms", "(", "items", ",", "exclude_file", ")", ":", "exclude_regions", "=", "{", "}", "for", "region", "in", "pybedtools", ".", "BedTool", "(", "exclude_file", ")", ":", "if", "int", "(", "region", ".", "start", ")", "==", "0", ":", "e...
Retrieve chromosomes to process on, avoiding extra skipped chromosomes.
[ "Retrieve", "chromosomes", "to", "process", "on", "avoiding", "extra", "skipped", "chromosomes", "." ]
python
train
fhs/pyhdf
pyhdf/SD.py
https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1922-L2001
def set(self, data, start=None, count=None, stride=None):
    """Write data to the dataset.

    Args::

      data   : array of data to write; can be given as a numpy
               array, or as Python sequence (whose elements can be
               imbricated sequences)
      start  : indices where to start writing in the dataset;
               default to 0 on all dimensions
      count  : number of values to write along each dimension;
               default to the current length of dataset dimensions
      stride : sampling interval along each dimension;
               default to 1 on all dimensions

    For n-dimensional datasets, those 3 parameters are entered
    using lists. For one-dimensional datasets, integers
    can also be used.

    Note that, to write the whole dataset at once, one has simply
    to call the method with the dataset values in parameter
    'data', omitting all other parameters.

    Returns::

      None.

    C library equivalent : SDwritedata

    The dataset can also be written using the familiar indexing and
    slicing notation, like ordinary python sequences.
    See "High level variable access".
    """
    # Obtain SDS info.
    try:
        sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
        if isinstance(dim_sizes, int):
            dim_sizes = [dim_sizes]
    except HDF4Error:
        raise HDF4Error('set : cannot execute')

    # Normalize start/count/stride into one-entry-per-dimension lists.
    if start is None:
        start = [0] * rank
    elif isinstance(start, int):
        start = [start]
    if count is None:
        count = dim_sizes
        if count[0] == 0:
            count[0] = 1
    elif isinstance(count, int):
        count = [count]
    if stride is None:
        stride = [1] * rank
    elif isinstance(stride, int):
        stride = [stride]
    if not (len(start) == len(count) == len(stride) == rank):
        raise HDF4Error('set : start, stride or count '
                        'do not match SDS rank')

    # Bounds check each dimension; the first dimension of a record
    # (unlimited) dataset is allowed to grow.
    unlimited = self.isrecord()
    for dim in range(rank):
        bad = start[dim] < 0
        if not bad and (dim > 0 or not unlimited):
            last = start[dim] + (abs(count[dim]) - 1) * stride[dim]
            bad = last >= dim_sizes[dim]
        if bad:
            raise HDF4Error('set arguments violate '
                            'the size (%d) of dimension %d'
                            % (dim_sizes[dim], dim))

    # ??? Check support for UINT16
    if data_type not in SDC.equivNumericTypes:
        raise HDF4Error('set cannot currrently deal '
                        'with the SDS data type')

    _C._SDwritedata_0(self._id, data_type, start, count, data, stride)
[ "def", "set", "(", "self", ",", "data", ",", "start", "=", "None", ",", "count", "=", "None", ",", "stride", "=", "None", ")", ":", "# Obtain SDS info.", "try", ":", "sds_name", ",", "rank", ",", "dim_sizes", ",", "data_type", ",", "n_attrs", "=", "s...
Write data to the dataset. Args:: data : array of data to write; can be given as a numpy array, or as Python sequence (whose elements can be imbricated sequences) start : indices where to start writing in the dataset; default to 0 on all dimensions count : number of values to write along each dimension; default to the current length of dataset dimensions stride : sampling interval along each dimension; default to 1 on all dimensions For n-dimensional datasets, those 3 parameters are entered using lists. For one-dimensional datasets, integers can also be used. Note that, to write the whole dataset at once, one has simply to call the method with the dataset values in parameter 'data', omitting all other parameters. Returns:: None. C library equivalent : SDwritedata The dataset can also be written using the familiar indexing and slicing notation, like ordinary python sequences. See "High level variable access".
[ "Write", "data", "to", "the", "dataset", "." ]
python
train
oemof/oemof.db
oemof/db/__init__.py
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/__init__.py#L11-L73
def url(section="postGIS", config_file=None):
    """ Retrieve the URL used to connect to the database.

    Use this if you have your own means of accessing the database and
    do not want to use :func:`engine` or :func:`connection`.

    Parameters
    ----------
    section : str, optional
        The `config.ini` section corresponding to the targeted database.
        It should contain all the details that needed to set up a
        connection.
    config_file : str, optional
        Relative or absolute path of config.ini. If not specified, it
        tries to read from .oemof/config.ini in your HOME dir

    Returns
    -------
    database URL : str
        The URL with which one can connect to the database. Be careful
        as this will probably contain sensitive data like the
        username/password combination.

    Notes
    -----
    For documentation on config.ini see the README section on
    :ref:`configuring <readme#configuration>` :mod:`oemof.db`.
    """
    # Local import: the original code referenced an undefined name
    # (`except option:`) where configparser's NoOptionError was intended.
    from configparser import NoOptionError

    cfg.load_config(config_file)
    try:
        pw = keyring.get_password(cfg.get(section, "database"),
                                  cfg.get(section, "username"))
    except NoSectionError:
        print("There is no section {section} in your config file. Please "
              "choose one available section from your config file or "
              "specify a new one!".format(
                  section=section))
        exit(-1)

    if pw is None:
        try:
            # Fall back to a password stored directly in the config file.
            pw = cfg.get(section, "pw")
        except NoOptionError:
            # No stored password anywhere: prompt and remember it.
            pw = getpass.getpass(prompt="No password available in your "
                                        "keyring for database {database}. "
                                        "\n\nEnter your password to "
                                        "store it in "
                                        "keyring:".format(database=section))
            keyring.set_password(section, cfg.get(section, "username"), pw)
        except NoSectionError:
            print("Unable to find the 'postGIS' section in oemof's config." +
                  "\nExiting.")
            exit(-1)

    return "postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{db}".format(
        user=cfg.get(section, "username"), passwd=pw,
        host=cfg.get(section, "host"), db=cfg.get(section, "database"),
        port=int(cfg.get(section, "port")))
[ "def", "url", "(", "section", "=", "\"postGIS\"", ",", "config_file", "=", "None", ")", ":", "cfg", ".", "load_config", "(", "config_file", ")", "try", ":", "pw", "=", "keyring", ".", "get_password", "(", "cfg", ".", "get", "(", "section", ",", "\"data...
Retrieve the URL used to connect to the database. Use this if you have your own means of accessing the database and do not want to use :func:`engine` or :func:`connection`. Parameters ---------- section : str, optional The `config.ini` section corresponding to the targeted database. It should contain all the details that needed to set up a connection. Returns ------- database URL : str The URL with which one can connect to the database. Be careful as this will probably contain sensitive data like the username/password combination. config_file : str, optional Relative of absolute of config.ini. If not specified, it tries to read from .oemof/config.ini in your HOME dir Notes ----- For documentation on config.ini see the README section on :ref:`configuring <readme#configuration>` :mod:`oemof.db`.
[ "Retrieve", "the", "URL", "used", "to", "connect", "to", "the", "database", "." ]
python
train
gwastro/pycbc
pycbc/io/record.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/record.py#L1215-L1226
def aliases(self):
    """Returns a dictionary of the aliases, or "titles", of the field names
    in self. An alias can be specified by passing a tuple in the name part
    of the dtype. For example, if an array is created with
    ``dtype=[(('foo', 'bar'), float)]``, the array will have a field called
    `bar` that has alias `foo` that can be accessed using either
    `arr['foo']` or `arr['bar']`. Note that the first string in the dtype
    is the alias, the second the name. This function returns a dictionary
    in which the aliases are the keys and the names are the values. Only
    fields that have aliases are returned.
    """
    # Titled fields show up in dtype.descr with a (title, name) tuple in
    # the name slot; plain fields carry a bare string and are skipped.
    return {entry[0][0]: entry[0][1]
            for entry in self.dtype.descr
            if isinstance(entry[0], tuple)}
[ "def", "aliases", "(", "self", ")", ":", "return", "dict", "(", "c", "[", "0", "]", "for", "c", "in", "self", ".", "dtype", ".", "descr", "if", "isinstance", "(", "c", "[", "0", "]", ",", "tuple", ")", ")" ]
Returns a dictionary of the aliases, or "titles", of the field names in self. An alias can be specified by passing a tuple in the name part of the dtype. For example, if an array is created with ``dtype=[(('foo', 'bar'), float)]``, the array will have a field called `bar` that has alias `foo` that can be accessed using either `arr['foo']` or `arr['bar']`. Note that the first string in the dtype is the alias, the second the name. This function returns a dictionary in which the aliases are the keys and the names are the values. Only fields that have aliases are returned.
[ "Returns", "a", "dictionary", "of", "the", "aliases", "or", "titles", "of", "the", "field", "names", "in", "self", ".", "An", "alias", "can", "be", "specified", "by", "passing", "a", "tuple", "in", "the", "name", "part", "of", "the", "dtype", ".", "For...
python
train
tanghaibao/goatools
goatools/gosubdag/gosubdag_init.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/gosubdag_init.py#L82-L86
def _add_goterms_kws(self, go2obj_user, kws_gos): """Add more GOTerms to go2obj_user, if requested and relevant.""" if 'go2color' in kws_gos: for goid in kws_gos['go2color'].keys(): self._add_goterms(go2obj_user, goid)
[ "def", "_add_goterms_kws", "(", "self", ",", "go2obj_user", ",", "kws_gos", ")", ":", "if", "'go2color'", "in", "kws_gos", ":", "for", "goid", "in", "kws_gos", "[", "'go2color'", "]", ".", "keys", "(", ")", ":", "self", ".", "_add_goterms", "(", "go2obj_...
Add more GOTerms to go2obj_user, if requested and relevant.
[ "Add", "more", "GOTerms", "to", "go2obj_user", "if", "requested", "and", "relevant", "." ]
python
train
blockstack/blockstack-core
blockstack/lib/nameset/namedb.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/namedb.py#L771-L785
def get_name_DID_info(self, name, lastblock=None):
    """
    Given a name, find its DID (decentralized identifier) information.
    Returns {'address': ..., 'index': ...}
    Returns None if there is no such name
    """
    if lastblock is None:
        lastblock = self.lastblock

    cur = self.db.cursor()
    # The lookup helper already yields None for missing names, so its
    # result can be returned directly.
    return namedb_get_name_DID_info(cur, name, lastblock)
[ "def", "get_name_DID_info", "(", "self", ",", "name", ",", "lastblock", "=", "None", ")", ":", "if", "lastblock", "is", "None", ":", "lastblock", "=", "self", ".", "lastblock", "cur", "=", "self", ".", "db", ".", "cursor", "(", ")", "did_info", "=", ...
Given a name, find its DID (decentralized identifier) information. Returns {'address': ..., 'index': ...} Returns None if there is no such name
[ "Given", "a", "name", "find", "its", "DID", "(", "decentralized", "identifier", ")", "information", ".", "Returns", "{", "address", ":", "...", "index", ":", "...", "}", "Returns", "None", "if", "there", "is", "no", "such", "name" ]
python
train
PrefPy/prefpy
prefpy/mechanism.py
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L1780-L1825
def PluRunOff_single_winner(self, profile): """ Returns a number that associates the winner of a profile under Plurality with Runoff rule. :ivar Profile profile: A Profile object that represents an election profile. """ # Currently, we expect the profile to contain complete ordering over candidates. Ties are # allowed however. elecType = profile.getElecType() if elecType != "soc" and elecType != "toc" and elecType != "csv": print("ERROR: unsupported election type") exit() # Initialization prefcounts = profile.getPreferenceCounts() len_prefcounts = len(prefcounts) rankmaps = profile.getRankMaps() ranking = MechanismPlurality().getRanking(profile) # 1st round: find the top 2 candidates in plurality scores # Compute the 1st-place candidate in plurality scores # print(ranking) max_cand = ranking[0][0][0] # Compute the 2nd-place candidate in plurality scores # Automatically using tie-breaking rule--numerically increasing order if len(ranking[0][0]) > 1: second_max_cand = ranking[0][0][1] else: second_max_cand = ranking[0][1][0] top_2 = [max_cand, second_max_cand] # 2nd round: find the candidate with maximum plurality score dict_top2 = {max_cand: 0, second_max_cand: 0} for i in range(len_prefcounts): vote_top2 = {key: value for key, value in rankmaps[i].items() if key in top_2} top_position = min(vote_top2.values()) keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position] for key in keys: dict_top2[key] += prefcounts[i] # print(dict_top2) winner = max(dict_top2.items(), key=lambda x: x[1])[0] return winner
[ "def", "PluRunOff_single_winner", "(", "self", ",", "profile", ")", ":", "# Currently, we expect the profile to contain complete ordering over candidates. Ties are", "# allowed however.", "elecType", "=", "profile", ".", "getElecType", "(", ")", "if", "elecType", "!=", "\"soc...
Returns a number that associates the winner of a profile under Plurality with Runoff rule. :ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "a", "number", "that", "associates", "the", "winner", "of", "a", "profile", "under", "Plurality", "with", "Runoff", "rule", "." ]
python
train
calocan/rescape-python-helpers
rescape_python_helpers/functional/ramda.py
https://github.com/calocan/rescape-python-helpers/blob/91a1724f062ee40a25aa60fc96b2d7acadd99618/rescape_python_helpers/functional/ramda.py#L280-L302
def pick_deep(pick_dct, dct): """ Implementation of pick that recurses. This tests the same keys at every level of dict and in lists :param pick_dct: Deep dict matching some portion of dct. :param dct: Dct to filter. Any key matching pick_dct pass through. It doesn't matter what the pick_dct value is as long as the key exists. Arrays also pass through if the have matching values in pick_dct :return: """ if isinstance(dict, dct): # Filter out keys and then recurse on each value that wasn't filtered out return map_with_obj( lambda k, v: pick_deep(prop(k, pick_dct), v), pick(keys(pick_dct), dct) ) if isinstance((list, tuple), dct): # run pick_deep on each value return map( lambda tup: pick_deep(*tup), list(zip(pick_dct or [], dct)) ) # scalar return dct
[ "def", "pick_deep", "(", "pick_dct", ",", "dct", ")", ":", "if", "isinstance", "(", "dict", ",", "dct", ")", ":", "# Filter out keys and then recurse on each value that wasn't filtered out", "return", "map_with_obj", "(", "lambda", "k", ",", "v", ":", "pick_deep", ...
Implementation of pick that recurses. This tests the same keys at every level of dict and in lists :param pick_dct: Deep dict matching some portion of dct. :param dct: Dct to filter. Any key matching pick_dct pass through. It doesn't matter what the pick_dct value is as long as the key exists. Arrays also pass through if the have matching values in pick_dct :return:
[ "Implementation", "of", "pick", "that", "recurses", ".", "This", "tests", "the", "same", "keys", "at", "every", "level", "of", "dict", "and", "in", "lists", ":", "param", "pick_dct", ":", "Deep", "dict", "matching", "some", "portion", "of", "dct", ".", "...
python
train
mila/pyoo
pyoo.py
https://github.com/mila/pyoo/blob/1e024999f608c87ea72cd443e39c89eb0ba3cc62/pyoo.py#L157-L186
def _clean_index(key, length): """ Validates and normalizes a cell range index. >>> _clean_index(0, 10) 0 >>> _clean_index(-10, 10) 0 >>> _clean_index(10, 10) Traceback (most recent call last): ... IndexError: Cell index out of range. >>> _clean_index(-11, 10) Traceback (most recent call last): ... IndexError: Cell index out of range. >>> _clean_index(None, 10) Traceback (most recent call last): ... TypeError: Cell indices must be integers, NoneType given. """ if not isinstance(key, integer_types): raise TypeError('Cell indices must be integers, %s given.' % type(key).__name__) if -length <= key < 0: return key + length elif 0 <= key < length: return key else: raise IndexError('Cell index out of range.')
[ "def", "_clean_index", "(", "key", ",", "length", ")", ":", "if", "not", "isinstance", "(", "key", ",", "integer_types", ")", ":", "raise", "TypeError", "(", "'Cell indices must be integers, %s given.'", "%", "type", "(", "key", ")", ".", "__name__", ")", "i...
Validates and normalizes a cell range index. >>> _clean_index(0, 10) 0 >>> _clean_index(-10, 10) 0 >>> _clean_index(10, 10) Traceback (most recent call last): ... IndexError: Cell index out of range. >>> _clean_index(-11, 10) Traceback (most recent call last): ... IndexError: Cell index out of range. >>> _clean_index(None, 10) Traceback (most recent call last): ... TypeError: Cell indices must be integers, NoneType given.
[ "Validates", "and", "normalizes", "a", "cell", "range", "index", "." ]
python
train
yyuu/botornado
boto/sdb/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/sdb/connection.py#L429-L454
def batch_put_attributes(self, domain_or_name, items, replace=True): """ Store attributes for multiple items in a domain. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object :type items: dict or dict-like object :param items: A dictionary-like object. The keys of the dictionary are the item names and the values are themselves dictionaries of attribute names/values, exactly the same as the attribute_names parameter of the scalar put_attributes call. :type replace: bool :param replace: Whether the attribute values passed in will replace existing values or will be added as addition values. Defaults to True. :rtype: bool :return: True if successful """ domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName' : domain_name} self._build_batch_list(params, items, replace) return self.get_status('BatchPutAttributes', params, verb='POST')
[ "def", "batch_put_attributes", "(", "self", ",", "domain_or_name", ",", "items", ",", "replace", "=", "True", ")", ":", "domain", ",", "domain_name", "=", "self", ".", "get_domain_and_name", "(", "domain_or_name", ")", "params", "=", "{", "'DomainName'", ":", ...
Store attributes for multiple items in a domain. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object :type items: dict or dict-like object :param items: A dictionary-like object. The keys of the dictionary are the item names and the values are themselves dictionaries of attribute names/values, exactly the same as the attribute_names parameter of the scalar put_attributes call. :type replace: bool :param replace: Whether the attribute values passed in will replace existing values or will be added as addition values. Defaults to True. :rtype: bool :return: True if successful
[ "Store", "attributes", "for", "multiple", "items", "in", "a", "domain", ".", ":", "type", "domain_or_name", ":", "string", "or", ":", "class", ":", "boto", ".", "sdb", ".", "domain", ".", "Domain", "object", ".", ":", "param", "domain_or_name", ":", "Eit...
python
train
sci-bots/pygtkhelpers
pygtkhelpers/ui/objectlist/__init__.py
https://github.com/sci-bots/pygtkhelpers/blob/3a6e6d6340221c686229cd1c951d7537dae81b07/pygtkhelpers/ui/objectlist/__init__.py#L67-L87
def get_list_store(data_frame): ''' Return a `pandas.DataFrame` containing Python type information for the columns in `data_frame` and a `gtk.ListStore` matching the contents of the data frame. Args: data_frame (pandas.DataFrame) : Data frame containing data columns. Returns: (tuple) : The first element is a data frame as returned by `get_py_dtypes` and the second element is a `gtk.ListStore` matching the contents of the data frame. ''' df_py_dtypes = get_py_dtypes(data_frame) list_store = gtk.ListStore(*df_py_dtypes.dtype) for i, row_i in data_frame.iterrows(): list_store.append(row_i.tolist()) return df_py_dtypes, list_store
[ "def", "get_list_store", "(", "data_frame", ")", ":", "df_py_dtypes", "=", "get_py_dtypes", "(", "data_frame", ")", "list_store", "=", "gtk", ".", "ListStore", "(", "*", "df_py_dtypes", ".", "dtype", ")", "for", "i", ",", "row_i", "in", "data_frame", ".", ...
Return a `pandas.DataFrame` containing Python type information for the columns in `data_frame` and a `gtk.ListStore` matching the contents of the data frame. Args: data_frame (pandas.DataFrame) : Data frame containing data columns. Returns: (tuple) : The first element is a data frame as returned by `get_py_dtypes` and the second element is a `gtk.ListStore` matching the contents of the data frame.
[ "Return", "a", "pandas", ".", "DataFrame", "containing", "Python", "type", "information", "for", "the", "columns", "in", "data_frame", "and", "a", "gtk", ".", "ListStore", "matching", "the", "contents", "of", "the", "data", "frame", "." ]
python
train
ajdavis/mongo-mockup-db
mockupdb/__init__.py
https://github.com/ajdavis/mongo-mockup-db/blob/ff8a3f793def59e9037397ef60607fbda6949dac/mockupdb/__init__.py#L1264-L1280
def run(self): """Begin serving. Returns the bound port, or 0 for domain socket.""" self._listening_sock, self._address = ( bind_domain_socket(self._address) if self._uds_path else bind_tcp_socket(self._address)) if self._ssl: certfile = os.path.join(os.path.dirname(__file__), 'server.pem') self._listening_sock = _ssl.wrap_socket( self._listening_sock, certfile=certfile, server_side=True) self._accept_thread = threading.Thread(target=self._accept_loop) self._accept_thread.daemon = True self._accept_thread.start() return self.port
[ "def", "run", "(", "self", ")", ":", "self", ".", "_listening_sock", ",", "self", ".", "_address", "=", "(", "bind_domain_socket", "(", "self", ".", "_address", ")", "if", "self", ".", "_uds_path", "else", "bind_tcp_socket", "(", "self", ".", "_address", ...
Begin serving. Returns the bound port, or 0 for domain socket.
[ "Begin", "serving", ".", "Returns", "the", "bound", "port", "or", "0", "for", "domain", "socket", "." ]
python
train
CZ-NIC/yangson
yangson/instance.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/instance.py#L705-L711
def _zip(self) -> ArrayValue: """Zip the receiver into an array and return it.""" res = list(self.before) res.reverse() res.append(self.value) res.extend(list(self.after)) return ArrayValue(res, self.timestamp)
[ "def", "_zip", "(", "self", ")", "->", "ArrayValue", ":", "res", "=", "list", "(", "self", ".", "before", ")", "res", ".", "reverse", "(", ")", "res", ".", "append", "(", "self", ".", "value", ")", "res", ".", "extend", "(", "list", "(", "self", ...
Zip the receiver into an array and return it.
[ "Zip", "the", "receiver", "into", "an", "array", "and", "return", "it", "." ]
python
train
oscarbranson/latools
latools/helpers/stat_fns.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/stat_fns.py#L132-L152
def H15_mean(x): """ Calculate the Huber (H15) Robust mean of x. For details, see: http://www.cscjp.co.jp/fera/document/ANALYSTVol114Decpgs1693-97_1989.pdf http://www.rsc.org/images/robust-statistics-technical-brief-6_tcm18-214850.pdf """ mu = np.nanmean(x) sd = np.nanstd(x) * 1.134 sig = 1.5 hi = x > mu + sig * sd lo = x < mu - sig * sd if any(hi | lo): x[hi] = mu + sig * sd x[lo] = mu - sig * sd return H15_mean(x) else: return mu
[ "def", "H15_mean", "(", "x", ")", ":", "mu", "=", "np", ".", "nanmean", "(", "x", ")", "sd", "=", "np", ".", "nanstd", "(", "x", ")", "*", "1.134", "sig", "=", "1.5", "hi", "=", "x", ">", "mu", "+", "sig", "*", "sd", "lo", "=", "x", "<", ...
Calculate the Huber (H15) Robust mean of x. For details, see: http://www.cscjp.co.jp/fera/document/ANALYSTVol114Decpgs1693-97_1989.pdf http://www.rsc.org/images/robust-statistics-technical-brief-6_tcm18-214850.pdf
[ "Calculate", "the", "Huber", "(", "H15", ")", "Robust", "mean", "of", "x", "." ]
python
test
PolyJIT/benchbuild
benchbuild/utils/uchroot.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L121-L125
def clean_env(uchroot_cmd, varnames): """Returns a uchroot cmd that runs inside a filtered environment.""" env = uchroot_cmd["/usr/bin/env"] __clean_env = env["-u", ",".join(varnames)] return __clean_env
[ "def", "clean_env", "(", "uchroot_cmd", ",", "varnames", ")", ":", "env", "=", "uchroot_cmd", "[", "\"/usr/bin/env\"", "]", "__clean_env", "=", "env", "[", "\"-u\"", ",", "\",\"", ".", "join", "(", "varnames", ")", "]", "return", "__clean_env" ]
Returns a uchroot cmd that runs inside a filtered environment.
[ "Returns", "a", "uchroot", "cmd", "that", "runs", "inside", "a", "filtered", "environment", "." ]
python
train
ewels/MultiQC
multiqc/modules/homer/tagdirectory.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/homer/tagdirectory.py#L305-L319
def parse_length_dist(self, f): """ Parse HOMER tagdirectory tagLengthDistribution file. """ parsed_data = dict() firstline = True for l in f['f']: if firstline: #skip first line firstline = False continue s = l.split("\t") if len(s) > 1: k = float(s[0].strip()) v = float(s[1].strip()) parsed_data[k] = v return parsed_data
[ "def", "parse_length_dist", "(", "self", ",", "f", ")", ":", "parsed_data", "=", "dict", "(", ")", "firstline", "=", "True", "for", "l", "in", "f", "[", "'f'", "]", ":", "if", "firstline", ":", "#skip first line", "firstline", "=", "False", "continue", ...
Parse HOMER tagdirectory tagLengthDistribution file.
[ "Parse", "HOMER", "tagdirectory", "tagLengthDistribution", "file", "." ]
python
train
Parsely/birding
src/birding/search.py
https://github.com/Parsely/birding/blob/c7f6eee56424234e361b1a455595de202e744dac/src/birding/search.py#L8-L19
def search_manager_from_config(config, **default_init): """Get a `SearchManager` instance dynamically based on config. `config` is a dictionary containing ``class`` and ``init`` keys as defined in :mod:`birding.config`. """ manager_cls = import_name(config['class'], default_ns='birding.search') init = {} init.update(default_init) init.update(config['init']) manager = manager_cls(**init) return manager
[ "def", "search_manager_from_config", "(", "config", ",", "*", "*", "default_init", ")", ":", "manager_cls", "=", "import_name", "(", "config", "[", "'class'", "]", ",", "default_ns", "=", "'birding.search'", ")", "init", "=", "{", "}", "init", ".", "update",...
Get a `SearchManager` instance dynamically based on config. `config` is a dictionary containing ``class`` and ``init`` keys as defined in :mod:`birding.config`.
[ "Get", "a", "SearchManager", "instance", "dynamically", "based", "on", "config", "." ]
python
train
danilobellini/audiolazy
audiolazy/lazy_text.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_text.py#L357-L417
def format_docstring(template_="{__doc__}", *args, **kwargs): r""" Parametrized decorator for adding/changing a function docstring. For changing a already available docstring in the function, the ``"{__doc__}"`` in the template is replaced by the original function docstring. Parameters ---------- template_ : A format-style template. *args, **kwargs : Positional and keyword arguments passed to the formatter. Examples -------- Closure docstring personalization: >>> def add(n): ... @format_docstring(number=n) ... def func(m): ... '''Adds {number} to the given value.''' ... return n + m ... return func >>> add(3).__doc__ 'Adds 3 to the given value.' >>> add("__").__doc__ 'Adds __ to the given value.' Same but using a lambda (you can also try with ``**locals()``): >>> def add_with_lambda(n): ... return format_docstring("Adds {0}.", n)(lambda m: n + m) >>> add_with_lambda(15).__doc__ 'Adds 15.' >>> add_with_lambda("something").__doc__ 'Adds something.' Mixing both template styles with ``{__doc__}``: >>> templ = "{0}, {1} is my {name} docstring:{__doc__}->\nEND!" >>> @format_docstring(templ, "zero", "one", "two", name="testing", k=[1, 2]) ... def test(): ... ''' ... Not empty! ... {2} != {k[0]} but {2} == {k[1]} ... ''' >>> print(test.__doc__) zero, one is my testing docstring: Not empty! two != 1 but two == 2 -> END! """ def decorator(func): if func.__doc__: kwargs["__doc__"] = func.__doc__.format(*args, **kwargs) func.__doc__ = template_.format(*args, **kwargs) return func return decorator
[ "def", "format_docstring", "(", "template_", "=", "\"{__doc__}\"", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "decorator", "(", "func", ")", ":", "if", "func", ".", "__doc__", ":", "kwargs", "[", "\"__doc__\"", "]", "=", "func", ".", ...
r""" Parametrized decorator for adding/changing a function docstring. For changing a already available docstring in the function, the ``"{__doc__}"`` in the template is replaced by the original function docstring. Parameters ---------- template_ : A format-style template. *args, **kwargs : Positional and keyword arguments passed to the formatter. Examples -------- Closure docstring personalization: >>> def add(n): ... @format_docstring(number=n) ... def func(m): ... '''Adds {number} to the given value.''' ... return n + m ... return func >>> add(3).__doc__ 'Adds 3 to the given value.' >>> add("__").__doc__ 'Adds __ to the given value.' Same but using a lambda (you can also try with ``**locals()``): >>> def add_with_lambda(n): ... return format_docstring("Adds {0}.", n)(lambda m: n + m) >>> add_with_lambda(15).__doc__ 'Adds 15.' >>> add_with_lambda("something").__doc__ 'Adds something.' Mixing both template styles with ``{__doc__}``: >>> templ = "{0}, {1} is my {name} docstring:{__doc__}->\nEND!" >>> @format_docstring(templ, "zero", "one", "two", name="testing", k=[1, 2]) ... def test(): ... ''' ... Not empty! ... {2} != {k[0]} but {2} == {k[1]} ... ''' >>> print(test.__doc__) zero, one is my testing docstring: Not empty! two != 1 but two == 2 -> END!
[ "r", "Parametrized", "decorator", "for", "adding", "/", "changing", "a", "function", "docstring", "." ]
python
train
tcalmant/ipopo
pelix/ipopo/handlers/requiresmap.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/requiresmap.py#L551-L564
def get_value(self): """ Retrieves the value to inject in the component :return: The value to inject """ with self._lock: # The value field must be a deep copy of our dictionary if self._future_value is not None: return { key: value[:] for key, value in self._future_value.items() } return None
[ "def", "get_value", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "# The value field must be a deep copy of our dictionary", "if", "self", ".", "_future_value", "is", "not", "None", ":", "return", "{", "key", ":", "value", "[", ":", "]", "for", "...
Retrieves the value to inject in the component :return: The value to inject
[ "Retrieves", "the", "value", "to", "inject", "in", "the", "component" ]
python
train
marcomusy/vtkplotter
vtkplotter/shapes.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/shapes.py#L336-L398
def Tube(points, r=1, c="r", alpha=1, res=12): """Build a tube along the line defined by a set of points. :param r: constant radius or list of radii. :type r: float, list :param c: constant color or list of colors for each point. :type c: float, list .. hint:: |ribbon| |ribbon.py|_ |tube| |tube.py|_ """ ppoints = vtk.vtkPoints() # Generate the polyline ppoints.SetData(numpy_to_vtk(points, deep=True)) lines = vtk.vtkCellArray() lines.InsertNextCell(len(points)) for i in range(len(points)): lines.InsertCellPoint(i) polyln = vtk.vtkPolyData() polyln.SetPoints(ppoints) polyln.SetLines(lines) tuf = vtk.vtkTubeFilter() tuf.CappingOn() tuf.SetNumberOfSides(res) tuf.SetInputData(polyln) if utils.isSequence(r): arr = numpy_to_vtk(np.ascontiguousarray(r), deep=True) arr.SetName("TubeRadius") polyln.GetPointData().AddArray(arr) polyln.GetPointData().SetActiveScalars("TubeRadius") tuf.SetVaryRadiusToVaryRadiusByAbsoluteScalar() else: tuf.SetRadius(r) usingColScals = False if utils.isSequence(c) and len(c) != 3: usingColScals = True cc = vtk.vtkUnsignedCharArray() cc.SetName("TubeColors") cc.SetNumberOfComponents(3) cc.SetNumberOfTuples(len(c)) for i, ic in enumerate(c): r, g, b = colors.getColor(ic) cc.InsertTuple3(i, int(255 * r), int(255 * g), int(255 * b)) polyln.GetPointData().AddArray(cc) c = None tuf.Update() polytu = tuf.GetOutput() actor = Actor(polytu, c=c, alpha=alpha, computeNormals=0) actor.phong() if usingColScals: actor.mapper.SetScalarModeToUsePointFieldData() actor.mapper.ScalarVisibilityOn() actor.mapper.SelectColorArray("TubeColors") actor.mapper.Modified() actor.base = np.array(points[0]) actor.top = np.array(points[-1]) settings.collectable_actors.append(actor) return actor
[ "def", "Tube", "(", "points", ",", "r", "=", "1", ",", "c", "=", "\"r\"", ",", "alpha", "=", "1", ",", "res", "=", "12", ")", ":", "ppoints", "=", "vtk", ".", "vtkPoints", "(", ")", "# Generate the polyline", "ppoints", ".", "SetData", "(", "numpy_...
Build a tube along the line defined by a set of points. :param r: constant radius or list of radii. :type r: float, list :param c: constant color or list of colors for each point. :type c: float, list .. hint:: |ribbon| |ribbon.py|_ |tube| |tube.py|_
[ "Build", "a", "tube", "along", "the", "line", "defined", "by", "a", "set", "of", "points", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/gurumate-2.8.6-py2.7.egg/gurumate/linux2/apt.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/gurumate-2.8.6-py2.7.egg/gurumate/linux2/apt.py#L30-L50
def get_installed_version(name): ''' returns installed package version and None if package is not installed ''' pattern = re.compile(r'''Installed:\s+(?P<version>.*)''') cmd = 'apt-cache policy %s' % name args = shlex.split(cmd) try: output = subprocess.check_output(args) if not output: return None except CalledProcessError: return None # check output match = pattern.search(output) if match: version = match.groupdict()['version'] if version == '(none)': return None else: return version
[ "def", "get_installed_version", "(", "name", ")", ":", "pattern", "=", "re", ".", "compile", "(", "r'''Installed:\\s+(?P<version>.*)'''", ")", "cmd", "=", "'apt-cache policy %s'", "%", "name", "args", "=", "shlex", ".", "split", "(", "cmd", ")", "try", ":", ...
returns installed package version and None if package is not installed
[ "returns", "installed", "package", "version", "and", "None", "if", "package", "is", "not", "installed" ]
python
test
jdodds/feather
feather/application.py
https://github.com/jdodds/feather/blob/92a9426e692b33c7fddf758df8dbc99a9a1ba8ef/feather/application.py#L29-L43
def register(self, plugin): """Take a feather.plugin.Plugin and tell our dispatcher about it. Plugins are expected to provide a list of the messages that they listen for and generate. If registering this plugin makes it so we have at least one plugin listening for and generating our expected messages, set self.valid to true """ self.needed_listeners -= plugin.listeners self.needed_messengers -= plugin.messengers if self.needed_messengers == self.needed_listeners == set(): self.valid = True self.dispatcher.register(plugin)
[ "def", "register", "(", "self", ",", "plugin", ")", ":", "self", ".", "needed_listeners", "-=", "plugin", ".", "listeners", "self", ".", "needed_messengers", "-=", "plugin", ".", "messengers", "if", "self", ".", "needed_messengers", "==", "self", ".", "neede...
Take a feather.plugin.Plugin and tell our dispatcher about it. Plugins are expected to provide a list of the messages that they listen for and generate. If registering this plugin makes it so we have at least one plugin listening for and generating our expected messages, set self.valid to true
[ "Take", "a", "feather", ".", "plugin", ".", "Plugin", "and", "tell", "our", "dispatcher", "about", "it", "." ]
python
train
google/grr
grr/server/grr_response_server/aff4.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L667-L771
def Open(self, urn, aff4_type=None, mode="r", token=None, local_cache=None, age=NEWEST_TIME, follow_symlinks=True, transaction=None): """Opens the named object. This instantiates the object from the AFF4 data store. Note that the root aff4:/ object is a container for all other objects. Opening it for reading will instantiate a AFF4Volume instance, even if the row does not exist. The mode parameter specifies, how the object should be opened. A read only mode will raise when calling Set() on it, while a write only object will never read from the data store. Note that its impossible to open an object with pure write support (since we have no idea what type it should be without reading the data base) - use Create() instead for purely write mode. Args: urn: The urn to open. aff4_type: If this parameter is set, we raise an IOError if the object is not an instance of this type. This check is important when a different object can be stored in this location. If mode is "w", this parameter will determine the type of the object and is mandatory. mode: The mode to open the file with. token: The Security Token to use for opening this item. local_cache: A dict containing a cache as returned by GetAttributes. If set, this bypasses the factory cache. age: The age policy used to build this object. Should be one of NEWEST_TIME, ALL_TIMES or a time range given as a tuple (start, end) in microseconds since Jan 1st, 1970. follow_symlinks: If object opened is a symlink, follow it. transaction: A lock in case this object is opened under lock. Returns: An AFF4Object instance. Raises: IOError: If the object is not of the required type. AttributeError: If the requested mode is incorrect. 
""" if not data_store.AFF4Enabled(): raise NotImplementedError("AFF4 data store has been disabled.") _ValidateAFF4Type(aff4_type) if mode not in ["w", "r", "rw"]: raise AttributeError("Invalid mode %s" % mode) if mode == "w": if aff4_type is None: raise AttributeError("Need a type to open in write only mode.") return self.Create( urn, aff4_type, mode=mode, token=token, age=age, force_new_version=False, transaction=transaction) urn = rdfvalue.RDFURN(urn) if token is None: token = data_store.default_token if "r" in mode and (local_cache is None or urn not in local_cache): local_cache = dict(self.GetAttributes([urn], age=age)) # Read the row from the table. We know the object already exists if there is # some data in the local_cache already for this object. result = AFF4Object( urn, mode=mode, token=token, local_cache=local_cache, age=age, follow_symlinks=follow_symlinks, object_exists=bool(local_cache.get(urn)), transaction=transaction) result.aff4_type = aff4_type # Now we have a AFF4Object, turn it into the type it is currently supposed # to be as specified by Schema.TYPE. existing_type = result.Get(result.Schema.TYPE, default="AFF4Volume") if existing_type: try: result = result.Upgrade(AFF4Object.classes[existing_type]) except KeyError: raise InstantiationError("Unable to open %s, type %s unknown." % (urn, existing_type)) if aff4_type is not None and not isinstance(result, aff4_type): raise InstantiationError( "Object %s is of type %s, but required_type is %s" % (urn, result.__class__.__name__, aff4_type.__name__)) return result
[ "def", "Open", "(", "self", ",", "urn", ",", "aff4_type", "=", "None", ",", "mode", "=", "\"r\"", ",", "token", "=", "None", ",", "local_cache", "=", "None", ",", "age", "=", "NEWEST_TIME", ",", "follow_symlinks", "=", "True", ",", "transaction", "=", ...
Opens the named object. This instantiates the object from the AFF4 data store. Note that the root aff4:/ object is a container for all other objects. Opening it for reading will instantiate a AFF4Volume instance, even if the row does not exist. The mode parameter specifies, how the object should be opened. A read only mode will raise when calling Set() on it, while a write only object will never read from the data store. Note that its impossible to open an object with pure write support (since we have no idea what type it should be without reading the data base) - use Create() instead for purely write mode. Args: urn: The urn to open. aff4_type: If this parameter is set, we raise an IOError if the object is not an instance of this type. This check is important when a different object can be stored in this location. If mode is "w", this parameter will determine the type of the object and is mandatory. mode: The mode to open the file with. token: The Security Token to use for opening this item. local_cache: A dict containing a cache as returned by GetAttributes. If set, this bypasses the factory cache. age: The age policy used to build this object. Should be one of NEWEST_TIME, ALL_TIMES or a time range given as a tuple (start, end) in microseconds since Jan 1st, 1970. follow_symlinks: If object opened is a symlink, follow it. transaction: A lock in case this object is opened under lock. Returns: An AFF4Object instance. Raises: IOError: If the object is not of the required type. AttributeError: If the requested mode is incorrect.
[ "Opens", "the", "named", "object", "." ]
python
train
Ex-Mente/auxi.0
auxi/tools/chemistry/thermochemistry.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/tools/chemistry/thermochemistry.py#L91-L114
def H(self, T): """ Calculate the portion of enthalpy of the compound phase covered by this Cp record. :param T: [K] temperature :returns: [J/mol] Enthalpy. """ result = 0.0 if T < self.Tmax: lT = T else: lT = self.Tmax Tref = self.Tmin for c, e in zip(self._coefficients, self._exponents): # Analytically integrate Cp(T). if e == -1.0: result += c * math.log(lT/Tref) else: result += c * (lT**(e+1.0) - Tref**(e+1.0)) / (e+1.0) return result
[ "def", "H", "(", "self", ",", "T", ")", ":", "result", "=", "0.0", "if", "T", "<", "self", ".", "Tmax", ":", "lT", "=", "T", "else", ":", "lT", "=", "self", ".", "Tmax", "Tref", "=", "self", ".", "Tmin", "for", "c", ",", "e", "in", "zip", ...
Calculate the portion of enthalpy of the compound phase covered by this Cp record. :param T: [K] temperature :returns: [J/mol] Enthalpy.
[ "Calculate", "the", "portion", "of", "enthalpy", "of", "the", "compound", "phase", "covered", "by", "this", "Cp", "record", "." ]
python
valid
fumitoh/modelx
modelx/core/formula.py
https://github.com/fumitoh/modelx/blob/0180da34d052c44fb94dab9e115e218bbebfc9c3/modelx/core/formula.py#L406-L443
def _reload(self, module=None): """Reload the source function from the source module. **Internal use only** Update the source function of the formula. This method is used to updated the underlying formula when the source code of the module in which the source function is read from is modified. If the formula was not created from a module, an error is raised. If ``module_`` is not given, the source module of the formula is reloaded. If ``module_`` is given and matches the source module, then the module_ is used without being reloaded. If ``module_`` is given and does not match the source module of the formula, an error is raised. Args: module_: A ``ModuleSource`` object Returns: self """ if self.module is None: raise RuntimeError elif module is None: import importlib module = ModuleSource(importlib.reload(module)) elif module.name != self.module: raise RuntimeError if self.name in module.funcs: func = module.funcs[self.name] self.__init__(func=func) else: self.__init__(func=NULL_FORMULA) return self
[ "def", "_reload", "(", "self", ",", "module", "=", "None", ")", ":", "if", "self", ".", "module", "is", "None", ":", "raise", "RuntimeError", "elif", "module", "is", "None", ":", "import", "importlib", "module", "=", "ModuleSource", "(", "importlib", "."...
Reload the source function from the source module. **Internal use only** Update the source function of the formula. This method is used to updated the underlying formula when the source code of the module in which the source function is read from is modified. If the formula was not created from a module, an error is raised. If ``module_`` is not given, the source module of the formula is reloaded. If ``module_`` is given and matches the source module, then the module_ is used without being reloaded. If ``module_`` is given and does not match the source module of the formula, an error is raised. Args: module_: A ``ModuleSource`` object Returns: self
[ "Reload", "the", "source", "function", "from", "the", "source", "module", "." ]
python
valid
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L403-L406
def ban_create(self, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/chat/bans#create-ban" api_path = "/api/v2/bans" return self.call(api_path, method="POST", data=data, **kwargs)
[ "def", "ban_create", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/bans\"", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"POST\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ...
https://developer.zendesk.com/rest_api/docs/chat/bans#create-ban
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "chat", "/", "bans#create", "-", "ban" ]
python
train
manns/pyspread
pyspread/src/actions/_grid_actions.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_actions.py#L846-L863
def _show_final_overflow_message(self, row_overflow, col_overflow): """Displays overflow message after import in statusbar""" if row_overflow and col_overflow: overflow_cause = _("rows and columns") elif row_overflow: overflow_cause = _("rows") elif col_overflow: overflow_cause = _("columns") else: raise AssertionError(_("Import cell overflow missing")) statustext = \ _("The imported data did not fit into the grid {cause}. " "It has been truncated. Use a larger grid for full import.").\ format(cause=overflow_cause) post_command_event(self.main_window, self.StatusBarMsg, text=statustext)
[ "def", "_show_final_overflow_message", "(", "self", ",", "row_overflow", ",", "col_overflow", ")", ":", "if", "row_overflow", "and", "col_overflow", ":", "overflow_cause", "=", "_", "(", "\"rows and columns\"", ")", "elif", "row_overflow", ":", "overflow_cause", "="...
Displays overflow message after import in statusbar
[ "Displays", "overflow", "message", "after", "import", "in", "statusbar" ]
python
train
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/gcp_hub_client.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/gcp_hub_client.py#L220-L227
def Start(self): """Starts the worker thread.""" self._shutdown = False self._main_thread = threading.Thread(target=self._MainThreadProc) self._main_thread.name = 'Cloud Debugger main worker thread' self._main_thread.daemon = True self._main_thread.start()
[ "def", "Start", "(", "self", ")", ":", "self", ".", "_shutdown", "=", "False", "self", ".", "_main_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_MainThreadProc", ")", "self", ".", "_main_thread", ".", "name", "=", "'Cloud D...
Starts the worker thread.
[ "Starts", "the", "worker", "thread", "." ]
python
train
cuihantao/andes
andes/plot.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/plot.py#L125-L132
def get_values(self, idx): """ Return the variable values at the given indices """ if isinstance(idx, list): idx = np.array(idx, dtype=int) return self._data[:, idx]
[ "def", "get_values", "(", "self", ",", "idx", ")", ":", "if", "isinstance", "(", "idx", ",", "list", ")", ":", "idx", "=", "np", ".", "array", "(", "idx", ",", "dtype", "=", "int", ")", "return", "self", ".", "_data", "[", ":", ",", "idx", "]" ...
Return the variable values at the given indices
[ "Return", "the", "variable", "values", "at", "the", "given", "indices" ]
python
train
Miserlou/Zappa
zappa/asynchronous.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L334-L356
def run(func, args=[], kwargs={}, service='lambda', capture_response=False, remote_aws_lambda_function_name=None, remote_aws_region=None, **task_kwargs): """ Instead of decorating a function with @task, you can just run it directly. If you were going to do func(*args, **kwargs), then you will call this: import zappa.asynchronous.run zappa.asynchronous.run(func, args, kwargs) If you want to use SNS, then do: zappa.asynchronous.run(func, args, kwargs, service='sns') and other arguments are similar to @task """ lambda_function_name = remote_aws_lambda_function_name or os.environ.get('AWS_LAMBDA_FUNCTION_NAME') aws_region = remote_aws_region or os.environ.get('AWS_REGION') task_path = get_func_task_path(func) return ASYNC_CLASSES[service](lambda_function_name=lambda_function_name, aws_region=aws_region, capture_response=capture_response, **task_kwargs).send(task_path, args, kwargs)
[ "def", "run", "(", "func", ",", "args", "=", "[", "]", ",", "kwargs", "=", "{", "}", ",", "service", "=", "'lambda'", ",", "capture_response", "=", "False", ",", "remote_aws_lambda_function_name", "=", "None", ",", "remote_aws_region", "=", "None", ",", ...
Instead of decorating a function with @task, you can just run it directly. If you were going to do func(*args, **kwargs), then you will call this: import zappa.asynchronous.run zappa.asynchronous.run(func, args, kwargs) If you want to use SNS, then do: zappa.asynchronous.run(func, args, kwargs, service='sns') and other arguments are similar to @task
[ "Instead", "of", "decorating", "a", "function", "with", "@task", "you", "can", "just", "run", "it", "directly", ".", "If", "you", "were", "going", "to", "do", "func", "(", "*", "args", "**", "kwargs", ")", "then", "you", "will", "call", "this", ":" ]
python
train
ekmmetering/ekmmeters
ekmmeters.py
https://github.com/ekmmetering/ekmmeters/blob/b3748bdf30263bfa46ea40157bdf8df2522e1904/ekmmeters.py#L1011-L1019
def setPollingValues(self, max_waits, wait_sleep): """ Optional polling loop control Args: max_waits (int): waits wait_sleep (int): ms per wait """ self.m_max_waits = max_waits self.m_wait_sleep = wait_sleep
[ "def", "setPollingValues", "(", "self", ",", "max_waits", ",", "wait_sleep", ")", ":", "self", ".", "m_max_waits", "=", "max_waits", "self", ".", "m_wait_sleep", "=", "wait_sleep" ]
Optional polling loop control Args: max_waits (int): waits wait_sleep (int): ms per wait
[ "Optional", "polling", "loop", "control" ]
python
test
materialsproject/pymatgen
pymatgen/io/vasp/outputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/outputs.py#L130-L143
def _parse_from_incar(filename, key): """ Helper function to parse a parameter from the INCAR. """ dirname = os.path.dirname(filename) for f in os.listdir(dirname): if re.search(r"INCAR", f): warnings.warn("INCAR found. Using " + key + " from INCAR.") incar = Incar.from_file(os.path.join(dirname, f)) if key in incar: return incar[key] else: return None return None
[ "def", "_parse_from_incar", "(", "filename", ",", "key", ")", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "for", "f", "in", "os", ".", "listdir", "(", "dirname", ")", ":", "if", "re", ".", "search", "(", "r\"INCAR\""...
Helper function to parse a parameter from the INCAR.
[ "Helper", "function", "to", "parse", "a", "parameter", "from", "the", "INCAR", "." ]
python
train
jd/tenacity
tenacity/compat.py
https://github.com/jd/tenacity/blob/354c40b7dc8e728c438668100dd020b65c84dfc6/tenacity/compat.py#L231-L247
def before_func_accept_retry_state(fn): """Wrap "before" function to accept "retry_state".""" if not six.callable(fn): return fn if func_takes_retry_state(fn): return fn @_utils.wraps(fn) def wrapped_before_func(retry_state): # func, trial_number, trial_time_taken warn_about_non_retry_state_deprecation('before', fn, stacklevel=4) return fn( retry_state.fn, retry_state.attempt_number, ) return wrapped_before_func
[ "def", "before_func_accept_retry_state", "(", "fn", ")", ":", "if", "not", "six", ".", "callable", "(", "fn", ")", ":", "return", "fn", "if", "func_takes_retry_state", "(", "fn", ")", ":", "return", "fn", "@", "_utils", ".", "wraps", "(", "fn", ")", "d...
Wrap "before" function to accept "retry_state".
[ "Wrap", "before", "function", "to", "accept", "retry_state", "." ]
python
train
faucamp/python-gsmmodem
gsmmodem/modem.py
https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/gsmmodem/modem.py#L76-L85
def status(self): """ Status of this SMS. Can be ENROUTE, DELIVERED or FAILED The actual status report object may be accessed via the 'report' attribute if status is 'DELIVERED' or 'FAILED' """ if self.report == None: return SentSms.ENROUTE else: return SentSms.DELIVERED if self.report.deliveryStatus == StatusReport.DELIVERED else SentSms.FAILED
[ "def", "status", "(", "self", ")", ":", "if", "self", ".", "report", "==", "None", ":", "return", "SentSms", ".", "ENROUTE", "else", ":", "return", "SentSms", ".", "DELIVERED", "if", "self", ".", "report", ".", "deliveryStatus", "==", "StatusReport", "."...
Status of this SMS. Can be ENROUTE, DELIVERED or FAILED The actual status report object may be accessed via the 'report' attribute if status is 'DELIVERED' or 'FAILED'
[ "Status", "of", "this", "SMS", ".", "Can", "be", "ENROUTE", "DELIVERED", "or", "FAILED", "The", "actual", "status", "report", "object", "may", "be", "accessed", "via", "the", "report", "attribute", "if", "status", "is", "DELIVERED", "or", "FAILED" ]
python
train
jaredLunde/vital-tools
vital/debug/__init__.py
https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/debug/__init__.py#L1925-L1935
def finish(self): """ Resets the progress bar and clears it from the terminal """ pct = floor(round(self.progress/self.size, 2)*100) pr = floor(pct*.33) bar = "".join([" " for x in range(pr-1)] + ["↦"]) subprogress = self.format_parent_bar() if self.parent_bar else "" fin = "Loading{} ={}{} ({}%)".format(subprogress, bar, "ӿ", pct) print(fin.ljust(len(fin)+5), end="\r") time.sleep(0.10) print("\033[K\033[1A") self.progress = 0
[ "def", "finish", "(", "self", ")", ":", "pct", "=", "floor", "(", "round", "(", "self", ".", "progress", "/", "self", ".", "size", ",", "2", ")", "*", "100", ")", "pr", "=", "floor", "(", "pct", "*", ".33", ")", "bar", "=", "\"\"", ".", "join...
Resets the progress bar and clears it from the terminal
[ "Resets", "the", "progress", "bar", "and", "clears", "it", "from", "the", "terminal" ]
python
train
dslackw/slpkg
slpkg/repositories.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/repositories.py#L71-L87
def remove(self, repo): """Remove custom repository """ rem_repo = False with open(self.custom_repo_file, "w") as repos: for line in self.custom_repositories_list.splitlines(): repo_name = line.split()[0] if repo_name != repo: repos.write(line + "\n") else: print("\nRepository '{0}' successfully " "removed\n".format(repo)) rem_repo = True repos.close() if not rem_repo: print("\nRepository '{0}' doesn't exist\n".format(repo)) raise SystemExit()
[ "def", "remove", "(", "self", ",", "repo", ")", ":", "rem_repo", "=", "False", "with", "open", "(", "self", ".", "custom_repo_file", ",", "\"w\"", ")", "as", "repos", ":", "for", "line", "in", "self", ".", "custom_repositories_list", ".", "splitlines", "...
Remove custom repository
[ "Remove", "custom", "repository" ]
python
train
CitrineInformatics/pif-dft
dfttopif/parsers/pwscf.py
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L98-L103
def get_cutoff_energy(self): '''Determine the cutoff energy from the output''' return Value( scalars=[Scalar(value=self.settings["kinetic-energy cutoff"])], units=self.settings['kinetic-energy cutoff units'] )
[ "def", "get_cutoff_energy", "(", "self", ")", ":", "return", "Value", "(", "scalars", "=", "[", "Scalar", "(", "value", "=", "self", ".", "settings", "[", "\"kinetic-energy cutoff\"", "]", ")", "]", ",", "units", "=", "self", ".", "settings", "[", "'kine...
Determine the cutoff energy from the output
[ "Determine", "the", "cutoff", "energy", "from", "the", "output" ]
python
train
ChristopherRogers1991/python-irsend
py_irsend/irsend.py
https://github.com/ChristopherRogers1991/python-irsend/blob/aab8ee05d47cc0e3c8c84d220bc6777aa720b232/py_irsend/irsend.py#L115-L134
def send_start(remote, code, device=None, address=None): """ All parameters are passed to irsend. See the man page for irsend for details about their usage. Parameters ---------- remote: str code: str device: str address: str Notes ----- No attempt is made to catch or handle errors. See the documentation for subprocess.check_output to see the types of exceptions it may raise. """ args = ['send_start', remote, code] _call(args, device, address)
[ "def", "send_start", "(", "remote", ",", "code", ",", "device", "=", "None", ",", "address", "=", "None", ")", ":", "args", "=", "[", "'send_start'", ",", "remote", ",", "code", "]", "_call", "(", "args", ",", "device", ",", "address", ")" ]
All parameters are passed to irsend. See the man page for irsend for details about their usage. Parameters ---------- remote: str code: str device: str address: str Notes ----- No attempt is made to catch or handle errors. See the documentation for subprocess.check_output to see the types of exceptions it may raise.
[ "All", "parameters", "are", "passed", "to", "irsend", ".", "See", "the", "man", "page", "for", "irsend", "for", "details", "about", "their", "usage", "." ]
python
train
tcalmant/ipopo
pelix/rsa/__init__.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/rsa/__init__.py#L1582-L1594
def copy_non_ecf(props, target): # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any] """ Copies non-ECF properties from ``props`` to ``target`` :param props: An input dictionary :param target: The dictionary to copy non-ECF properties to :return: The ``target`` dictionary """ target.update( {key: value for key, value in props.items() if key not in ECFPROPNAMES} ) return target
[ "def", "copy_non_ecf", "(", "props", ",", "target", ")", ":", "# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]", "target", ".", "update", "(", "{", "key", ":", "value", "for", "key", ",", "value", "in", "props", ".", "items", "(", ")", "if", "key", ...
Copies non-ECF properties from ``props`` to ``target`` :param props: An input dictionary :param target: The dictionary to copy non-ECF properties to :return: The ``target`` dictionary
[ "Copies", "non", "-", "ECF", "properties", "from", "props", "to", "target" ]
python
train
swharden/SWHLab
doc/oldcode/swhlab/core/common.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/doc/oldcode/swhlab/core/common.py#L439-L455
def filter_gaussian(Ys,sigma,plotToo=False): """simple gaussian convolution. Returns same # of points as gotten.""" timeA=time.time() window=scipy.signal.gaussian(len(Ys),sigma) window/=sum(window) Ys2=np.convolve(Ys,window,'same') print("LEN:",len(Ys2),len(Ys)) timeB=time.time() print("convolution took %.03f ms"%((timeB-timeA)*1000)) if len(Ys2)!=len(Ys): print("?!?!?!? convolution point size mismatch") if plotToo: pylab.plot(Ys,label='original',alpha=.2) pylab.plot(Ys2,'b-',label='smooth') pylab.legend() pylab.show() return Ys2
[ "def", "filter_gaussian", "(", "Ys", ",", "sigma", ",", "plotToo", "=", "False", ")", ":", "timeA", "=", "time", ".", "time", "(", ")", "window", "=", "scipy", ".", "signal", ".", "gaussian", "(", "len", "(", "Ys", ")", ",", "sigma", ")", "window",...
simple gaussian convolution. Returns same # of points as gotten.
[ "simple", "gaussian", "convolution", ".", "Returns", "same", "#", "of", "points", "as", "gotten", "." ]
python
valid
AbletonAG/abl.vpath
abl/vpath/base/fs.py
https://github.com/AbletonAG/abl.vpath/blob/a57491347f6e7567afa047216e5b6f6035226eaf/abl/vpath/base/fs.py#L804-L811
def lock(self, fail_on_lock=False, cleanup=False): """ Allows to lock a file via abl.util.LockFile, with the same parameters there. Returns an opened, writable file. """ return self.connection.lock(self, fail_on_lock, cleanup)
[ "def", "lock", "(", "self", ",", "fail_on_lock", "=", "False", ",", "cleanup", "=", "False", ")", ":", "return", "self", ".", "connection", ".", "lock", "(", "self", ",", "fail_on_lock", ",", "cleanup", ")" ]
Allows to lock a file via abl.util.LockFile, with the same parameters there. Returns an opened, writable file.
[ "Allows", "to", "lock", "a", "file", "via", "abl", ".", "util", ".", "LockFile", "with", "the", "same", "parameters", "there", "." ]
python
train
pypa/pipenv
pipenv/vendor/markupsafe/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/markupsafe/__init__.py#L127-L150
def unescape(self): """Convert escaped markup back into a text string. This replaces HTML entities with the characters they represent. >>> Markup('Main &raquo; <em>About</em>').unescape() 'Main » <em>About</em>' """ from ._constants import HTML_ENTITIES def handle_match(m): name = m.group(1) if name in HTML_ENTITIES: return unichr(HTML_ENTITIES[name]) try: if name[:2] in ("#x", "#X"): return unichr(int(name[2:], 16)) elif name.startswith("#"): return unichr(int(name[1:])) except ValueError: pass # Don't modify unexpected input. return m.group() return _entity_re.sub(handle_match, text_type(self))
[ "def", "unescape", "(", "self", ")", ":", "from", ".", "_constants", "import", "HTML_ENTITIES", "def", "handle_match", "(", "m", ")", ":", "name", "=", "m", ".", "group", "(", "1", ")", "if", "name", "in", "HTML_ENTITIES", ":", "return", "unichr", "(",...
Convert escaped markup back into a text string. This replaces HTML entities with the characters they represent. >>> Markup('Main &raquo; <em>About</em>').unescape() 'Main » <em>About</em>'
[ "Convert", "escaped", "markup", "back", "into", "a", "text", "string", ".", "This", "replaces", "HTML", "entities", "with", "the", "characters", "they", "represent", "." ]
python
train
stevearc/dql
dql/cli.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/cli.py#L605-L620
def completedefault(self, text, line, *_): """ Autocomplete table names in queries """ tokens = line.split() try: before = tokens[-2] complete = before.lower() in ("from", "update", "table", "into") if tokens[0].lower() == "dump": complete = True if complete: return [ t + " " for t in self.engine.cached_descriptions if t.startswith(text) ] except KeyError: pass
[ "def", "completedefault", "(", "self", ",", "text", ",", "line", ",", "*", "_", ")", ":", "tokens", "=", "line", ".", "split", "(", ")", "try", ":", "before", "=", "tokens", "[", "-", "2", "]", "complete", "=", "before", ".", "lower", "(", ")", ...
Autocomplete table names in queries
[ "Autocomplete", "table", "names", "in", "queries" ]
python
train
appliedsec/pygeoip
pygeoip/util.py
https://github.com/appliedsec/pygeoip/blob/2a725df0b727e8b08f217ab84f7b8243c42554f5/pygeoip/util.py#L42-L48
def str2fp(data): """ Convert bytes data to file handle object (StringIO or BytesIO). :arg data: String data to transform """ return BytesIO(bytearray(data, const.ENCODING)) if const.PY3 else StringIO(data)
[ "def", "str2fp", "(", "data", ")", ":", "return", "BytesIO", "(", "bytearray", "(", "data", ",", "const", ".", "ENCODING", ")", ")", "if", "const", ".", "PY3", "else", "StringIO", "(", "data", ")" ]
Convert bytes data to file handle object (StringIO or BytesIO). :arg data: String data to transform
[ "Convert", "bytes", "data", "to", "file", "handle", "object", "(", "StringIO", "or", "BytesIO", ")", "." ]
python
valid
Clinical-Genomics/scout
scout/utils/requests.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/utils/requests.py#L225-L265
def fetch_ensembl_exons(build='37'): """Fetch the ensembl genes Args: build(str): ['37', '38'] """ LOG.info("Fetching ensembl exons build %s ...", build) if build == '37': url = 'http://grch37.ensembl.org' else: url = 'http://www.ensembl.org' dataset_name = 'hsapiens_gene_ensembl' dataset = pybiomart.Dataset(name=dataset_name, host=url) attributes = [ 'chromosome_name', 'ensembl_gene_id', 'ensembl_transcript_id', 'ensembl_exon_id', 'exon_chrom_start', 'exon_chrom_end', '5_utr_start', '5_utr_end', '3_utr_start', '3_utr_end', 'strand', 'rank' ] filters = { 'chromosome_name': CHROMOSOMES, } result = dataset.query( attributes = attributes, filters = filters ) return result
[ "def", "fetch_ensembl_exons", "(", "build", "=", "'37'", ")", ":", "LOG", ".", "info", "(", "\"Fetching ensembl exons build %s ...\"", ",", "build", ")", "if", "build", "==", "'37'", ":", "url", "=", "'http://grch37.ensembl.org'", "else", ":", "url", "=", "'ht...
Fetch the ensembl genes Args: build(str): ['37', '38']
[ "Fetch", "the", "ensembl", "genes", "Args", ":", "build", "(", "str", ")", ":", "[", "37", "38", "]" ]
python
test
bd808/python-iptools
iptools/ipv4.py
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv4.py#L313-L352
def validate_subnet(s): """Validate a dotted-quad ip address including a netmask. The string is considered a valid dotted-quad address with netmask if it consists of one to four octets (0-255) seperated by periods (.) followed by a forward slash (/) and a subnet bitmask which is expressed in dotted-quad format. >>> validate_subnet('127.0.0.1/255.255.255.255') True >>> validate_subnet('127.0/255.0.0.0') True >>> validate_subnet('127.0/255') True >>> validate_subnet('127.0.0.256/255.255.255.255') False >>> validate_subnet('127.0.0.1/255.255.255.256') False >>> validate_subnet('127.0.0.0') False >>> validate_subnet(None) Traceback (most recent call last): ... TypeError: expected string or unicode :param s: String to validate as a dotted-quad ip address with netmask. :type s: str :returns: ``True`` if a valid dotted-quad ip address with netmask, ``False`` otherwise. :raises: TypeError """ if isinstance(s, basestring): if '/' in s: start, mask = s.split('/', 2) return validate_ip(start) and validate_netmask(mask) else: return False raise TypeError("expected string or unicode")
[ "def", "validate_subnet", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "basestring", ")", ":", "if", "'/'", "in", "s", ":", "start", ",", "mask", "=", "s", ".", "split", "(", "'/'", ",", "2", ")", "return", "validate_ip", "(", "start", ...
Validate a dotted-quad ip address including a netmask. The string is considered a valid dotted-quad address with netmask if it consists of one to four octets (0-255) seperated by periods (.) followed by a forward slash (/) and a subnet bitmask which is expressed in dotted-quad format. >>> validate_subnet('127.0.0.1/255.255.255.255') True >>> validate_subnet('127.0/255.0.0.0') True >>> validate_subnet('127.0/255') True >>> validate_subnet('127.0.0.256/255.255.255.255') False >>> validate_subnet('127.0.0.1/255.255.255.256') False >>> validate_subnet('127.0.0.0') False >>> validate_subnet(None) Traceback (most recent call last): ... TypeError: expected string or unicode :param s: String to validate as a dotted-quad ip address with netmask. :type s: str :returns: ``True`` if a valid dotted-quad ip address with netmask, ``False`` otherwise. :raises: TypeError
[ "Validate", "a", "dotted", "-", "quad", "ip", "address", "including", "a", "netmask", "." ]
python
train
RockFeng0/rtsf-web
webuidriver/actions.py
https://github.com/RockFeng0/rtsf-web/blob/ceabcf62ddf1c969a97b5c7a4a4c547198b6ea71/webuidriver/actions.py#L706-L723
def Upload(cls, filename): """ 文件上传, 非原生input @todo: some upload.exe not prepared @param file: 文件名(文件必须存在在工程resource目录下), upload.exe工具放在工程tools目录下 """ raise Exception("to do") TOOLS_PATH = "" RESOURCE_PATH = "" tool_4path = os.path.join(TOOLS_PATH, "upload.exe") file_4path = os.path.join(RESOURCE_PATH, filename) #file_4path.decode('utf-8').encode('gbk') if os.path.isfile(file_4path): cls.Click() os.system(tool_4path + ' ' + file_4path) else: raise Exception('%s is not exists' % file_4path)
[ "def", "Upload", "(", "cls", ",", "filename", ")", ":", "raise", "Exception", "(", "\"to do\"", ")", "TOOLS_PATH", "=", "\"\"", "RESOURCE_PATH", "=", "\"\"", "tool_4path", "=", "os", ".", "path", ".", "join", "(", "TOOLS_PATH", ",", "\"upload.exe\"", ")", ...
文件上传, 非原生input @todo: some upload.exe not prepared @param file: 文件名(文件必须存在在工程resource目录下), upload.exe工具放在工程tools目录下
[ "文件上传,", "非原生input" ]
python
train
fhamborg/news-please
newsplease/examples/commoncrawl.py
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/examples/commoncrawl.py#L92-L104
def on_valid_article_extracted(article): """ This function will be invoked for each article that was extracted successfully from the archived data and that satisfies the filter criteria. :param article: :return: """ # do whatever you need to do with the article (e.g., save it to disk, store it in ElasticSearch, etc.) with open(__get_pretty_filepath(my_local_download_dir_article, article), 'w') as outfile: if my_json_export_style == 0: json.dump(article.__dict__, outfile, default=str, separators=(',', ':')) elif my_json_export_style == 1: json.dump(article.__dict__, outfile, default=str, indent=4, sort_keys=True)
[ "def", "on_valid_article_extracted", "(", "article", ")", ":", "# do whatever you need to do with the article (e.g., save it to disk, store it in ElasticSearch, etc.)", "with", "open", "(", "__get_pretty_filepath", "(", "my_local_download_dir_article", ",", "article", ")", ",", "'w...
This function will be invoked for each article that was extracted successfully from the archived data and that satisfies the filter criteria. :param article: :return:
[ "This", "function", "will", "be", "invoked", "for", "each", "article", "that", "was", "extracted", "successfully", "from", "the", "archived", "data", "and", "that", "satisfies", "the", "filter", "criteria", ".", ":", "param", "article", ":", ":", "return", "...
python
train
srittau/python-asserts
asserts/__init__.py
https://github.com/srittau/python-asserts/blob/1d5c797031c68ee27552d1c94e7f918c3d3d0453/asserts/__init__.py#L553-L572
def assert_is(first, second, msg_fmt="{msg}"): """Fail if first and second do not refer to the same object. >>> list1 = [5, "foo"] >>> list2 = [5, "foo"] >>> assert_is(list1, list1) >>> assert_is(list1, list2) Traceback (most recent call last): ... AssertionError: [5, 'foo'] is not [5, 'foo'] The following msg_fmt arguments are supported: * msg - the default error message * first - the first argument * second - the second argument """ if first is not second: msg = "{!r} is not {!r}".format(first, second) fail(msg_fmt.format(msg=msg, first=first, second=second))
[ "def", "assert_is", "(", "first", ",", "second", ",", "msg_fmt", "=", "\"{msg}\"", ")", ":", "if", "first", "is", "not", "second", ":", "msg", "=", "\"{!r} is not {!r}\"", ".", "format", "(", "first", ",", "second", ")", "fail", "(", "msg_fmt", ".", "f...
Fail if first and second do not refer to the same object. >>> list1 = [5, "foo"] >>> list2 = [5, "foo"] >>> assert_is(list1, list1) >>> assert_is(list1, list2) Traceback (most recent call last): ... AssertionError: [5, 'foo'] is not [5, 'foo'] The following msg_fmt arguments are supported: * msg - the default error message * first - the first argument * second - the second argument
[ "Fail", "if", "first", "and", "second", "do", "not", "refer", "to", "the", "same", "object", "." ]
python
train
dslackw/slpkg
slpkg/graph.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/graph.py#L98-L102
def remove_dot(self): """Remove .dot files """ if os.path.isfile("{0}.dot".format(self.image)): os.remove("{0}.dot".format(self.image))
[ "def", "remove_dot", "(", "self", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "\"{0}.dot\"", ".", "format", "(", "self", ".", "image", ")", ")", ":", "os", ".", "remove", "(", "\"{0}.dot\"", ".", "format", "(", "self", ".", "image", ")",...
Remove .dot files
[ "Remove", ".", "dot", "files" ]
python
train
exosite-labs/pyonep
pyonep/portals/__init__.py
https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/portals/__init__.py#L518-L523
def get_portal_cik(self, portal_name): """ Retrieves portal object according to 'portal_name' and returns its cik. """ portal = self.get_portal_by_name(portal_name) cik = portal[2][1]['info']['key'] return cik
[ "def", "get_portal_cik", "(", "self", ",", "portal_name", ")", ":", "portal", "=", "self", ".", "get_portal_by_name", "(", "portal_name", ")", "cik", "=", "portal", "[", "2", "]", "[", "1", "]", "[", "'info'", "]", "[", "'key'", "]", "return", "cik" ]
Retrieves portal object according to 'portal_name' and returns its cik.
[ "Retrieves", "portal", "object", "according", "to", "portal_name", "and", "returns", "its", "cik", "." ]
python
train
BerkeleyAutomation/perception
perception/realsense_sensor.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/realsense_sensor.py#L79-L100
def _config_pipe(self): """Configures the pipeline to stream color and depth. """ self._cfg.enable_device(self.id) # configure the color stream self._cfg.enable_stream( rs.stream.color, RealSenseSensor.COLOR_IM_WIDTH, RealSenseSensor.COLOR_IM_HEIGHT, rs.format.bgr8, RealSenseSensor.FPS ) # configure the depth stream self._cfg.enable_stream( rs.stream.depth, RealSenseSensor.DEPTH_IM_WIDTH, 360 if self._depth_align else RealSenseSensor.DEPTH_IM_HEIGHT, rs.format.z16, RealSenseSensor.FPS )
[ "def", "_config_pipe", "(", "self", ")", ":", "self", ".", "_cfg", ".", "enable_device", "(", "self", ".", "id", ")", "# configure the color stream", "self", ".", "_cfg", ".", "enable_stream", "(", "rs", ".", "stream", ".", "color", ",", "RealSenseSensor", ...
Configures the pipeline to stream color and depth.
[ "Configures", "the", "pipeline", "to", "stream", "color", "and", "depth", "." ]
python
train
trailofbits/manticore
manticore/native/cpu/bitwise.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/bitwise.py#L115-L129
def LSR_C(value, amount, width): """ The ARM LSR_C (logical shift right with carry) operation. :param value: Value to shift :type value: int or long or BitVec :param int amount: How many bits to shift it. :param int width: Width of the value :return: Resultant value and carry result :rtype tuple """ assert amount > 0 result = GetNBits(value >> amount, width) carry = Bit(value >> (amount - 1), 0) return (result, carry)
[ "def", "LSR_C", "(", "value", ",", "amount", ",", "width", ")", ":", "assert", "amount", ">", "0", "result", "=", "GetNBits", "(", "value", ">>", "amount", ",", "width", ")", "carry", "=", "Bit", "(", "value", ">>", "(", "amount", "-", "1", ")", ...
The ARM LSR_C (logical shift right with carry) operation. :param value: Value to shift :type value: int or long or BitVec :param int amount: How many bits to shift it. :param int width: Width of the value :return: Resultant value and carry result :rtype tuple
[ "The", "ARM", "LSR_C", "(", "logical", "shift", "right", "with", "carry", ")", "operation", "." ]
python
valid
chaoss/grimoirelab-elk
grimoire_elk/enriched/git.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/git.py#L727-L768
def delete_commit_branches(self, enrich_backend): """Delete the information about branches from the documents representing commits in the enriched index. :param enrich_backend: the enrich backend """ fltr = """ "filter": [ { "term": { "origin": "%s" } } ] """ % self.perceval_backend.origin # reset references in enrich index es_query = """ { "script": { "source": "ctx._source.branches = new HashSet();", "lang": "painless" }, "query": { "bool": { %s } } } """ % fltr index = enrich_backend.elastic.index_url r = self.requests.post(index + "/_update_by_query?refresh", data=es_query, headers=HEADER_JSON, verify=False) try: r.raise_for_status() except requests.exceptions.HTTPError: logger.error("Error while deleting branches on %s", self.elastic.anonymize_url(index)) logger.error(r.text) return logger.debug("Delete branches %s, index %s", r.text, self.elastic.anonymize_url(index))
[ "def", "delete_commit_branches", "(", "self", ",", "enrich_backend", ")", ":", "fltr", "=", "\"\"\"\n \"filter\": [\n {\n \"term\": {\n \"origin\": \"%s\"\n }\n }\n ]\n \"\"\""...
Delete the information about branches from the documents representing commits in the enriched index. :param enrich_backend: the enrich backend
[ "Delete", "the", "information", "about", "branches", "from", "the", "documents", "representing", "commits", "in", "the", "enriched", "index", "." ]
python
train
yahoo/TensorFlowOnSpark
examples/imagenet/inception/slim/losses.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/imagenet/inception/slim/losses.py#L56-L72
def l2_regularizer(weight=1.0, scope=None): """Define a L2 regularizer. Args: weight: scale the loss by this factor. scope: Optional scope for name_scope. Returns: a regularizer function. """ def regularizer(tensor): with tf.name_scope(scope, 'L2Regularizer', [tensor]): l2_weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='weight') return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value') return regularizer
[ "def", "l2_regularizer", "(", "weight", "=", "1.0", ",", "scope", "=", "None", ")", ":", "def", "regularizer", "(", "tensor", ")", ":", "with", "tf", ".", "name_scope", "(", "scope", ",", "'L2Regularizer'", ",", "[", "tensor", "]", ")", ":", "l2_weight...
Define a L2 regularizer. Args: weight: scale the loss by this factor. scope: Optional scope for name_scope. Returns: a regularizer function.
[ "Define", "a", "L2", "regularizer", "." ]
python
train
inspirehep/harvesting-kit
harvestingkit/world_scientific_package.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/world_scientific_package.py#L277-L283
def _attach_fulltext(self, rec, doi): """Attach fulltext FFT.""" url = os.path.join(self.url_prefix, doi) record_add_field(rec, 'FFT', subfields=[('a', url), ('t', 'INSPIRE-PUBLIC'), ('d', 'Fulltext')])
[ "def", "_attach_fulltext", "(", "self", ",", "rec", ",", "doi", ")", ":", "url", "=", "os", ".", "path", ".", "join", "(", "self", ".", "url_prefix", ",", "doi", ")", "record_add_field", "(", "rec", ",", "'FFT'", ",", "subfields", "=", "[", "(", "'...
Attach fulltext FFT.
[ "Attach", "fulltext", "FFT", "." ]
python
valid
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxdataobject_functions.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxdataobject_functions.py#L37-L76
def dxlink(object_id, project_id=None, field=None): ''' :param object_id: Object ID or the object handler itself :type object_id: string or :class:`~dxpy.bindings.DXDataObject` :param project_id: A project ID, if creating a cross-project DXLink :type project_id: string :param field: A field name, if creating a job-based object reference :type field: string :returns: A dict formatted as a symbolic DNAnexus object reference :rtype: dict Creates a DXLink to the specified object. If `object_id` is already a link, it is returned without modification. If `object_id is a `~dxpy.bindings.DXDataObject`, the object ID is retrieved via its `get_id()` method. If `field` is not `None`, `object_id` is expected to be of class 'job' and the link created is a Job Based Object Reference (JBOR), which is of the form:: {'$dnanexus_link': {'job': object_id, 'field': field}} If `field` is `None` and `project_id` is not `None`, the link created is a project-specific link of the form:: {'$dnanexus_link': {'project': project_id, 'id': object_id}} ''' if is_dxlink(object_id): return object_id if isinstance(object_id, DXDataObject): object_id = object_id.get_id() if not any((project_id, field)): return {'$dnanexus_link': object_id} elif field: dxpy.verify_string_dxid(object_id, "job") return {'$dnanexus_link': {'job': object_id, 'field': field}} else: return {'$dnanexus_link': {'project': project_id, 'id': object_id}}
[ "def", "dxlink", "(", "object_id", ",", "project_id", "=", "None", ",", "field", "=", "None", ")", ":", "if", "is_dxlink", "(", "object_id", ")", ":", "return", "object_id", "if", "isinstance", "(", "object_id", ",", "DXDataObject", ")", ":", "object_id", ...
:param object_id: Object ID or the object handler itself :type object_id: string or :class:`~dxpy.bindings.DXDataObject` :param project_id: A project ID, if creating a cross-project DXLink :type project_id: string :param field: A field name, if creating a job-based object reference :type field: string :returns: A dict formatted as a symbolic DNAnexus object reference :rtype: dict Creates a DXLink to the specified object. If `object_id` is already a link, it is returned without modification. If `object_id is a `~dxpy.bindings.DXDataObject`, the object ID is retrieved via its `get_id()` method. If `field` is not `None`, `object_id` is expected to be of class 'job' and the link created is a Job Based Object Reference (JBOR), which is of the form:: {'$dnanexus_link': {'job': object_id, 'field': field}} If `field` is `None` and `project_id` is not `None`, the link created is a project-specific link of the form:: {'$dnanexus_link': {'project': project_id, 'id': object_id}}
[ ":", "param", "object_id", ":", "Object", "ID", "or", "the", "object", "handler", "itself", ":", "type", "object_id", ":", "string", "or", ":", "class", ":", "~dxpy", ".", "bindings", ".", "DXDataObject", ":", "param", "project_id", ":", "A", "project", ...
python
train
NASA-AMMOS/AIT-Core
ait/core/bsc.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/bsc.py#L400-L422
def _get_log_file(self, handler): ''' Generate log file path for a given handler Args: handler: The handler configuration dictionary for which a log file path should be generated. ''' if 'file_name_pattern' not in handler: filename = '%Y-%m-%d-%H-%M-%S-{name}.pcap' else: filename = handler['file_name_pattern'] log_file = handler['log_dir'] if 'path' in handler: log_file = os.path.join(log_file, handler['path'], filename) else: log_file = os.path.join(log_file, filename) log_file = time.strftime(log_file, time.gmtime()) log_file = log_file.format(**handler) return log_file
[ "def", "_get_log_file", "(", "self", ",", "handler", ")", ":", "if", "'file_name_pattern'", "not", "in", "handler", ":", "filename", "=", "'%Y-%m-%d-%H-%M-%S-{name}.pcap'", "else", ":", "filename", "=", "handler", "[", "'file_name_pattern'", "]", "log_file", "=", ...
Generate log file path for a given handler Args: handler: The handler configuration dictionary for which a log file path should be generated.
[ "Generate", "log", "file", "path", "for", "a", "given", "handler" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/oinspect.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/oinspect.py#L461-L479
def _format_fields(self, fields, title_width=12): """Formats a list of fields for display. Parameters ---------- fields : list A list of 2-tuples: (field_title, field_content) title_width : int How many characters to pad titles to. Default 12. """ out = [] header = self.__head for title, content in fields: if len(content.splitlines()) > 1: title = header(title + ":") + "\n" else: title = header((title+":").ljust(title_width)) out.append(title + content) return "\n".join(out)
[ "def", "_format_fields", "(", "self", ",", "fields", ",", "title_width", "=", "12", ")", ":", "out", "=", "[", "]", "header", "=", "self", ".", "__head", "for", "title", ",", "content", "in", "fields", ":", "if", "len", "(", "content", ".", "splitlin...
Formats a list of fields for display. Parameters ---------- fields : list A list of 2-tuples: (field_title, field_content) title_width : int How many characters to pad titles to. Default 12.
[ "Formats", "a", "list", "of", "fields", "for", "display", "." ]
python
test
CityOfZion/neo-python-rpc
neorpc/Client.py
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L176-L187
def get_transaction(self, tx_hash, id=None, endpoint=None): """ Look up a transaction by hash. Args: tx_hash: (str) hash in the form '58c634f81fbd4ae2733d7e3930a9849021840fc19dc6af064d6f2812a333f91d' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json: the transaction as a json object """ return self._call_endpoint(GET_RAW_TRANSACTION, params=[tx_hash, 1], id=id, endpoint=endpoint)
[ "def", "get_transaction", "(", "self", ",", "tx_hash", ",", "id", "=", "None", ",", "endpoint", "=", "None", ")", ":", "return", "self", ".", "_call_endpoint", "(", "GET_RAW_TRANSACTION", ",", "params", "=", "[", "tx_hash", ",", "1", "]", ",", "id", "=...
Look up a transaction by hash. Args: tx_hash: (str) hash in the form '58c634f81fbd4ae2733d7e3930a9849021840fc19dc6af064d6f2812a333f91d' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json: the transaction as a json object
[ "Look", "up", "a", "transaction", "by", "hash", ".", "Args", ":", "tx_hash", ":", "(", "str", ")", "hash", "in", "the", "form", "58c634f81fbd4ae2733d7e3930a9849021840fc19dc6af064d6f2812a333f91d", "id", ":", "(", "int", "optional", ")", "id", "to", "use", "for"...
python
train
OiNutter/lean
lean/__init__.py
https://github.com/OiNutter/lean/blob/5d251f923acd44265ed401de14a9ead6752c543f/lean/__init__.py#L44-L46
def is_registered(ext): ''' Returns true when a template exists on an exact match of the provided file extension ''' return Lean.template_mappings.has_key(ext.lower()) and len(Lean.template_mappings[ext])
[ "def", "is_registered", "(", "ext", ")", ":", "return", "Lean", ".", "template_mappings", ".", "has_key", "(", "ext", ".", "lower", "(", ")", ")", "and", "len", "(", "Lean", ".", "template_mappings", "[", "ext", "]", ")" ]
Returns true when a template exists on an exact match of the provided file extension
[ "Returns", "true", "when", "a", "template", "exists", "on", "an", "exact", "match", "of", "the", "provided", "file", "extension" ]
python
train
lago-project/lago
lago/lago_ansible.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/lago_ansible.py#L88-L105
def _generate_entry(self, vm): """ Generate host entry for the given VM Args: vm (lago.plugins.vm.VMPlugin): The VM for which the entry should be created for. Returns: str: An entry for vm """ return \ '{name} ' \ 'ansible_host={ip} ' \ 'ansible_ssh_private_key_file={key}'.format( name=vm.name(), ip=vm.ip(), key=self.prefix.paths.ssh_id_rsa() )
[ "def", "_generate_entry", "(", "self", ",", "vm", ")", ":", "return", "'{name} '", "'ansible_host={ip} '", "'ansible_ssh_private_key_file={key}'", ".", "format", "(", "name", "=", "vm", ".", "name", "(", ")", ",", "ip", "=", "vm", ".", "ip", "(", ")", ",",...
Generate host entry for the given VM Args: vm (lago.plugins.vm.VMPlugin): The VM for which the entry should be created for. Returns: str: An entry for vm
[ "Generate", "host", "entry", "for", "the", "given", "VM", "Args", ":", "vm", "(", "lago", ".", "plugins", ".", "vm", ".", "VMPlugin", ")", ":", "The", "VM", "for", "which", "the", "entry", "should", "be", "created", "for", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/tools/MAVExplorer.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/tools/MAVExplorer.py#L119-L142
def menu_callback(m): '''called on menu selection''' if m.returnkey.startswith('# '): cmd = m.returnkey[2:] if m.handler is not None: if m.handler_result is None: return cmd += m.handler_result process_stdin(cmd) elif m.returnkey == 'menuSettings': wxsettings.WXSettings(mestate.settings) elif m.returnkey.startswith("mode-"): idx = int(m.returnkey[5:]) mestate.flightmode_selections[idx] = m.IsChecked() elif m.returnkey.startswith("loadLog"): print("File: " + m.returnkey[8:]) elif m.returnkey == 'quit': mestate.console.close() mestate.exit = True print("Exited. Press Enter to continue.") sys.exit(0) else: print('Unknown menu selection: %s' % m.returnkey)
[ "def", "menu_callback", "(", "m", ")", ":", "if", "m", ".", "returnkey", ".", "startswith", "(", "'# '", ")", ":", "cmd", "=", "m", ".", "returnkey", "[", "2", ":", "]", "if", "m", ".", "handler", "is", "not", "None", ":", "if", "m", ".", "hand...
called on menu selection
[ "called", "on", "menu", "selection" ]
python
train
alexprengere/currencyconverter
currency_converter/currency_converter.py
https://github.com/alexprengere/currencyconverter/blob/e3cb0d693819c0c824214225b23a47e9380f71df/currency_converter/currency_converter.py#L147-L159
def load_file(self, currency_file): """To be subclassed if alternate methods of loading data. """ if currency_file.startswith(('http://', 'https://')): content = urlopen(currency_file).read() else: with open(currency_file, 'rb') as f: content = f.read() if currency_file.endswith('.zip'): self.load_lines(get_lines_from_zip(content)) else: self.load_lines(content.decode('utf-8').splitlines())
[ "def", "load_file", "(", "self", ",", "currency_file", ")", ":", "if", "currency_file", ".", "startswith", "(", "(", "'http://'", ",", "'https://'", ")", ")", ":", "content", "=", "urlopen", "(", "currency_file", ")", ".", "read", "(", ")", "else", ":", ...
To be subclassed if alternate methods of loading data.
[ "To", "be", "subclassed", "if", "alternate", "methods", "of", "loading", "data", "." ]
python
test
adamzap/landslide
landslide/generator.py
https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L184-L196
def toc(self): """ Smart getter for Table of Content list. """ toc = [] stack = [toc] for entry in self.__toc: entry['sub'] = [] while entry['level'] < len(stack): stack.pop() while entry['level'] > len(stack): stack.append(stack[-1][-1]['sub']) stack[-1].append(entry) return toc
[ "def", "toc", "(", "self", ")", ":", "toc", "=", "[", "]", "stack", "=", "[", "toc", "]", "for", "entry", "in", "self", ".", "__toc", ":", "entry", "[", "'sub'", "]", "=", "[", "]", "while", "entry", "[", "'level'", "]", "<", "len", "(", "sta...
Smart getter for Table of Content list.
[ "Smart", "getter", "for", "Table", "of", "Content", "list", "." ]
python
train
jacobtomlinson/datapoint-python
datapoint/Manager.py
https://github.com/jacobtomlinson/datapoint-python/blob/1d3f596f21975f42c1484f5a9c3ff057de0b47ae/datapoint/Manager.py#L280-L288
def get_nearest_site(self, latitude=None, longitude=None): """ Deprecated. This function returns nearest Site object to the specified coordinates. """ warning_message = 'This function is deprecated. Use get_nearest_forecast_site() instead' warn(warning_message, DeprecationWarning, stacklevel=2) return self.get_nearest_forecast_site(latitude, longitude)
[ "def", "get_nearest_site", "(", "self", ",", "latitude", "=", "None", ",", "longitude", "=", "None", ")", ":", "warning_message", "=", "'This function is deprecated. Use get_nearest_forecast_site() instead'", "warn", "(", "warning_message", ",", "DeprecationWarning", ",",...
Deprecated. This function returns nearest Site object to the specified coordinates.
[ "Deprecated", ".", "This", "function", "returns", "nearest", "Site", "object", "to", "the", "specified", "coordinates", "." ]
python
train
CitrineInformatics/python-citrination-client
citrination_client/search/client.py
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/search/client.py#L140-L152
def pif_multi_search(self, multi_query): """ Run each in a list of PIF queries against Citrination. :param multi_query: :class:`MultiQuery` object to execute. :return: :class:`PifMultiSearchResult` object with the results of the query. """ failure_message = "Error while making PIF multi search request" response_dict = self._get_success_json( self._post(routes.pif_multi_search, data=json.dumps(multi_query, cls=QueryEncoder), failure_message=failure_message)) return PifMultiSearchResult(**keys_to_snake_case(response_dict['results']))
[ "def", "pif_multi_search", "(", "self", ",", "multi_query", ")", ":", "failure_message", "=", "\"Error while making PIF multi search request\"", "response_dict", "=", "self", ".", "_get_success_json", "(", "self", ".", "_post", "(", "routes", ".", "pif_multi_search", ...
Run each in a list of PIF queries against Citrination. :param multi_query: :class:`MultiQuery` object to execute. :return: :class:`PifMultiSearchResult` object with the results of the query.
[ "Run", "each", "in", "a", "list", "of", "PIF", "queries", "against", "Citrination", "." ]
python
valid
berkeley-cocosci/Wallace
wallace/custom.py
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L89-L97
def success_response(field=None, data=None, request_type=""): """Return a generic success response.""" data_out = {} data_out["status"] = "success" if field: data_out[field] = data print("{} request successful.".format(request_type)) js = dumps(data_out, default=date_handler) return Response(js, status=200, mimetype='application/json')
[ "def", "success_response", "(", "field", "=", "None", ",", "data", "=", "None", ",", "request_type", "=", "\"\"", ")", ":", "data_out", "=", "{", "}", "data_out", "[", "\"status\"", "]", "=", "\"success\"", "if", "field", ":", "data_out", "[", "field", ...
Return a generic success response.
[ "Return", "a", "generic", "success", "response", "." ]
python
train
icio/evil
evil/__init__.py
https://github.com/icio/evil/blob/6f12d16652951fb60ac238cef203eaa585ec0a28/evil/__init__.py#L13-L137
def evil(expr, lookup, operators, cast, reducer, tokenizer): """evil evaluates an expression according to the eval description given. :param expr: An expression to evaluate. :param lookup: A callable which takes a single pattern argument and returns a set of results. The pattern can be anything that is not an operator token or round brackets. :param operators: A precedence-ordered dictionary of (function, side) tuples keyed on the operator token. :param reducer: A callable which takes a sequential list of values (from operations or lookups) and combines them into a result. Typical behaviour is that of the + operator. The return type should be the same as cast. :param cast: A callable which transforms the results of the lookup into the type expected by the operators and the type of the result. :param tokenizer: A callable which will break the query into tokens for evaluation per the lookup and operators. Defaults to setquery.query_tokenizer. :raises: SyntaxError :returns: """ operators = OrderedDict((op[0], op[1:]) for op in operators) if "(" in operators or ")" in operators: raise ValueError("( and ) are reserved operators") operator_tokens = ["(", ")"] + operators.keys() tokens = iter(tokenizer(expr, operator_tokens)) levels = [[]] while True: # Token evaluation and pattern lookups expr = levels.pop() # The currently-constructed expression new_level = False # We should step into a subexpression first_token = len(expr) == 0 # The first (sub)exp. token prev_op_side = None # The side of the last-seen operator try: # Try to get the side of the last operator from an expression # which we are going to continue constructing. 
prev_op_side = operators[expr[-1]][1] except: pass for token in tokens: if token == "(": new_level = True break elif token == ")": break elif token in operators: op_side = operators[token][1] if first_token and op_side & OP_LEFT: raise SyntaxError("Operators which act on expressions to " "their left or both sides cannot be at " "the beginning of an expression.") if prev_op_side is not None: if prev_op_side & OP_RIGHT and op_side & OP_LEFT: raise SyntaxError("Operators cannot be beside one " "another if they act on expressions " "facing one-another.") expr.append(token) prev_op_side = op_side continue else: expr.append(cast(lookup(token))) prev_op_side = None first_token = False if new_level: levels.append(expr) levels.append([]) continue elif prev_op_side is not None and prev_op_side & OP_RIGHT: raise SyntaxError("Operators which act on expressions to their " "right or both sides cannot be at the end of " "an expression.") # Operator evaluation explen = len(expr) for op, (op_eval, op_side) in operators.iteritems(): if op_side is OP_RIGHT: # Apply right-sided operators. We loop from the end backward so # that multiple such operators next to noe another are resolved # in the correct order t = explen - 1 while t >= 0: if expr[t] == op: expr[t] = op_eval(expr[t + 1]) del expr[t + 1] explen -= 1 t -= 1 else: # Apply left- and both-sided operators. We loop forward so that # that multiple such operators next to one another are resolved # in the correct order. t = 0 while t < explen: if expr[t] == op: # Apply left- or both-sided operators if op_side is OP_LEFT: expr[t] = op_eval(expr[t - 1]) del expr[t - 1] t -= 1 explen -= 1 elif op_side is OP_BOTH: expr[t] = op_eval(expr[t - 1], expr[t + 1]) del expr[t + 1], expr[t - 1] t -= 1 explen -= 2 t += 1 if len(levels) > 0: levels[-1].append(reducer(expr)) else: break return reducer(expr)
[ "def", "evil", "(", "expr", ",", "lookup", ",", "operators", ",", "cast", ",", "reducer", ",", "tokenizer", ")", ":", "operators", "=", "OrderedDict", "(", "(", "op", "[", "0", "]", ",", "op", "[", "1", ":", "]", ")", "for", "op", "in", "operator...
evil evaluates an expression according to the eval description given. :param expr: An expression to evaluate. :param lookup: A callable which takes a single pattern argument and returns a set of results. The pattern can be anything that is not an operator token or round brackets. :param operators: A precedence-ordered dictionary of (function, side) tuples keyed on the operator token. :param reducer: A callable which takes a sequential list of values (from operations or lookups) and combines them into a result. Typical behaviour is that of the + operator. The return type should be the same as cast. :param cast: A callable which transforms the results of the lookup into the type expected by the operators and the type of the result. :param tokenizer: A callable which will break the query into tokens for evaluation per the lookup and operators. Defaults to setquery.query_tokenizer. :raises: SyntaxError :returns:
[ "evil", "evaluates", "an", "expression", "according", "to", "the", "eval", "description", "given", "." ]
python
train
Esri/ArcREST
src/arcrest/manageportal/administration.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageportal/administration.py#L796-L808
def updateAppInfo(self, appInfo): """ This operation allows you to update the OAuth-specific properties associated with an application. Use the Get App Info operation to obtain the existing OAuth properties that can be edited. """ params = {"f" : "json", "appInfo" : appInfo} url = self._url + "/oauth/updateAppInfo" return self._post(url=url, param_dict=params, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "updateAppInfo", "(", "self", ",", "appInfo", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"appInfo\"", ":", "appInfo", "}", "url", "=", "self", ".", "_url", "+", "\"/oauth/updateAppInfo\"", "return", "self", ".", "_post", "(", "...
This operation allows you to update the OAuth-specific properties associated with an application. Use the Get App Info operation to obtain the existing OAuth properties that can be edited.
[ "This", "operation", "allows", "you", "to", "update", "the", "OAuth", "-", "specific", "properties", "associated", "with", "an", "application", ".", "Use", "the", "Get", "App", "Info", "operation", "to", "obtain", "the", "existing", "OAuth", "properties", "tha...
python
train
couchbase/couchbase-python-client
couchbase/views/params.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/views/params.py#L404-L446
def from_any(cls, params, **ctor_opts): """ Creates a new Query object from input. :param params: Parameter to convert to query :type params: dict, string, or :class:`Query` If ``params`` is a :class:`Query` object already, a deep copy is made and a new :class:`Query` object is returned. If ``params`` is a string, then a :class:`Query` object is contructed from it. The string itself is not parsed, but rather prepended to any additional parameters (defined via the object's methods) with an additional ``&`` characted. If ``params`` is a dictionary, it is passed to the :class:`Query` constructor. :return: a new :class:`Query` object :raise: :exc:`ArgumentError` if the input is none of the acceptable types mentioned above. Also raises any exceptions possibly thrown by the constructor. """ if isinstance(params, cls): return deepcopy(params) elif isinstance(params, dict): ctor_opts.update(**params) if cls is QueryBase: if ('bbox' in params or 'start_range' in params or 'end_range' in params): return SpatialQuery(**ctor_opts) else: return ViewQuery(**ctor_opts) elif isinstance(params, basestring): ret = cls() ret._base_str = params return ret else: raise ArgumentError.pyexc("Params must be Query, dict, or string")
[ "def", "from_any", "(", "cls", ",", "params", ",", "*", "*", "ctor_opts", ")", ":", "if", "isinstance", "(", "params", ",", "cls", ")", ":", "return", "deepcopy", "(", "params", ")", "elif", "isinstance", "(", "params", ",", "dict", ")", ":", "ctor_o...
Creates a new Query object from input. :param params: Parameter to convert to query :type params: dict, string, or :class:`Query` If ``params`` is a :class:`Query` object already, a deep copy is made and a new :class:`Query` object is returned. If ``params`` is a string, then a :class:`Query` object is contructed from it. The string itself is not parsed, but rather prepended to any additional parameters (defined via the object's methods) with an additional ``&`` characted. If ``params`` is a dictionary, it is passed to the :class:`Query` constructor. :return: a new :class:`Query` object :raise: :exc:`ArgumentError` if the input is none of the acceptable types mentioned above. Also raises any exceptions possibly thrown by the constructor.
[ "Creates", "a", "new", "Query", "object", "from", "input", "." ]
python
train
aiortc/aiortc
aiortc/rtcsctptransport.py
https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcsctptransport.py#L844-L848
async def _receive(self, stream_id, pp_id, data): """ Receive data stream -> ULP. """ await self._data_channel_receive(stream_id, pp_id, data)
[ "async", "def", "_receive", "(", "self", ",", "stream_id", ",", "pp_id", ",", "data", ")", ":", "await", "self", ".", "_data_channel_receive", "(", "stream_id", ",", "pp_id", ",", "data", ")" ]
Receive data stream -> ULP.
[ "Receive", "data", "stream", "-", ">", "ULP", "." ]
python
train
saltstack/salt
salt/utils/files.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/files.py#L702-L710
def remove(path): ''' Runs os.remove(path) and suppresses the OSError if the file doesn't exist ''' try: os.remove(path) except OSError as exc: if exc.errno != errno.ENOENT: raise
[ "def", "remove", "(", "path", ")", ":", "try", ":", "os", ".", "remove", "(", "path", ")", "except", "OSError", "as", "exc", ":", "if", "exc", ".", "errno", "!=", "errno", ".", "ENOENT", ":", "raise" ]
Runs os.remove(path) and suppresses the OSError if the file doesn't exist
[ "Runs", "os", ".", "remove", "(", "path", ")", "and", "suppresses", "the", "OSError", "if", "the", "file", "doesn", "t", "exist" ]
python
train
BerkeleyAutomation/perception
perception/image.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L252-L291
def from_array(x, frame='unspecified'): """ Converts an array of data to an Image based on the values in the array and the data format. """ if not Image.can_convert(x): raise ValueError('Cannot convert array to an Image!') dtype = x.dtype height = x.shape[0] width = x.shape[1] channels = 1 if len(x.shape) == 3: channels = x.shape[2] if dtype == np.uint8: if channels == 1: if np.any((x % BINARY_IM_MAX_VAL) > 0): return GrayscaleImage(x, frame) return BinaryImage(x, frame) elif channels == 3: return ColorImage(x, frame) else: raise ValueError( 'No available image conversion for uint8 array with 2 channels') elif dtype == np.uint16: if channels != 1: raise ValueError( 'No available image conversion for uint16 array with 2 or 3 channels') return GrayscaleImage(x, frame) elif dtype == np.float32 or dtype == np.float64: if channels == 1: return DepthImage(x, frame) elif channels == 2: return GdImage(x, frame) elif channels == 3: logging.warning('Converting float array to uint8') return ColorImage(x.astype(np.uint8), frame) return RgbdImage(x, frame) else: raise ValueError( 'Conversion for dtype %s not supported!' % (str(dtype)))
[ "def", "from_array", "(", "x", ",", "frame", "=", "'unspecified'", ")", ":", "if", "not", "Image", ".", "can_convert", "(", "x", ")", ":", "raise", "ValueError", "(", "'Cannot convert array to an Image!'", ")", "dtype", "=", "x", ".", "dtype", "height", "=...
Converts an array of data to an Image based on the values in the array and the data format.
[ "Converts", "an", "array", "of", "data", "to", "an", "Image", "based", "on", "the", "values", "in", "the", "array", "and", "the", "data", "format", "." ]
python
train
openstack/proliantutils
proliantutils/redfish/redfish.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/redfish.py#L854-L878
def get_essential_properties(self):
    """Build the dict of essential node properties plus MAC addresses.

    Collects memory_mb, cpus, cpu_arch and local_gb from the Redfish
    system resource and returns them together with the NIC MACs.

    :returns: dict with keys 'properties' (cpu/memory/disk data) and
        'macs' (ethernet interface summary).
    :raises: IloError if the Redfish controller fails to return the
        resource data.
    """
    system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
    try:
        # TODO(nisha): Add local_gb here and return after
        # local_gb changes are merged.
        # local_gb = system.storage_summary
        cpu_summary = system.processors.summary
        properties = {
            'memory_mb': system.memory_summary.size_gib * 1024,
            'cpus': cpu_summary.count,
            'cpu_arch': sushy_map.PROCESSOR_ARCH_VALUE_MAP_REV.get(
                cpu_summary.architecture),
            'local_gb': common_storage.get_local_gb(system),
        }
        return {'properties': properties,
                'macs': system.ethernet_interfaces.summary}
    except sushy.exceptions.SushyError as e:
        msg = (self._('The Redfish controller failed to get the '
                      'resource data. Error %(error)s')
               % {'error': str(e)})
        LOG.debug(msg)
        raise exception.IloError(msg)
[ "def", "get_essential_properties", "(", "self", ")", ":", "sushy_system", "=", "self", ".", "_get_sushy_system", "(", "PROLIANT_SYSTEM_ID", ")", "try", ":", "# TODO(nisha): Add local_gb here and return after", "# local_gb changes are merged.", "# local_gb = sushy_system.storage_s...
Constructs the dictionary of essential properties Constructs the dictionary of essential properties, named cpu, cpu_arch, local_gb, memory_mb. The MACs are also returned as part of this method.
[ "Constructs", "the", "dictionary", "of", "essential", "properties" ]
python
train
fishtown-analytics/dbt
core/dbt/clients/_jinja_blocks.py
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/clients/_jinja_blocks.py#L182-L235
def handle_block(self, match, block_start=None): """Handle a block. The current state of the parser should be after the open block is completed: {% blk foo %}my data {% endblk %} ^ right here """ # we have to handle comments inside blocks because you could do this: # {% blk foo %}asdf {# {% endblk %} #} {%endblk%} # they still end up in the data/raw_data of the block itself, but we # have to know to ignore stuff until the end comment marker! found = BlockTag(**match.groupdict()) # the full block started at the given match start, which may include # prefixed whitespace! we'll strip it later if block_start is None: block_start = match.start() self._block_contents = '' # you can have as many comments in your block as you'd like! while True: match = self._expect_match( '"{}"'.format(found.end_block_type_name), found.end_pat(), COMMENT_START_PATTERN, RAW_START_PATTERN, regex('''(?P<quote>(['"]))''') ) groups = match.groupdict() if groups.get('endblock') is not None: break self.advance(match.end()) if groups.get('comment_start') is not None: self.expect_comment_end() elif groups.get('raw_start') is not None: self.expect_raw_end() elif groups.get('quote') is not None: self.rewind() match = self._expect_match('any string', STRING_PATTERN) self.advance(match.end()) else: raise dbt.exceptions.InternalException( 'unhandled regex in handle_block, no match: {}' .format(groups) ) # we want to advance to just the end tag at first, to extract the # contents self.advance(match.start()) found.contents = self._block_contents self._block_contents = None # now advance to the end self.advance(match.end()) found.full_block = self.data[block_start:self.pos] return found
[ "def", "handle_block", "(", "self", ",", "match", ",", "block_start", "=", "None", ")", ":", "# we have to handle comments inside blocks because you could do this:", "# {% blk foo %}asdf {# {% endblk %} #} {%endblk%}", "# they still end up in the data/raw_data of the block itself, but we...
Handle a block. The current state of the parser should be after the open block is completed: {% blk foo %}my data {% endblk %} ^ right here
[ "Handle", "a", "block", ".", "The", "current", "state", "of", "the", "parser", "should", "be", "after", "the", "open", "block", "is", "completed", ":", "{", "%", "blk", "foo", "%", "}", "my", "data", "{", "%", "endblk", "%", "}", "^", "right", "her...
python
train
has2k1/plotnine
plotnine/themes/seaborn_rcmod.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/seaborn_rcmod.py#L377-L408
def set_context(context=None, font_scale=1, rc=None):
    """Set the plotting context parameters.

    This affects things like the size of the labels, lines, and other
    elements of the plot, but not the overall style. The base context
    is "notebook"; the other contexts are "paper", "talk", and "poster",
    which are versions of the notebook parameters scaled by .8, 1.3,
    and 1.6, respectively.

    Parameters
    ----------
    context : dict, None, or one of {paper, notebook, talk, poster}
        A dictionary of parameters or the name of a preconfigured set.
    font_scale : float, optional
        Separate scaling factor to independently scale the size of the
        font elements.
    rc : dict, optional
        Parameter mappings to override the values in the preset seaborn
        context dictionaries. This only updates parameters that are
        considered part of the context definition.

    Examples
    --------
    >>> set_context("paper")
    >>> set_context("talk", font_scale=1.4)
    >>> set_context("talk", rc={"lines.linewidth": 2})

    See Also
    --------
    plotting_context : return a dictionary of rc parameters, or use in
                       a ``with`` statement to temporarily set the context.
    set_style : set the default parameters for figure style
    set_palette : set the default color palette for figures
    """
    # Resolve the requested context to an rc dict and apply it globally.
    mpl.rcParams.update(plotting_context(context, font_scale, rc))
[ "def", "set_context", "(", "context", "=", "None", ",", "font_scale", "=", "1", ",", "rc", "=", "None", ")", ":", "context_object", "=", "plotting_context", "(", "context", ",", "font_scale", ",", "rc", ")", "mpl", ".", "rcParams", ".", "update", "(", ...
Set the plotting context parameters. This affects things like the size of the labels, lines, and other elements of the plot, but not the overall style. The base context is "notebook", and the other contexts are "paper", "talk", and "poster", which are version of the notebook parameters scaled by .8, 1.3, and 1.6, respectively. Parameters ---------- context : dict, None, or one of {paper, notebook, talk, poster} A dictionary of parameters or the name of a preconfigured set. font_scale : float, optional Separate scaling factor to independently scale the size of the font elements. rc : dict, optional Parameter mappings to override the values in the preset seaborn context dictionaries. This only updates parameters that are considered part of the context definition. Examples -------- >>> set_context("paper") >>> set_context("talk", font_scale=1.4) >>> set_context("talk", rc={"lines.linewidth": 2}) See Also -------- plotting_context : return a dictionary of rc parameters, or use in a ``with`` statement to temporarily set the context. set_style : set the default parameters for figure style set_palette : set the default color palette for figures
[ "Set", "the", "plotting", "context", "parameters", ".", "This", "affects", "things", "like", "the", "size", "of", "the", "labels", "lines", "and", "other", "elements", "of", "the", "plot", "but", "not", "the", "overall", "style", ".", "The", "base", "conte...
python
train
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/example_imager_spark/example_spark_imager.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/example_imager_spark/example_spark_imager.py#L43-L126
def node_run(input_file, coords_only, bc_settings, bc_grid_weights): """Main function to process visibility data on Spark cluster nodes. Args: input_file (str): RDD element containing filename to process. coords_only (boolean): If true, read only baseline coordinates to define the weights grid. bc_settings (pyspark.broadcast.Broadcast): Spark broadcast variable containing pipeline settings dictionary. bc_grid_weights (pyspark.broadcast.Broadcast): Spark broadcast variable containing weights grid. May be None. Returns: tuple: Output RDD element. """ # Create a logger. log = logging.getLogger('pyspark') log.setLevel(logging.INFO) if len(log.handlers) == 0: log.addHandler(logging.StreamHandler(sys.stdout)) # Create an imager and configure it. precision = bc_settings.value['precision'] imager = oskar.Imager(precision) for key, value in bc_settings.value['imager'].items(): setattr(imager, key, value) grid_size = imager.plane_size grid_weights = None # Get a handle to the input Measurement Set. ms_han = oskar.MeasurementSet.open(input_file) # Check if doing a first pass. if coords_only: # If necessary, generate a local weights grid. if imager.weighting == 'Uniform': grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision) # Do a first pass for uniform weighting or W-projection. log.info('Reading coordinates from %s', input_file) imager.coords_only = True process_input_data(ms_han, imager, None, grid_weights) imager.coords_only = False # Return weights grid and required number of W-planes as RDD element. return grid_weights, imager.num_w_planes # Allocate a local visibility grid on the node. grid_data = numpy.zeros([grid_size, grid_size], dtype='c8' if precision == 'single' else 'c16') # Process data according to mode. log.info('Reading visibilities from %s', input_file) if bc_settings.value['combine']: # Get weights grid from Spark Broadcast variable. if imager.weighting == 'Uniform': grid_weights = bc_grid_weights.value # Populate the local visibility grid. 
grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights) # Return grid as RDD element. log.info('Returning gridded visibilities to RDD') return grid_data, grid_norm else: # If necessary, generate a local weights grid. if imager.weighting == 'Uniform': grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision) # If necessary, do a first pass for uniform weighting or W-projection. if imager.weighting == 'Uniform' or imager.algorithm == 'W-projection': imager.coords_only = True process_input_data(ms_han, imager, None, grid_weights) imager.coords_only = False # Populate the local visibility grid. grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights) # Save image by finalising grid. output_file = splitext(input_file)[0] + '.fits' save_image(imager, grid_data, grid_norm, output_file) log.info('Finished. Output file is %s', output_file) return 0
[ "def", "node_run", "(", "input_file", ",", "coords_only", ",", "bc_settings", ",", "bc_grid_weights", ")", ":", "# Create a logger.", "log", "=", "logging", ".", "getLogger", "(", "'pyspark'", ")", "log", ".", "setLevel", "(", "logging", ".", "INFO", ")", "i...
Main function to process visibility data on Spark cluster nodes. Args: input_file (str): RDD element containing filename to process. coords_only (boolean): If true, read only baseline coordinates to define the weights grid. bc_settings (pyspark.broadcast.Broadcast): Spark broadcast variable containing pipeline settings dictionary. bc_grid_weights (pyspark.broadcast.Broadcast): Spark broadcast variable containing weights grid. May be None. Returns: tuple: Output RDD element.
[ "Main", "function", "to", "process", "visibility", "data", "on", "Spark", "cluster", "nodes", "." ]
python
train
henrysher/kotocore
kotocore/cache.py
https://github.com/henrysher/kotocore/blob/c52d2f3878b924ceabca07f61c91abcb1b230ecc/kotocore/cache.py#L170-L192
def del_resource(self, service_name, resource_name, base_class=None):
    """Remove a cached resource class for a given service.

    Fails silently if no matching entry exists in the cache: absence
    from the cache is already the desired end state.

    :param service_name: The service a given ``Resource`` talks to. Ex.
        ``sqs``, ``sns``, ``dynamodb``, etc.
    :type service_name: string

    :param base_class: (Optional) The base class of the object. Prevents
        "magically" loading the wrong class (one with a different base).
        Default is ``default``.
    :type base_class: class
    """
    # Fire & forget: any missing key along the lookup chain simply means
    # there is nothing to delete.
    try:
        classpath = self.build_classpath(base_class)
        del self.services[service_name]['resources'][resource_name][classpath]
    except KeyError:
        pass
[ "def", "del_resource", "(", "self", ",", "service_name", ",", "resource_name", ",", "base_class", "=", "None", ")", ":", "# Unlike ``get_resource``, this should be fire & forget.", "# We don't really care, as long as it's not in the cache any longer.", "try", ":", "classpath", ...
Deletes a resource class for a given service. Fails silently if no connection is found in the cache. :param service_name: The service a given ``Resource`` talks to. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :param base_class: (Optional) The base class of the object. Prevents "magically" loading the wrong class (one with a different base). Default is ``default``. :type base_class: class
[ "Deletes", "a", "resource", "class", "for", "a", "given", "service", "." ]
python
train
Karaage-Cluster/karaage
karaage/plugins/kgapplications/views/aed.py
https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/plugins/kgapplications/views/aed.py#L392-L409
def view(self, request, application, label, roles, actions):
    """Django get_next_action method."""
    # Mark the applicant's email address as verified the first time the
    # introduction page is viewed for an applicant-type application.
    if application.content_type.model == 'applicant':
        if not application.applicant.email_verified:
            application.applicant.email_verified = True
            application.applicant.save()

    # A posted action short-circuits rendering the introduction page.
    selected = next((a for a in actions if a in request.POST), None)
    if selected is not None:
        return selected

    link, is_secret = base.get_email_link(application)
    context = {
        'actions': actions,
        'application': application,
        'roles': roles,
        'link': link,
        'is_secret': is_secret,
    }
    return render(
        template_name='kgapplications/project_aed_introduction.html',
        context=context,
        request=request)
[ "def", "view", "(", "self", ",", "request", ",", "application", ",", "label", ",", "roles", ",", "actions", ")", ":", "if", "application", ".", "content_type", ".", "model", "==", "'applicant'", ":", "if", "not", "application", ".", "applicant", ".", "em...
Django get_next_action method.
[ "Django", "get_next_action", "method", "." ]
python
train
Parsl/parsl
parsl/providers/condor/condor.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/providers/condor/condor.py#L108-L126
def _status(self):
    """Update the resource dictionary with job statuses.

    Queries ``condor_q`` for every tracked job id and translates the
    numeric JobStatus column into provider status strings via
    ``translate_table``.

    Example output::

        $ condor_q  34524642.0 34524643.0 -af:jr JobStatus
        34524642.0 2
        34524643.0 1
    """
    job_id_list = ' '.join(self.resources.keys())
    cmd = "condor_q {0} -af:jr JobStatus".format(job_id_list)
    retcode, stdout, stderr = super().execute_wait(cmd)
    for line in stdout.strip().split('\n'):
        parts = line.split()
        # Guard against blank output (no matching jobs) or malformed
        # lines, which previously raised IndexError on parts[0]/parts[1].
        if len(parts) < 2:
            continue
        job_id = parts[0]
        # Ignore jobs we are not tracking instead of raising KeyError.
        if job_id not in self.resources:
            continue
        status = translate_table.get(parts[1], 'UNKNOWN')
        self.resources[job_id]['status'] = status
[ "def", "_status", "(", "self", ")", ":", "job_id_list", "=", "' '", ".", "join", "(", "self", ".", "resources", ".", "keys", "(", ")", ")", "cmd", "=", "\"condor_q {0} -af:jr JobStatus\"", ".", "format", "(", "job_id_list", ")", "retcode", ",", "stdout", ...
Update the resource dictionary with job statuses.
[ "Update", "the", "resource", "dictionary", "with", "job", "statuses", "." ]
python
valid
swisscom/cleanerversion
versions/models.py
https://github.com/swisscom/cleanerversion/blob/becadbab5d7b474a0e9a596b99e97682402d2f2c/versions/models.py#L518-L529
def _clone(self, *args, **kwargs):
    """
    Overrides the QuerySet._clone method by adding the cloning of the
    VersionedQuerySet's query_time parameter

    :param kwargs: Same as the original QuerySet._clone params
    :return: Just as QuerySet._clone, this method returns a clone of
        the original object
    """
    # NOTE(review): positional args are accepted but deliberately not
    # forwarded to super()._clone() -- presumably for compatibility
    # across Django versions with differing _clone() signatures; verify.
    clone = super(VersionedQuerySet, self)._clone(**kwargs)
    # Propagate the version query time so the clone filters on the same
    # point in time as the original queryset.
    clone.querytime = self.querytime
    return clone
[ "def", "_clone", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "clone", "=", "super", "(", "VersionedQuerySet", ",", "self", ")", ".", "_clone", "(", "*", "*", "kwargs", ")", "clone", ".", "querytime", "=", "self", ".", "queryti...
Overrides the QuerySet._clone method by adding the cloning of the VersionedQuerySet's query_time parameter :param kwargs: Same as the original QuerySet._clone params :return: Just as QuerySet._clone, this method returns a clone of the original object
[ "Overrides", "the", "QuerySet", ".", "_clone", "method", "by", "adding", "the", "cloning", "of", "the", "VersionedQuerySet", "s", "query_time", "parameter" ]
python
train
ScottDuckworth/python-anyvcs
anyvcs/svn.py
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/svn.py#L118-L138
def clone(cls, srcpath, destpath):
    """Copy a main repository to a new location.

    Dumps the repository at ``srcpath`` with ``svnadmin dump`` and loads
    the stream into a freshly created repository at ``destpath``.

    :param srcpath: path of the repository to copy.
    :param destpath: path of the new repository; created if missing.
    :returns: the newly created repository object.
    :raises subprocess.CalledProcessError: if ``svnadmin dump`` exits
        non-zero.
    """
    try:
        os.makedirs(destpath)
    except OSError as e:
        # The destination directory may legitimately already exist;
        # re-raise anything else.
        if e.errno != errno.EEXIST:
            raise
    cmd = [SVNADMIN, 'dump', '--quiet', '.']
    dump = subprocess.Popen(
        cmd,
        cwd=srcpath,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    repo = cls.create(destpath)
    repo.load(dump.stdout)
    # NOTE(review): stderr is only drained after stdout is fully
    # consumed; a very chatty stderr could in principle fill the pipe
    # buffer and stall the dump -- verify with large repositories.
    stderr = dump.stderr.read()
    dump.stdout.close()
    dump.stderr.close()
    dump.wait()
    if dump.returncode != 0:
        raise subprocess.CalledProcessError(dump.returncode, cmd, stderr)
    return repo
[ "def", "clone", "(", "cls", ",", "srcpath", ",", "destpath", ")", ":", "try", ":", "os", ".", "makedirs", "(", "destpath", ")", "except", "OSError", "as", "e", ":", "if", "not", "e", ".", "errno", "==", "errno", ".", "EEXIST", ":", "raise", "cmd", ...
Copy a main repository to a new location.
[ "Copy", "a", "main", "repository", "to", "a", "new", "location", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/ptp_state/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/ptp_state/__init__.py#L238-L259
def _set_parent_port_detail(self, v, load=False):
    """
    Setter method for parent_port_detail, mapped from YANG variable /ptp_state/parent_port_detail (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_parent_port_detail is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_parent_port_detail() directly.
    """
    # NOTE(review): this looks like pyangbind-generated code -- prefer
    # regenerating from the YANG model over hand-editing; verify.
    if hasattr(v, "_utype"):
        # Coerce the raw value through the declared union/user type first.
        v = v._utype(v)
    try:
        # Wrap the value in a YANGDynClass carrying the YANG metadata for
        # this container; raises if v is not container-compatible.
        t = YANGDynClass(v,base=parent_port_detail.parent_port_detail, is_container='container', presence=False, yang_name="parent-port-detail", rest_name="parent-port-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ptp-parent-port-detail', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ptp-operational', defining_module='brocade-ptp-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """parent_port_detail must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=parent_port_detail.parent_port_detail, is_container='container', presence=False, yang_name="parent-port-detail", rest_name="parent-port-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ptp-parent-port-detail', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ptp-operational', defining_module='brocade-ptp-operational', yang_type='container', is_config=False)""",
        })

    self.__parent_port_detail = t
    # Trigger the instance's change hook, if one is registered.
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_parent_port_detail", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ","...
Setter method for parent_port_detail, mapped from YANG variable /ptp_state/parent_port_detail (container) If this variable is read-only (config: false) in the source YANG file, then _set_parent_port_detail is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_parent_port_detail() directly.
[ "Setter", "method", "for", "parent_port_detail", "mapped", "from", "YANG", "variable", "/", "ptp_state", "/", "parent_port_detail", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "so...
python
train
architv/soccer-cli
soccer/writers.py
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L244-L256
def team_players(self, team):
    """Store output of team players to a CSV file"""
    headers = ['Jersey Number', 'Name', 'Position', 'Nationality',
               'Date of Birth']
    # One row per player, columns in the same order as the headers.
    rows = [[player['shirtNumber'],
             player['name'],
             player['position'],
             player['nationality'],
             player['dateOfBirth']]
            for player in team]
    self.generate_output([headers] + rows)
[ "def", "team_players", "(", "self", ",", "team", ")", ":", "headers", "=", "[", "'Jersey Number'", ",", "'Name'", ",", "'Position'", ",", "'Nationality'", ",", "'Date of Birth'", "]", "result", "=", "[", "headers", "]", "result", ".", "extend", "(", "[", ...
Store output of team players to a CSV file
[ "Store", "output", "of", "team", "players", "to", "a", "CSV", "file" ]
python
train
tanghaibao/goatools
goatools/gosubdag/plot/go_name_shorten.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/go_name_shorten.py#L131-L136
def _keep_this(self, name): """Return True if there are to be no modifications to name.""" for keep_name in self.keep: if name == keep_name: return True return False
[ "def", "_keep_this", "(", "self", ",", "name", ")", ":", "for", "keep_name", "in", "self", ".", "keep", ":", "if", "name", "==", "keep_name", ":", "return", "True", "return", "False" ]
Return True if there are to be no modifications to name.
[ "Return", "True", "if", "there", "are", "to", "be", "no", "modifications", "to", "name", "." ]
python
train
tanghaibao/jcvi
jcvi/assembly/goldenpath.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L1061-L1100
def neighbor(args):
    """
    %prog neighbor agpfile componentID

    Check overlaps of a particular component in agpfile.
    """
    # NOTE: the docstring above is runtime help text (passed to
    # OptionParser) -- keep its wording stable.
    p = OptionParser(neighbor.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    agpfile, componentID = args
    fastadir = "fasta"

    # Show the component's AGP record with two lines of context.
    cmd = "grep"
    cmd += " --color -C2 {0} {1}".format(componentID, agpfile)
    sh(cmd)

    agp = AGP(agpfile)
    aorder = agp.order
    # Idiomatic membership test (was: `not componentID in aorder`).
    if componentID not in aorder:
        print("Record {0} not present in `{1}`."\
                .format(componentID, agpfile), file=sys.stderr)
        return

    i, c = aorder[componentID]
    north, south = agp.getNorthSouthClone(i)

    # Check overlap with each physical (non-gap) neighbor, reversing the
    # query when the neighbor (or the component itself) is on the minus
    # strand.
    if not north.isCloneGap:
        ar = [north.component_id, componentID, "--dir=" + fastadir]
        if north.orientation == '-':
            ar += ["--qreverse"]
        overlap(ar)

    if not south.isCloneGap:
        ar = [componentID, south.component_id, "--dir=" + fastadir]
        if c.orientation == '-':
            ar += ["--qreverse"]
        overlap(ar)
[ "def", "neighbor", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "neighbor", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", ...
%prog neighbor agpfile componentID Check overlaps of a particular component in agpfile.
[ "%prog", "neighbor", "agpfile", "componentID" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1238-L1248
def autoencoder_ordered_text_small():
    """Ordered discrete autoencoder model for text, small version."""
    hparams = autoencoder_ordered_text()
    # Shrink the base text configuration for faster experimentation.
    overrides = {
        "bottleneck_bits": 32,
        "num_hidden_layers": 3,
        "hidden_size": 64,
        "max_hidden_size": 512,
        "bottleneck_noise": 0.0,
        "autoregressive_mode": "conv5",
        "sample_height": 4,
    }
    for name, value in overrides.items():
        setattr(hparams, name, value)
    return hparams
[ "def", "autoencoder_ordered_text_small", "(", ")", ":", "hparams", "=", "autoencoder_ordered_text", "(", ")", "hparams", ".", "bottleneck_bits", "=", "32", "hparams", ".", "num_hidden_layers", "=", "3", "hparams", ".", "hidden_size", "=", "64", "hparams", ".", "...
Ordered discrete autoencoder model for text, small version.
[ "Ordered", "discrete", "autoencoder", "model", "for", "text", "small", "version", "." ]
python
train