Columns:
  repo              string  (lengths 7 to 54)
  path              string  (lengths 4 to 192)
  url               string  (lengths 87 to 284)
  code              string  (lengths 78 to 104k)
  code_tokens       list
  docstring         string  (lengths 1 to 46.9k)
  docstring_tokens  list
  language          string  (1 distinct value)
  partition         string  (3 distinct values)
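The rows below follow this schema. As a minimal sketch of how such a corpus could be inspected, assuming the records are available as JSON Lines with exactly these fields and that the Hugging Face datasets library is installed (both are assumptions; the dump itself names no file or package):

    # Sketch only: "python_train.jsonl" is a hypothetical local file holding
    # records with the columns listed above (repo, path, url, code, ...).
    from datasets import load_dataset

    ds = load_dataset("json", data_files="python_train.jsonl", split="train")

    row = ds[0]
    print(row["repo"], row["path"])          # repository and file path
    print(row["url"])                        # permalink to the function
    print(row["docstring"].splitlines()[0])  # first line of the docstring
    print(len(row["code_tokens"]), "code tokens")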
michal-stuglik/django-blastplus
blastplus/forms.py
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/forms.py#L177-L187
def check_allowed_letters(seq, allowed_letters_as_set): """Validate sequence: Rise an error if sequence contains undesirable letters. """ # set of unique letters in sequence seq_set = set(seq) not_allowed_letters_in_seq = [x for x in seq_set if str(x).upper() not in allowed_letters_as_set] if len(not_allowed_letters_in_seq) > 0: raise forms.ValidationError( "This sequence type cannot contain letters: " + ", ".join(not_allowed_letters_in_seq))
[ "def", "check_allowed_letters", "(", "seq", ",", "allowed_letters_as_set", ")", ":", "# set of unique letters in sequence", "seq_set", "=", "set", "(", "seq", ")", "not_allowed_letters_in_seq", "=", "[", "x", "for", "x", "in", "seq_set", "if", "str", "(", "x", "...
Validate sequence: Rise an error if sequence contains undesirable letters.
[ "Validate", "sequence", ":", "Rise", "an", "error", "if", "sequence", "contains", "undesirable", "letters", "." ]
python
train
kstaniek/condoor
condoor/drivers/jumphost.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/drivers/jumphost.py#L21-L29
def get_version_text(self): """Return the version information from Unix host.""" try: version_text = self.device.send('uname -sr', timeout=10) except CommandError: self.log("Non Unix jumphost type detected") return None raise ConnectionError("Non Unix jumphost type detected.") return version_text
[ "def", "get_version_text", "(", "self", ")", ":", "try", ":", "version_text", "=", "self", ".", "device", ".", "send", "(", "'uname -sr'", ",", "timeout", "=", "10", ")", "except", "CommandError", ":", "self", ".", "log", "(", "\"Non Unix jumphost type detec...
Return the version information from Unix host.
[ "Return", "the", "version", "information", "from", "Unix", "host", "." ]
python
train
zkbt/the-friendly-stars
thefriendlystars/constellations/constellation.py
https://github.com/zkbt/the-friendly-stars/blob/50d3f979e79e63c66629065c75595696dc79802e/thefriendlystars/constellations/constellation.py#L339-L350
def allskyfinder(self, figsize=(14, 7), **kwargs): ''' Plot an all-sky finder chart. This *does* create a new figure. ''' plt.figure(figsize=figsize) scatter = self.plot(**kwargs) plt.xlabel(r'Right Ascension ($^\circ$)'); plt.ylabel(r'Declination ($^\circ$)') #plt.title('{} in {:.1f}'.format(self.name, epoch)) plt.xlim(0, 360) plt.ylim(-90,90) return scatter
[ "def", "allskyfinder", "(", "self", ",", "figsize", "=", "(", "14", ",", "7", ")", ",", "*", "*", "kwargs", ")", ":", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "scatter", "=", "self", ".", "plot", "(", "*", "*", "kwargs", ")", ...
Plot an all-sky finder chart. This *does* create a new figure.
[ "Plot", "an", "all", "-", "sky", "finder", "chart", ".", "This", "*", "does", "*", "create", "a", "new", "figure", "." ]
python
train
twisted/vertex
vertex/sigma.py
https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/sigma.py#L705-L711
def transloadsForPeer(self, peer): """ Returns an iterator of transloads that apply to a particular peer. """ for tl in self.transloads.itervalues(): if peer in tl.peers: yield tl
[ "def", "transloadsForPeer", "(", "self", ",", "peer", ")", ":", "for", "tl", "in", "self", ".", "transloads", ".", "itervalues", "(", ")", ":", "if", "peer", "in", "tl", ".", "peers", ":", "yield", "tl" ]
Returns an iterator of transloads that apply to a particular peer.
[ "Returns", "an", "iterator", "of", "transloads", "that", "apply", "to", "a", "particular", "peer", "." ]
python
train
HazyResearch/metal
metal/label_model/utils.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/label_model/utils.py#L39-L46
def compute_inv_covariance(L_aug, Y, k, p): """Given label matrix L and labels Y, compute the covariance. Args: L: (np.array) [n, d] The augmented (indicator) label matrix Y: (np.array int) [n] The true labels in {1,...,k} """ return np.linalg.inv(compute_covariance(L_aug, Y, k, p))
[ "def", "compute_inv_covariance", "(", "L_aug", ",", "Y", ",", "k", ",", "p", ")", ":", "return", "np", ".", "linalg", ".", "inv", "(", "compute_covariance", "(", "L_aug", ",", "Y", ",", "k", ",", "p", ")", ")" ]
Given label matrix L and labels Y, compute the covariance. Args: L: (np.array) [n, d] The augmented (indicator) label matrix Y: (np.array int) [n] The true labels in {1,...,k}
[ "Given", "label", "matrix", "L", "and", "labels", "Y", "compute", "the", "covariance", "." ]
python
train
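The compute_inv_covariance entry above reduces to a numpy matrix inversion; a tiny self-contained illustration of just that step (compute_covariance itself is project-specific and not reproduced here):

    # Invert a small covariance matrix and verify the result, mirroring the
    # np.linalg.inv(...) call in the row above.
    import numpy as np

    cov = np.array([[2.0, 0.5],
                    [0.5, 1.0]])
    inv_cov = np.linalg.inv(cov)
    print(np.allclose(cov @ inv_cov, np.eye(2)))  # True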
swevm/scaleio-py
scaleiopy/scaleio.py
https://github.com/swevm/scaleio-py/blob/d043a0137cb925987fd5c895a3210968ce1d9028/scaleiopy/scaleio.py#L190-L203
def volumes(self): """ Returns a `list` of all the `Volume` known to the cluster. Updates every time - no caching. :return: a `list` of all the `Volume` known to the cluster. :rtype: list """ self.connection._check_login() response = self.connection._do_get("{}/{}".format(self.connection._api_url, "types/Volume/instances")).json() all_volumes = [] for volume in response: all_volumes.append( SIO_Volume.from_dict(volume) ) return all_volumes
[ "def", "volumes", "(", "self", ")", ":", "self", ".", "connection", ".", "_check_login", "(", ")", "response", "=", "self", ".", "connection", ".", "_do_get", "(", "\"{}/{}\"", ".", "format", "(", "self", ".", "connection", ".", "_api_url", ",", "\"types...
Returns a `list` of all the `Volume` known to the cluster. Updates every time - no caching. :return: a `list` of all the `Volume` known to the cluster. :rtype: list
[ "Returns", "a", "list", "of", "all", "the", "Volume", "known", "to", "the", "cluster", ".", "Updates", "every", "time", "-", "no", "caching", ".", ":", "return", ":", "a", "list", "of", "all", "the", "Volume", "known", "to", "the", "cluster", ".", ":...
python
train
hannes-brt/hebel
hebel/pycuda_ops/linalg.py
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/linalg.py#L39-L199
def dot(x_gpu, y_gpu, transa='N', transb='N', handle=None, target=None): """ Dot product of two arrays. For 1D arrays, this function computes the inner product. For 2D arrays of shapes `(m, k)` and `(k, n)`, it computes the matrix product; the result has shape `(m, n)`. Parameters ---------- x_gpu : pycuda.gpuarray.GPUArray Input array. y_gpu : pycuda.gpuarray.GPUArray Input array. transa : char If 'T', compute the product of the transpose of `x_gpu`. If 'C', compute the product of the Hermitian of `x_gpu`. transb : char If 'T', compute the product of the transpose of `y_gpu`. If 'C', compute the product of the Hermitian of `y_gpu`. handle : int CUBLAS context. If no context is specified, the default handle from `scikits.cuda.misc._global_cublas_handle` is used. Returns ------- c_gpu : pycuda.gpuarray.GPUArray, float{32,64}, or complex{64,128} Inner product of `x_gpu` and `y_gpu`. When the inputs are 1D arrays, the result will be returned as a scalar. Notes ----- The input matrices must all contain elements of the same data type. Examples -------- >>> import pycuda.gpuarray as gpuarray >>> import pycuda.autoinit >>> import numpy as np >>> import linalg >>> import misc >>> linalg.init() >>> a = np.asarray(np.random.rand(4, 2), np.float32) >>> b = np.asarray(np.random.rand(2, 2), np.float32) >>> a_gpu = gpuarray.to_gpu(a) >>> b_gpu = gpuarray.to_gpu(b) >>> c_gpu = linalg.dot(a_gpu, b_gpu) >>> np.allclose(np.dot(a, b), c_gpu.get()) True >>> d = np.asarray(np.random.rand(5), np.float32) >>> e = np.asarray(np.random.rand(5), np.float32) >>> d_gpu = gpuarray.to_gpu(d) >>> e_gpu = gpuarray.to_gpu(e) >>> f = linalg.dot(d_gpu, e_gpu) >>> np.allclose(np.dot(d, e), f) True """ if handle is None: handle = _global_cublas_handle if len(x_gpu.shape) == 1 and len(y_gpu.shape) == 1: if x_gpu.size != y_gpu.size: raise ValueError('arrays must be of same length: ' 'x_gpu.size = %d, y_gpu.size = %d' % (x_gpu.size, y_gpu.size)) # Compute inner product for 1D arrays: if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64): cublas_func = cublas.cublasCdotu elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32): cublas_func = cublas.cublasSdot elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128): cublas_func = cublas.cublasZdotu elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64): cublas_func = cublas.cublasDdot else: raise ValueError('unsupported combination of input types: ' 'x_gpu.dtype = %s, y_gpu.dtype = %s' % (str(x_gpu.dtype), str(y_gpu.dtype))) return cublas_func(handle, x_gpu.size, x_gpu.gpudata, 1, y_gpu.gpudata, 1) else: # Get the shapes of the arguments (accounting for the # possibility that one of them may only have one dimension): x_shape = x_gpu.shape y_shape = y_gpu.shape if len(x_shape) == 1: x_shape = (1, x_shape[0]) if len(y_shape) == 1: y_shape = (1, y_shape[0]) # Perform matrix multiplication for 2D arrays: if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64): cublas_func = cublas.cublasCgemm alpha = np.complex64(1.0) beta = np.complex64(0.0) elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32): cublas_func = cublas.cublasSgemm alpha = np.float32(1.0) beta = np.float32(0.0) elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128): cublas_func = cublas.cublasZgemm alpha = np.complex128(1.0) beta = np.complex128(0.0) elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64): cublas_func = cublas.cublasDgemm alpha = np.float64(1.0) beta = np.float64(0.0) else: raise ValueError('unsupported combination of input types: ' 'x_gpu.dtype = %s, y_gpu.dtype = %s' % (str(x_gpu.dtype), str(y_gpu.dtype))) transa = lower(transa) transb = lower(transb) if transb in ['t', 'c']: m, k = y_shape elif transb in ['n']: k, m = y_shape else: raise ValueError('invalid value "%s" for transb' % transb) if transa in ['t', 'c']: l, n = x_shape elif transa in ['n']: n, l = x_shape else: raise ValueError('invalid value "%s" for transa' % transa) if l != k: raise ValueError('objects are not aligned: x_shape = %s, y_shape = %s' % (x_shape, y_shape)) if transb == 'n': lda = max(1, m) else: lda = max(1, k) if transa == 'n': ldb = max(1, k) else: ldb = max(1, n) ldc = max(1, m) # Note that the desired shape of the output matrix is the transpose # of what CUBLAS assumes: if target is None: target = gpuarray.empty((n, ldc), x_gpu.dtype, allocator=memory_pool.allocate) cublas_func(handle, transb, transa, m, n, k, alpha, y_gpu.gpudata, lda, x_gpu.gpudata, ldb, beta, target.gpudata, ldc) return target
[ "def", "dot", "(", "x_gpu", ",", "y_gpu", ",", "transa", "=", "'N'", ",", "transb", "=", "'N'", ",", "handle", "=", "None", ",", "target", "=", "None", ")", ":", "if", "handle", "is", "None", ":", "handle", "=", "_global_cublas_handle", "if", "len", ...
Dot product of two arrays. For 1D arrays, this function computes the inner product. For 2D arrays of shapes `(m, k)` and `(k, n)`, it computes the matrix product; the result has shape `(m, n)`. Parameters ---------- x_gpu : pycuda.gpuarray.GPUArray Input array. y_gpu : pycuda.gpuarray.GPUArray Input array. transa : char If 'T', compute the product of the transpose of `x_gpu`. If 'C', compute the product of the Hermitian of `x_gpu`. transb : char If 'T', compute the product of the transpose of `y_gpu`. If 'C', compute the product of the Hermitian of `y_gpu`. handle : int CUBLAS context. If no context is specified, the default handle from `scikits.cuda.misc._global_cublas_handle` is used. Returns ------- c_gpu : pycuda.gpuarray.GPUArray, float{32,64}, or complex{64,128} Inner product of `x_gpu` and `y_gpu`. When the inputs are 1D arrays, the result will be returned as a scalar. Notes ----- The input matrices must all contain elements of the same data type. Examples -------- >>> import pycuda.gpuarray as gpuarray >>> import pycuda.autoinit >>> import numpy as np >>> import linalg >>> import misc >>> linalg.init() >>> a = np.asarray(np.random.rand(4, 2), np.float32) >>> b = np.asarray(np.random.rand(2, 2), np.float32) >>> a_gpu = gpuarray.to_gpu(a) >>> b_gpu = gpuarray.to_gpu(b) >>> c_gpu = linalg.dot(a_gpu, b_gpu) >>> np.allclose(np.dot(a, b), c_gpu.get()) True >>> d = np.asarray(np.random.rand(5), np.float32) >>> e = np.asarray(np.random.rand(5), np.float32) >>> d_gpu = gpuarray.to_gpu(d) >>> e_gpu = gpuarray.to_gpu(e) >>> f = linalg.dot(d_gpu, e_gpu) >>> np.allclose(np.dot(d, e), f) True
[ "Dot", "product", "of", "two", "arrays", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/mnist.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/mnist.py#L51-L66
def _extract_mnist_images(filename, num_images): """Extract images from an MNIST file into a numpy array. Args: filename: The path to an MNIST images file. num_images: The number of images in the file. Returns: A numpy array of shape [number_of_images, height, width, channels]. """ with gzip.open(filename) as bytestream: bytestream.read(16) buf = bytestream.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images) data = np.frombuffer(buf, dtype=np.uint8) data = data.reshape(num_images, _MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1) return data
[ "def", "_extract_mnist_images", "(", "filename", ",", "num_images", ")", ":", "with", "gzip", ".", "open", "(", "filename", ")", "as", "bytestream", ":", "bytestream", ".", "read", "(", "16", ")", "buf", "=", "bytestream", ".", "read", "(", "_MNIST_IMAGE_S...
Extract images from an MNIST file into a numpy array. Args: filename: The path to an MNIST images file. num_images: The number of images in the file. Returns: A numpy array of shape [number_of_images, height, width, channels].
[ "Extract", "images", "from", "an", "MNIST", "file", "into", "a", "numpy", "array", "." ]
python
train
cocaine/cocaine-tools
cocaine/tools/dispatch.py
https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L1772-L1786
def access_grant(tp, name, cid, uid, perm, **kwargs): """ Add rights to ACL for specified entity (collection, path, etc.). Creates if necessary. """ ctx = Context(**kwargs) ctx.execute_action('access:edit:grant', **{ 'unicat': ctx.repo.create_secure_service('unicat'), 'tp': tp, 'name': name, 'cids': cid, 'uids': uid, 'perm': perm, })
[ "def", "access_grant", "(", "tp", ",", "name", ",", "cid", ",", "uid", ",", "perm", ",", "*", "*", "kwargs", ")", ":", "ctx", "=", "Context", "(", "*", "*", "kwargs", ")", "ctx", ".", "execute_action", "(", "'access:edit:grant'", ",", "*", "*", "{"...
Add rights to ACL for specified entity (collection, path, etc.). Creates if necessary.
[ "Add", "rights", "to", "ACL", "for", "specified", "entity", "(", "collection", "path", "etc", ".", ")", "." ]
python
train
bitprophet/botox
botox/aws.py
https://github.com/bitprophet/botox/blob/02c887a28bd2638273548cc7d1e6d6f1d4d38bf9/botox/aws.py#L201-L215
def get_instance_subnet_name(self, instance): """ Return a human readable name for given instance's subnet, or None. Uses stored config mapping of subnet IDs to names. """ # TODO: we have to do this here since we are monkeypatching Instance. # If we switch to custom Instance (sub)class then we could do it in the # object, provided it has access to the configuration data. if instance.subnet_id: # Account for omitted 'subnet-' subnet = self.config['subnets'][instance.subnet_id[7:]] else: subnet = BLANK return subnet
[ "def", "get_instance_subnet_name", "(", "self", ",", "instance", ")", ":", "# TODO: we have to do this here since we are monkeypatching Instance.", "# If we switch to custom Instance (sub)class then we could do it in the", "# object, provided it has access to the configuration data.", "if", ...
Return a human readable name for given instance's subnet, or None. Uses stored config mapping of subnet IDs to names.
[ "Return", "a", "human", "readable", "name", "for", "given", "instance", "s", "subnet", "or", "None", "." ]
python
train
miedzinski/google-oauth
google_oauth/service.py
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L214-L228
def access_token(self): """Stores always valid OAuth2 access token. Note: Accessing this property may result in HTTP request. Returns: str """ if (self._access_token is None or self.expiration_time <= int(time.time())): resp = self.make_access_request() self._access_token = resp.json()['access_token'] return self._access_token
[ "def", "access_token", "(", "self", ")", ":", "if", "(", "self", ".", "_access_token", "is", "None", "or", "self", ".", "expiration_time", "<=", "int", "(", "time", ".", "time", "(", ")", ")", ")", ":", "resp", "=", "self", ".", "make_access_request", ...
Stores always valid OAuth2 access token. Note: Accessing this property may result in HTTP request. Returns: str
[ "Stores", "always", "valid", "OAuth2", "access", "token", "." ]
python
train
SmileyChris/django-countries
django_countries/fields.py
https://github.com/SmileyChris/django-countries/blob/68b0934e8180d47bc15eff2887b6887aaa6e0228/django_countries/fields.py#L310-L313
def pre_save(self, *args, **kwargs): "Returns field's value just before saving." value = super(CharField, self).pre_save(*args, **kwargs) return self.get_prep_value(value)
[ "def", "pre_save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "value", "=", "super", "(", "CharField", ",", "self", ")", ".", "pre_save", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self", ".", "get_prep_value"...
Returns field's value just before saving.
[ "Returns", "field", "s", "value", "just", "before", "saving", "." ]
python
train
rix0rrr/gcl
gcl/query.py
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/query.py#L195-L206
def ldContains(self, what, key): """List/dictinary/missing-aware contains. If the value is a "missing_value", we'll treat it as non-existent so it will be overwritten by an empty list/dict when necessary to assign child keys. """ if isListKey(key): i = listKeyIndex(key) return i < len(what) and what[i] != missing_value else: return key in what and what[key] != missing_value
[ "def", "ldContains", "(", "self", ",", "what", ",", "key", ")", ":", "if", "isListKey", "(", "key", ")", ":", "i", "=", "listKeyIndex", "(", "key", ")", "return", "i", "<", "len", "(", "what", ")", "and", "what", "[", "i", "]", "!=", "missing_val...
List/dictinary/missing-aware contains. If the value is a "missing_value", we'll treat it as non-existent so it will be overwritten by an empty list/dict when necessary to assign child keys.
[ "List", "/", "dictinary", "/", "missing", "-", "aware", "contains", "." ]
python
train
Spinmob/spinmob
_functions.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_functions.py#L850-L881
def get_shell_history(): """ This only works with some shells. """ # try for ipython if 'get_ipython' in globals(): a = list(get_ipython().history_manager.input_hist_raw) a.reverse() return a elif 'SPYDER_SHELL_ID' in _os.environ: try: p = _os.path.join(_settings.path_user, ".spyder2", "history.py") a = read_lines(p) a.reverse() return a except: pass # otherwise try pyshell or pycrust (requires wx) else: try: import wx for x in wx.GetTopLevelWindows(): if type(x) in [wx.py.shell.ShellFrame, wx.py.crust.CrustFrame]: a = x.shell.GetText().split(">>>") a.reverse() return a except: pass return ['shell history not available']
[ "def", "get_shell_history", "(", ")", ":", "# try for ipython", "if", "'get_ipython'", "in", "globals", "(", ")", ":", "a", "=", "list", "(", "get_ipython", "(", ")", ".", "history_manager", ".", "input_hist_raw", ")", "a", ".", "reverse", "(", ")", "retur...
This only works with some shells.
[ "This", "only", "works", "with", "some", "shells", "." ]
python
train
onelogin/python3-saml
src/onelogin/saml2/utils.py
https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/utils.py#L69-L85
def escape_url(url, lowercase_urlencoding=False): """ escape the non-safe symbols in url The encoding used by ADFS 3.0 is not compatible with python's quote_plus (ADFS produces lower case hex numbers and quote_plus produces upper case hex numbers) :param url: the url to escape :type url: str :param lowercase_urlencoding: lowercase or no :type lowercase_urlencoding: boolean :return: the escaped url :rtype str """ encoded = quote_plus(url) return re.sub(r"%[A-F0-9]{2}", lambda m: m.group(0).lower(), encoded) if lowercase_urlencoding else encoded
[ "def", "escape_url", "(", "url", ",", "lowercase_urlencoding", "=", "False", ")", ":", "encoded", "=", "quote_plus", "(", "url", ")", "return", "re", ".", "sub", "(", "r\"%[A-F0-9]{2}\"", ",", "lambda", "m", ":", "m", ".", "group", "(", "0", ")", ".", ...
escape the non-safe symbols in url The encoding used by ADFS 3.0 is not compatible with python's quote_plus (ADFS produces lower case hex numbers and quote_plus produces upper case hex numbers) :param url: the url to escape :type url: str :param lowercase_urlencoding: lowercase or no :type lowercase_urlencoding: boolean :return: the escaped url :rtype str
[ "escape", "the", "non", "-", "safe", "symbols", "in", "url", "The", "encoding", "used", "by", "ADFS", "3", ".", "0", "is", "not", "compatible", "with", "python", "s", "quote_plus", "(", "ADFS", "produces", "lower", "case", "hex", "numbers", "and", "quote...
python
train
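The escape_url entry above only lowercases percent-escapes when asked; a self-contained sketch of that behaviour built on the standard library (not an import of python3-saml itself):

    # Re-implementation for illustration; mirrors the logic in the row above.
    import re
    from urllib.parse import quote_plus

    def escape_url(url, lowercase_urlencoding=False):
        encoded = quote_plus(url)
        if lowercase_urlencoding:
            # ADFS-style lower-case hex escapes
            return re.sub(r"%[A-F0-9]{2}", lambda m: m.group(0).lower(), encoded)
        return encoded

    print(escape_url("a b/c"))                              # a+b%2Fc
    print(escape_url("a b/c", lowercase_urlencoding=True))  # a+b%2fc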
david-caro/python-autosemver
autosemver/packaging.py
https://github.com/david-caro/python-autosemver/blob/3bc0adb70c33e4bd3623ae4c1944d5ee37f4303d/autosemver/packaging.py#L95-L115
def get_authors(project_dir=os.curdir): """ Retrieves the authors list, from the AUTHORS file (if in a package) or generates it from the git history. Returns: list(str): List of authors Raises: RuntimeError: If the authors could not be retrieved """ authors = set() pkg_info_file = os.path.join(project_dir, 'PKG-INFO') authors_file = os.path.join(project_dir, 'AUTHORS') if os.path.exists(pkg_info_file) and os.path.exists(authors_file): with open(authors_file) as authors_fd: authors = set(authors_fd.read().splitlines()) else: authors = api.get_authors(repo_path=project_dir) return authors
[ "def", "get_authors", "(", "project_dir", "=", "os", ".", "curdir", ")", ":", "authors", "=", "set", "(", ")", "pkg_info_file", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "'PKG-INFO'", ")", "authors_file", "=", "os", ".", "path", "."...
Retrieves the authors list, from the AUTHORS file (if in a package) or generates it from the git history. Returns: list(str): List of authors Raises: RuntimeError: If the authors could not be retrieved
[ "Retrieves", "the", "authors", "list", "from", "the", "AUTHORS", "file", "(", "if", "in", "a", "package", ")", "or", "generates", "it", "from", "the", "git", "history", "." ]
python
train
tensorflow/mesh
mesh_tensorflow/ops.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4975-L5011
def halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False): """Concat each block with the margins of adjacent blocks. Get left and right blocks_dim and concatenate along block_size_dim. Args: x: a Tensor. blocks_dim: a Dimension in x.shape block_size_dim: a Dimension in x.shape halo_size: an integer wrap: a boolean Returns: a Tensor with the same shape as x, other than in block_size_dim, whose size is increased by 2*halo_size. """ if halo_size == 0: return x block_size = block_size_dim.size partial_size = halo_size % block_size num_complete_blocks = halo_size // block_size parts = [x] for i in xrange(1, num_complete_blocks + 1): parts = ([shift(x, i, blocks_dim, wrap)] + parts + [shift(x, -i, blocks_dim, wrap)]) if partial_size > 0: left_margin = mtf_slice(x, 0, partial_size, block_size_dim.name) right_margin = mtf_slice( x, block_size_dim.size - partial_size, partial_size, block_size_dim.name) parts = ( [shift(right_margin, num_complete_blocks + 1, blocks_dim, wrap)] + parts + [shift(left_margin, -(num_complete_blocks + 1), blocks_dim, wrap)]) return concat(parts, block_size_dim.name)
[ "def", "halo_exchange", "(", "x", ",", "blocks_dim", ",", "block_size_dim", ",", "halo_size", ",", "wrap", "=", "False", ")", ":", "if", "halo_size", "==", "0", ":", "return", "x", "block_size", "=", "block_size_dim", ".", "size", "partial_size", "=", "hal...
Concat each block with the margins of adjacent blocks. Get left and right blocks_dim and concatenate along block_size_dim. Args: x: a Tensor. blocks_dim: a Dimension in x.shape block_size_dim: a Dimension in x.shape halo_size: an integer wrap: a boolean Returns: a Tensor with the same shape as x, other than in block_size_dim, whose size is increased by 2*halo_size.
[ "Concat", "each", "block", "with", "the", "margins", "of", "adjacent", "blocks", "." ]
python
train
richardliaw/track
track/autodetect.py
https://github.com/richardliaw/track/blob/7ac42ea34e5c1d7bb92fd813e938835a06a63fc7/track/autodetect.py#L45-L55
def git_hash(): """returns the current git hash or unknown if not in git repo""" if git_repo() is None: return "unknown" git_hash = subprocess.check_output( ["git", "rev-parse", "HEAD"]) # git_hash is a byte string; we want a string. git_hash = git_hash.decode("utf-8") # git_hash also comes with an extra \n at the end, which we remove. git_hash = git_hash.strip() return git_hash
[ "def", "git_hash", "(", ")", ":", "if", "git_repo", "(", ")", "is", "None", ":", "return", "\"unknown\"", "git_hash", "=", "subprocess", ".", "check_output", "(", "[", "\"git\"", ",", "\"rev-parse\"", ",", "\"HEAD\"", "]", ")", "# git_hash is a byte string; we...
returns the current git hash or unknown if not in git repo
[ "returns", "the", "current", "git", "hash", "or", "unknown", "if", "not", "in", "git", "repo" ]
python
train
pyviz/holoviews
holoviews/core/io.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/io.py#L652-L688
def add(self, obj=None, filename=None, data=None, info={}, **kwargs): """ If a filename is supplied, it will be used. Otherwise, a filename will be generated from the supplied object. Note that if the explicit filename uses the {timestamp} field, it will be formatted upon export. The data to be archived is either supplied explicitly as 'data' or automatically rendered from the object. """ if [filename, obj] == [None, None]: raise Exception("Either filename or a HoloViews object is " "needed to create an entry in the archive.") elif obj is None and not self.parse_fields(filename).issubset({'timestamp'}): raise Exception("Only the {timestamp} formatter may be used unless an object is supplied.") elif [obj, data] == [None, None]: raise Exception("Either an object or explicit data must be " "supplied to create an entry in the archive.") elif data and 'mime_type' not in info: raise Exception("The mime-type must be supplied in the info dictionary " "when supplying data directly") self._validate_formatters() entries = [] if data is None: for exporter in self.exporters: rendered = exporter(obj) if rendered is None: continue (data, new_info) = rendered info = dict(info, **new_info) entries.append((data, info)) else: entries.append((data, info)) for (data, info) in entries: self._add_content(obj, data, info, filename=filename)
[ "def", "add", "(", "self", ",", "obj", "=", "None", ",", "filename", "=", "None", ",", "data", "=", "None", ",", "info", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "if", "[", "filename", ",", "obj", "]", "==", "[", "None", ",", "None",...
If a filename is supplied, it will be used. Otherwise, a filename will be generated from the supplied object. Note that if the explicit filename uses the {timestamp} field, it will be formatted upon export. The data to be archived is either supplied explicitly as 'data' or automatically rendered from the object.
[ "If", "a", "filename", "is", "supplied", "it", "will", "be", "used", ".", "Otherwise", "a", "filename", "will", "be", "generated", "from", "the", "supplied", "object", ".", "Note", "that", "if", "the", "explicit", "filename", "uses", "the", "{", "timestamp...
python
train
cdumay/kser
src/kser/controller.py
https://github.com/cdumay/kser/blob/fbd6fe9ab34b8b89d9937e5ff727614304af48c1/src/kser/controller.py#L129-L143
def register(cls, name, entrypoint): """ Register a new entrypoint :param str name: Key used by messages :param kser.entry.Entrypoint entrypoint: class to load :raises ValidationError: Invalid entry """ if not issubclass(entrypoint, Entrypoint): raise ValidationError( "Invalid type for entry '{}', MUST implement " "kser.entry.Entrypoint".format(name), extra=dict(entrypoint=name) ) cls.ENTRYPOINTS[name] = entrypoint logger.debug("{}.Registered: {}".format(cls.__name__, name))
[ "def", "register", "(", "cls", ",", "name", ",", "entrypoint", ")", ":", "if", "not", "issubclass", "(", "entrypoint", ",", "Entrypoint", ")", ":", "raise", "ValidationError", "(", "\"Invalid type for entry '{}', MUST implement \"", "\"kser.entry.Entrypoint\"", ".", ...
Register a new entrypoint :param str name: Key used by messages :param kser.entry.Entrypoint entrypoint: class to load :raises ValidationError: Invalid entry
[ "Register", "a", "new", "entrypoint" ]
python
train
mosdef-hub/mbuild
mbuild/coordinate_transform.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/coordinate_transform.py#L72-L80
def apply_to(self, A): """Apply the coordinate transformation to points in A. """ if A.ndim == 1: A = np.expand_dims(A, axis=0) rows, cols = A.shape A_new = np.hstack([A, np.ones((rows, 1))]) A_new = np.transpose(self.T.dot(np.transpose(A_new))) return A_new[:, 0:cols]
[ "def", "apply_to", "(", "self", ",", "A", ")", ":", "if", "A", ".", "ndim", "==", "1", ":", "A", "=", "np", ".", "expand_dims", "(", "A", ",", "axis", "=", "0", ")", "rows", ",", "cols", "=", "A", ".", "shape", "A_new", "=", "np", ".", "hst...
Apply the coordinate transformation to points in A.
[ "Apply", "the", "coordinate", "transformation", "to", "points", "in", "A", "." ]
python
train
DallasMorningNews/django-datafreezer
datafreezer/views.py
https://github.com/DallasMorningNews/django-datafreezer/blob/982dcf2015c80a280f1a093e32977cb71d4ea7aa/datafreezer/views.py#L962-L988
def get(self, request, slug): """Basic functionality for GET request to view. """ matching_datasets = self.generate_matching_datasets(slug) if matching_datasets is None: raise Http404("Datasets meeting these criteria do not exist.") base_context = { 'datasets': matching_datasets, 'num_datasets': matching_datasets.count(), 'page_title': self.generate_page_title(slug), } additional_context = self.generate_additional_context( matching_datasets ) base_context.update(additional_context) context = base_context return render( request, self.template_path, context )
[ "def", "get", "(", "self", ",", "request", ",", "slug", ")", ":", "matching_datasets", "=", "self", ".", "generate_matching_datasets", "(", "slug", ")", "if", "matching_datasets", "is", "None", ":", "raise", "Http404", "(", "\"Datasets meeting these criteria do no...
Basic functionality for GET request to view.
[ "Basic", "functionality", "for", "GET", "request", "to", "view", "." ]
python
train
pkgw/pwkit
pwkit/numutil.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/numutil.py#L351-L361
def reduce_data_frame_evenly_with_gaps (df, valcol, target_len, maxgap, **kwargs): """"Reduce" a DataFrame by collapsing rows in grouped chunks, grouping based on gaps in one of the columns. This function combines :func:`reduce_data_frame` with :func:`slice_evenly_with_gaps`. """ return reduce_data_frame (df, slice_evenly_with_gaps (df[valcol], target_len, maxgap), **kwargs)
[ "def", "reduce_data_frame_evenly_with_gaps", "(", "df", ",", "valcol", ",", "target_len", ",", "maxgap", ",", "*", "*", "kwargs", ")", ":", "return", "reduce_data_frame", "(", "df", ",", "slice_evenly_with_gaps", "(", "df", "[", "valcol", "]", ",", "target_len...
Reduce" a DataFrame by collapsing rows in grouped chunks, grouping based on gaps in one of the columns. This function combines :func:`reduce_data_frame` with :func:`slice_evenly_with_gaps`.
[ "Reduce", "a", "DataFrame", "by", "collapsing", "rows", "in", "grouped", "chunks", "grouping", "based", "on", "gaps", "in", "one", "of", "the", "columns", "." ]
python
train
prezi/django-zipkin
django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py
https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L585-L594
def getTraceCombosByIds(self, trace_ids, adjust): """ Not content with just one of traces, summaries or timelines? Want it all? This is the method for you. Parameters: - trace_ids - adjust """ self.send_getTraceCombosByIds(trace_ids, adjust) return self.recv_getTraceCombosByIds()
[ "def", "getTraceCombosByIds", "(", "self", ",", "trace_ids", ",", "adjust", ")", ":", "self", ".", "send_getTraceCombosByIds", "(", "trace_ids", ",", "adjust", ")", "return", "self", ".", "recv_getTraceCombosByIds", "(", ")" ]
Not content with just one of traces, summaries or timelines? Want it all? This is the method for you. Parameters: - trace_ids - adjust
[ "Not", "content", "with", "just", "one", "of", "traces", "summaries", "or", "timelines?", "Want", "it", "all?", "This", "is", "the", "method", "for", "you", "." ]
python
train
ArchiveTeam/wpull
wpull/driver/process.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/driver/process.py#L96-L112
def _read_stdout(self): '''Continuously read the stdout for messages.''' try: while self._process.returncode is None: line = yield from self._process.stdout.readline() _logger.debug('Read stdout line %s', repr(line)) if not line: break if self._stdout_callback: yield from self._stdout_callback(line) except Exception: _logger.exception('Unhandled read stdout exception.') raise
[ "def", "_read_stdout", "(", "self", ")", ":", "try", ":", "while", "self", ".", "_process", ".", "returncode", "is", "None", ":", "line", "=", "yield", "from", "self", ".", "_process", ".", "stdout", ".", "readline", "(", ")", "_logger", ".", "debug", ...
Continuously read the stdout for messages.
[ "Continuously", "read", "the", "stdout", "for", "messages", "." ]
python
train
yyuu/botornado
boto/s3/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/connection.py#L351-L365
def get_canonical_user_id(self, headers=None): """ Convenience method that returns the "CanonicalUserID" of the user who's credentials are associated with the connection. The only way to get this value is to do a GET request on the service which returns all buckets associated with the account. As part of that response, the canonical userid is returned. This method simply does all of that and then returns just the user id. :rtype: string :return: A string containing the canonical user id. """ rs = self.get_all_buckets(headers=headers) return rs.ID
[ "def", "get_canonical_user_id", "(", "self", ",", "headers", "=", "None", ")", ":", "rs", "=", "self", ".", "get_all_buckets", "(", "headers", "=", "headers", ")", "return", "rs", ".", "ID" ]
Convenience method that returns the "CanonicalUserID" of the user who's credentials are associated with the connection. The only way to get this value is to do a GET request on the service which returns all buckets associated with the account. As part of that response, the canonical userid is returned. This method simply does all of that and then returns just the user id. :rtype: string :return: A string containing the canonical user id.
[ "Convenience", "method", "that", "returns", "the", "CanonicalUserID", "of", "the", "user", "who", "s", "credentials", "are", "associated", "with", "the", "connection", ".", "The", "only", "way", "to", "get", "this", "value", "is", "to", "do", "a", "GET", "...
python
train
redapple/parslepy
parslepy/selectors.py
https://github.com/redapple/parslepy/blob/a8bc4c0592824459629018c8f4c6ae3dad6cc3cc/parslepy/selectors.py#L222-L231
def _add_parsley_ns(cls, namespace_dict): """ Extend XPath evaluation with Parsley extensions' namespace """ namespace_dict.update({ 'parslepy' : cls.LOCAL_NAMESPACE, 'parsley' : cls.LOCAL_NAMESPACE, }) return namespace_dict
[ "def", "_add_parsley_ns", "(", "cls", ",", "namespace_dict", ")", ":", "namespace_dict", ".", "update", "(", "{", "'parslepy'", ":", "cls", ".", "LOCAL_NAMESPACE", ",", "'parsley'", ":", "cls", ".", "LOCAL_NAMESPACE", ",", "}", ")", "return", "namespace_dict" ...
Extend XPath evaluation with Parsley extensions' namespace
[ "Extend", "XPath", "evaluation", "with", "Parsley", "extensions", "namespace" ]
python
valid
pymc-devs/pymc
pymc/diagnostics.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/diagnostics.py#L403-L450
def batch_means(x, f=lambda y: y, theta=.5, q=.95, burn=0): """ TODO: Use Bayesian CI. Returns the half-width of the frequentist confidence interval (q'th quantile) of the Monte Carlo estimate of E[f(x)]. :Parameters: x : sequence Sampled series. Must be a one-dimensional array. f : function The MCSE of E[f(x)] will be computed. theta : float between 0 and 1 The batch length will be set to len(x) ** theta. q : float between 0 and 1 The desired quantile. :Example: >>>batch_means(x, f=lambda x: x**2, theta=.5, q=.95) :Reference: Flegal, James M. and Haran, Murali and Jones, Galin L. (2007). Markov chain Monte Carlo: Can we trust the third significant figure? <Publication> :Note: Requires SciPy """ try: import scipy from scipy import stats except ImportError: raise ImportError('SciPy must be installed to use batch_means.') x = x[burn:] n = len(x) b = np.int(n ** theta) a = n / b t_quant = stats.t.isf(1 - q, a - 1) Y = np.array([np.mean(f(x[i * b:(i + 1) * b])) for i in xrange(a)]) sig = b / (a - 1.) * sum((Y - np.mean(f(x))) ** 2) return t_quant * sig / np.sqrt(n)
[ "def", "batch_means", "(", "x", ",", "f", "=", "lambda", "y", ":", "y", ",", "theta", "=", ".5", ",", "q", "=", ".95", ",", "burn", "=", "0", ")", ":", "try", ":", "import", "scipy", "from", "scipy", "import", "stats", "except", "ImportError", ":...
TODO: Use Bayesian CI. Returns the half-width of the frequentist confidence interval (q'th quantile) of the Monte Carlo estimate of E[f(x)]. :Parameters: x : sequence Sampled series. Must be a one-dimensional array. f : function The MCSE of E[f(x)] will be computed. theta : float between 0 and 1 The batch length will be set to len(x) ** theta. q : float between 0 and 1 The desired quantile. :Example: >>>batch_means(x, f=lambda x: x**2, theta=.5, q=.95) :Reference: Flegal, James M. and Haran, Murali and Jones, Galin L. (2007). Markov chain Monte Carlo: Can we trust the third significant figure? <Publication> :Note: Requires SciPy
[ "TODO", ":", "Use", "Bayesian", "CI", "." ]
python
train
saltstack/salt
salt/modules/boto3_elasticache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto3_elasticache.py#L525-L536
def list_cache_subnet_groups(region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names Example: .. code-block:: bash salt myminion boto3_elasticache.list_cache_subnet_groups region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in describe_cache_subnet_groups(None, region, key, keyid, profile)]
[ "def", "list_cache_subnet_groups", "(", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "return", "[", "g", "[", "'CacheSubnetGroupName'", "]", "for", "g", "in", "describe_cache_subnet_groups"...
Return a list of all cache subnet group names Example: .. code-block:: bash salt myminion boto3_elasticache.list_cache_subnet_groups region=us-east-1
[ "Return", "a", "list", "of", "all", "cache", "subnet", "group", "names" ]
python
train
tritemio/PyBroMo
pybromo/diffusion.py
https://github.com/tritemio/PyBroMo/blob/b75f82a4551ff37e7c7a7e6954c536451f3e6d06/pybromo/diffusion.py#L373-L379
def compact_name(self, hashsize=6): """Compact representation of all simulation parameters """ # this can be made more robust for ID > 9 (double digit) s = self.compact_name_core(hashsize, t_max=True) s += "_ID%d-%d" % (self.ID, self.EID) return s
[ "def", "compact_name", "(", "self", ",", "hashsize", "=", "6", ")", ":", "# this can be made more robust for ID > 9 (double digit)", "s", "=", "self", ".", "compact_name_core", "(", "hashsize", ",", "t_max", "=", "True", ")", "s", "+=", "\"_ID%d-%d\"", "%", "(",...
Compact representation of all simulation parameters
[ "Compact", "representation", "of", "all", "simulation", "parameters" ]
python
valid
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py#L675-L679
def _tr_magic(line_info): "Translate lines escaped with: %" tpl = '%sget_ipython().magic(%r)' cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip() return tpl % (line_info.pre, cmd)
[ "def", "_tr_magic", "(", "line_info", ")", ":", "tpl", "=", "'%sget_ipython().magic(%r)'", "cmd", "=", "' '", ".", "join", "(", "[", "line_info", ".", "ifun", ",", "line_info", ".", "the_rest", "]", ")", ".", "strip", "(", ")", "return", "tpl", "%", "(...
Translate lines escaped with: %
[ "Translate", "lines", "escaped", "with", ":", "%" ]
python
test
ARMmbed/icetea
icetea_lib/Reports/ReportJunit.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/Reports/ReportJunit.py#L106-L170
def __generate(results): """ Static method which generates the Junit xml string from results :param results: Results as ResultList object. :return: Junit xml format string. """ doc, tag, text = Doc().tagtext() # Counters for testsuite tag info count = 0 fails = 0 errors = 0 skips = 0 for result in results: # Loop through all results and count the ones that were not later retried. if result.passed() is False: if result.retries_left > 0: # This will appear in the list again, move on continue count += 1 if result.passed(): # Passed, no need to increment anything else continue elif result.skipped(): skips += 1 elif result.was_inconclusive(): errors += 1 else: fails += 1 with tag('testsuite', tests=str(count), failures=str(fails), errors=str(errors), skipped=str(skips)): for result in results: if result.passed() is False and result.retries_left > 0: continue class_name = result.get_tc_name() models = result.get_dut_models() if models: class_name = class_name + "." + models name = result.get_toolchain() with tag('testcase', classname=class_name, name=name, time=result.get_duration(seconds=True)): if result.stdout: with tag('system-out'): text(result.stdout) if result.passed(): continue elif result.skipped(): with tag('skipped'): text(result.skip_reason) elif result.was_inconclusive(): with tag('error', message=hex_escape_str(result.fail_reason)): text(result.stderr) else: with tag('failure', message=hex_escape_str(result.fail_reason)): text(result.stderr) return indent( doc.getvalue(), indentation=' '*4 )
[ "def", "__generate", "(", "results", ")", ":", "doc", ",", "tag", ",", "text", "=", "Doc", "(", ")", ".", "tagtext", "(", ")", "# Counters for testsuite tag info", "count", "=", "0", "fails", "=", "0", "errors", "=", "0", "skips", "=", "0", "for", "r...
Static method which generates the Junit xml string from results :param results: Results as ResultList object. :return: Junit xml format string.
[ "Static", "method", "which", "generates", "the", "Junit", "xml", "string", "from", "results" ]
python
train
ska-sa/purr
Purr/Editors.py
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Editors.py#L484-L500
def resolveFilenameConflicts(self): """Goes through list of DPs to make sure that their destination names do not clash. Adjust names as needed. Returns True if some conflicts were resolved. """ taken_names = set() resolved = False # iterate through items for item, dp in self.getItemDPList(): # only apply this to saved DPs if dp.policy not in ["remove", "ignore", "banish"]: name0 = str(item.text(self.ColRename)) name = _makeUniqueFilename(taken_names, name0) if name != name0: item.setText(self.ColRename, name) resolved = True self.emit(SIGNAL("updated")) return resolved
[ "def", "resolveFilenameConflicts", "(", "self", ")", ":", "taken_names", "=", "set", "(", ")", "resolved", "=", "False", "# iterate through items", "for", "item", ",", "dp", "in", "self", ".", "getItemDPList", "(", ")", ":", "# only apply this to saved DPs", "if...
Goes through list of DPs to make sure that their destination names do not clash. Adjust names as needed. Returns True if some conflicts were resolved.
[ "Goes", "through", "list", "of", "DPs", "to", "make", "sure", "that", "their", "destination", "names", "do", "not", "clash", ".", "Adjust", "names", "as", "needed", ".", "Returns", "True", "if", "some", "conflicts", "were", "resolved", "." ]
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/servers/id_pools_ranges.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/servers/id_pools_ranges.py#L123-L142
def get_allocated_fragments(self, id_or_uri, count=-1, start=0): """ Gets all fragments that have been allocated in range. Args: id_or_uri: ID or URI of range. count: The number of resources to return. A count of -1 requests all items. The actual number of items in the response may differ from the requested count if the sum of start and count exceed the total number of items. start: The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the first available item. Returns: list: A list with the allocated fragements. """ uri = self._client.build_uri(id_or_uri) + "/allocated-fragments?start={0}&count={1}".format(start, count) return self._client.get_collection(uri)
[ "def", "get_allocated_fragments", "(", "self", ",", "id_or_uri", ",", "count", "=", "-", "1", ",", "start", "=", "0", ")", ":", "uri", "=", "self", ".", "_client", ".", "build_uri", "(", "id_or_uri", ")", "+", "\"/allocated-fragments?start={0}&count={1}\"", ...
Gets all fragments that have been allocated in range. Args: id_or_uri: ID or URI of range. count: The number of resources to return. A count of -1 requests all items. The actual number of items in the response may differ from the requested count if the sum of start and count exceed the total number of items. start: The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the first available item. Returns: list: A list with the allocated fragements.
[ "Gets", "all", "fragments", "that", "have", "been", "allocated", "in", "range", "." ]
python
train
peterbrittain/asciimatics
asciimatics/paths.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/paths.py#L123-L130
def wait(self, delay): """ Wait at the current location for the specified number of iterations. :param delay: The time to wait (in animation frames). """ for _ in range(0, delay): self._add_step((self._rec_x, self._rec_y))
[ "def", "wait", "(", "self", ",", "delay", ")", ":", "for", "_", "in", "range", "(", "0", ",", "delay", ")", ":", "self", ".", "_add_step", "(", "(", "self", ".", "_rec_x", ",", "self", ".", "_rec_y", ")", ")" ]
Wait at the current location for the specified number of iterations. :param delay: The time to wait (in animation frames).
[ "Wait", "at", "the", "current", "location", "for", "the", "specified", "number", "of", "iterations", "." ]
python
train
wreckage/django-happenings
happenings/utils/calendars.py
https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/utils/calendars.py#L270-L300
def popover_helper(self): """Populate variables used to build popovers.""" # when display_month = month_name[self.mo] if isinstance(display_month, six.binary_type) and self.encoding: display_month = display_month.decode('utf-8') self.when = ('<p><b>When:</b> ' + display_month + ' ' + str(self.day) + ', ' + self.event.l_start_date.strftime( LEGACY_CALENDAR_TIME_FORMAT).lstrip('0') + ' - ' + self.event.l_end_date.strftime(LEGACY_CALENDAR_TIME_FORMAT).lstrip('0') + '</p>') if self.event.location.exists(): # where self.where = '<p><b>Where:</b> ' for l in self.event.location.all(): self.where += l.name self.where += '</p>' else: self.where = '' # description self.desc = '<p><b>Description:</b> ' + self.event.description[:100] self.desc += ('...</p>' if len(self.event.description) > 100 else '</p>') self.event_url = self.event.get_absolute_url() # url t = LEGACY_CALENDAR_TIME_FORMAT if self.event.l_start_date.minute else LEGACY_CALENDAR_HOUR_FORMAT self.title2 = (self.event.l_start_date.strftime(t).lstrip('0') + ' ' + self.title)
[ "def", "popover_helper", "(", "self", ")", ":", "# when", "display_month", "=", "month_name", "[", "self", ".", "mo", "]", "if", "isinstance", "(", "display_month", ",", "six", ".", "binary_type", ")", "and", "self", ".", "encoding", ":", "display_month", ...
Populate variables used to build popovers.
[ "Populate", "variables", "used", "to", "build", "popovers", "." ]
python
test
wandb/client
wandb/vendor/prompt_toolkit/styles/from_dict.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/styles/from_dict.py#L42-L128
def style_from_dict(style_dict, include_defaults=True): """ Create a ``Style`` instance from a dictionary or other mapping. The dictionary is equivalent to the ``Style.styles`` dictionary from pygments, with a few additions: it supports 'reverse' and 'blink'. Usage:: style_from_dict({ Token: '#ff0000 bold underline', Token.Title: 'blink', Token.SomethingElse: 'reverse', }) :param include_defaults: Include the defaults (built-in) styling for selected text, etc...) """ assert isinstance(style_dict, Mapping) if include_defaults: s2 = {} s2.update(DEFAULT_STYLE_EXTENSIONS) s2.update(style_dict) style_dict = s2 # Expand token inheritance and turn style description into Attrs. token_to_attrs = {} # (Loop through the tokens in order. Sorting makes sure that # we process the parent first.) for ttype, styledef in sorted(style_dict.items()): # Start from parent Attrs or default Attrs. attrs = DEFAULT_ATTRS if 'noinherit' not in styledef: for i in range(1, len(ttype) + 1): try: attrs = token_to_attrs[ttype[:-i]] except KeyError: pass else: break # Now update with the given attributes. for part in styledef.split(): if part == 'noinherit': pass elif part == 'bold': attrs = attrs._replace(bold=True) elif part == 'nobold': attrs = attrs._replace(bold=False) elif part == 'italic': attrs = attrs._replace(italic=True) elif part == 'noitalic': attrs = attrs._replace(italic=False) elif part == 'underline': attrs = attrs._replace(underline=True) elif part == 'nounderline': attrs = attrs._replace(underline=False) # prompt_toolkit extensions. Not in Pygments. elif part == 'blink': attrs = attrs._replace(blink=True) elif part == 'noblink': attrs = attrs._replace(blink=False) elif part == 'reverse': attrs = attrs._replace(reverse=True) elif part == 'noreverse': attrs = attrs._replace(reverse=False) # Pygments properties that we ignore. elif part in ('roman', 'sans', 'mono'): pass elif part.startswith('border:'): pass # Colors. elif part.startswith('bg:'): attrs = attrs._replace(bgcolor=_colorformat(part[3:])) else: attrs = attrs._replace(color=_colorformat(part)) token_to_attrs[ttype] = attrs return _StyleFromDict(token_to_attrs)
[ "def", "style_from_dict", "(", "style_dict", ",", "include_defaults", "=", "True", ")", ":", "assert", "isinstance", "(", "style_dict", ",", "Mapping", ")", "if", "include_defaults", ":", "s2", "=", "{", "}", "s2", ".", "update", "(", "DEFAULT_STYLE_EXTENSIONS...
Create a ``Style`` instance from a dictionary or other mapping. The dictionary is equivalent to the ``Style.styles`` dictionary from pygments, with a few additions: it supports 'reverse' and 'blink'. Usage:: style_from_dict({ Token: '#ff0000 bold underline', Token.Title: 'blink', Token.SomethingElse: 'reverse', }) :param include_defaults: Include the defaults (built-in) styling for selected text, etc...)
[ "Create", "a", "Style", "instance", "from", "a", "dictionary", "or", "other", "mapping", "." ]
python
train
dossier/dossier.store
dossier/store/elastic.py
https://github.com/dossier/dossier.store/blob/b22ffe2470bba9fcc98a30cb55b437bfa1521e7f/dossier/store/elastic.py#L782-L791
def _fc_index_disjunction_from_query(self, query_fc, fname): 'Creates a disjunction for keyword scan queries.' if len(query_fc.get(fname, [])) == 0: return [] terms = query_fc[fname].keys() disj = [] for fname in self.indexes[fname]['feature_names']: disj.append({'terms': {fname_to_idx_name(fname): terms}}) return disj
[ "def", "_fc_index_disjunction_from_query", "(", "self", ",", "query_fc", ",", "fname", ")", ":", "if", "len", "(", "query_fc", ".", "get", "(", "fname", ",", "[", "]", ")", ")", "==", "0", ":", "return", "[", "]", "terms", "=", "query_fc", "[", "fnam...
Creates a disjunction for keyword scan queries.
[ "Creates", "a", "disjunction", "for", "keyword", "scan", "queries", "." ]
python
test
openvax/isovar
isovar/reference_context.py
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/reference_context.py#L98-L139
def reference_contexts_for_variant( variant, context_size, transcript_id_whitelist=None): """ variant : varcode.Variant context_size : int Max of nucleotides to include to the left and right of the variant in the context sequence. transcript_id_whitelist : set, optional If given, then only consider transcripts whose IDs are in this set. Returns list of ReferenceContext objects, sorted by maximum length of coding sequence of any supporting transcripts. """ overlapping_transcripts = reference_transcripts_for_variant( variant=variant, transcript_id_whitelist=transcript_id_whitelist) # dictionary mapping SequenceKeyWithReadingFrame keys to list of # transcript objects sequence_groups = defaultdict(list) for transcript in overlapping_transcripts: sequence_key_with_reading_frame = \ ReferenceCodingSequenceKey.from_variant_and_transcript( variant=variant, transcript=transcript, context_size=context_size) if sequence_key_with_reading_frame is not None: sequence_groups[sequence_key_with_reading_frame].append(transcript) reference_contexts = [ ReferenceContext.from_reference_coding_sequence_key( key, variant, matching_transcripts) for (key, matching_transcripts) in sequence_groups.items() ] reference_contexts.sort( key=ReferenceContext.sort_key_decreasing_max_length_transcript_cds) return reference_contexts
[ "def", "reference_contexts_for_variant", "(", "variant", ",", "context_size", ",", "transcript_id_whitelist", "=", "None", ")", ":", "overlapping_transcripts", "=", "reference_transcripts_for_variant", "(", "variant", "=", "variant", ",", "transcript_id_whitelist", "=", "...
variant : varcode.Variant context_size : int Max of nucleotides to include to the left and right of the variant in the context sequence. transcript_id_whitelist : set, optional If given, then only consider transcripts whose IDs are in this set. Returns list of ReferenceContext objects, sorted by maximum length of coding sequence of any supporting transcripts.
[ "variant", ":", "varcode", ".", "Variant" ]
python
train
openego/ding0
ding0/tools/results.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/tools/results.py#L1216-L1255
def init_mv_grid(mv_grid_districts=[3545], filename='ding0_tests_grids_1.pkl'): '''Runs ding0 over the districtis selected in mv_grid_districts It also writes the result in filename. If filename = False, then the network is not saved. Parameters ---------- mv_grid_districts: :any:`list` of :obj:`int` Districts IDs: Defaults to [3545] filename: str Defaults to 'ding0_tests_grids_1.pkl' If filename=False, then the network is not saved Returns ------- NetworkDing0 The created MV network. ''' print('\n########################################') print(' Running ding0 for district', mv_grid_districts) # database connection/ session engine = db.connection(section='oedb') session = sessionmaker(bind=engine)() # instantiate new ding0 network object nd = NetworkDing0(name='network') # run DINGO on selected MV Grid District nd.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts) # export grid to file (pickle) if filename: print('\n########################################') print(' Saving result in ', filename) save_nd_to_pickle(nd, filename=filename) print('\n########################################') return nd
[ "def", "init_mv_grid", "(", "mv_grid_districts", "=", "[", "3545", "]", ",", "filename", "=", "'ding0_tests_grids_1.pkl'", ")", ":", "print", "(", "'\\n########################################'", ")", "print", "(", "' Running ding0 for district'", ",", "mv_grid_districts...
Runs ding0 over the districts selected in mv_grid_districts It also writes the result in filename. If filename = False, then the network is not saved. Parameters ---------- mv_grid_districts: :any:`list` of :obj:`int` Districts IDs: Defaults to [3545] filename: str Defaults to 'ding0_tests_grids_1.pkl' If filename=False, then the network is not saved Returns ------- NetworkDing0 The created MV network.
[ "Runs", "ding0", "over", "the", "districts", "selected", "in", "mv_grid_districts" ]
python
train
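Usage sketch for the record above (not part of the dataset entry): it assumes `init_mv_grid` is importable from the module named in the path field and that an 'oedb' database section is configured for `db.connection`.

from ding0.tools.results import init_mv_grid

# run ding0 for one district; filename=False skips writing the pickle
nd = init_mv_grid(mv_grid_districts=[3545], filename=False)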
SwissDataScienceCenter/renku-python
renku/cli/init.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/init.py#L87-L91
def store_directory(ctx, param, value): """Store directory as a new Git home.""" Path(value).mkdir(parents=True, exist_ok=True) set_git_home(value) return value
[ "def", "store_directory", "(", "ctx", ",", "param", ",", "value", ")", ":", "Path", "(", "value", ")", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "set_git_home", "(", "value", ")", "return", "value" ]
Store directory as a new Git home.
[ "Store", "directory", "as", "a", "new", "Git", "home", "." ]
python
train
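Hedged sketch of how a click callback like this is typically attached to an option; the command name, option name, and import path below are assumptions, not taken from the repo.

import click
from renku.cli.init import store_directory  # import path assumed from the record's path field

@click.command()
@click.option('--directory', default='.', callback=store_directory,
              help='Location of the new Git home.')
def init(directory):
    # by the time the command body runs, the callback has created the
    # directory and registered it as the Git home
    click.echo('Using {0} as Git home'.format(directory))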
SeattleTestbed/seash
pyreadline/lineeditor/lineobj.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/lineeditor/lineobj.py#L243-L246
def visible_line_width(self, position = Point): """Return the visible width of the text in line buffer up to position.""" extra_char_width = len([ None for c in self[:position].line_buffer if 0x2013 <= ord(c) <= 0xFFFD]) return len(self[:position].quoted_text()) + self[:position].line_buffer.count(u"\t")*7 + extra_char_width
[ "def", "visible_line_width", "(", "self", ",", "position", "=", "Point", ")", ":", "extra_char_width", "=", "len", "(", "[", "None", "for", "c", "in", "self", "[", ":", "position", "]", ".", "line_buffer", "if", "0x2013", "<=", "ord", "(", "c", ")", ...
Return the visible width of the text in line buffer up to position.
[ "Return", "the", "visible", "width", "of", "the", "text", "in", "line", "buffer", "up", "to", "position", "." ]
python
train
gem/oq-engine
openquake/hazardlib/sourcewriter.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/sourcewriter.py#L524-L540
def build_simple_fault_source_node(fault_source): """ Parses a simple fault source to a Node class :param fault_source: Simple fault source as instance of :class: `openquake.hazardlib.source.simple_fault.SimpleFaultSource` :returns: Instance of :class:`openquake.baselib.node.Node` """ # Parse geometry source_nodes = [build_simple_fault_geometry(fault_source)] # Parse common fault source attributes source_nodes.extend(get_fault_source_nodes(fault_source)) return Node("simpleFaultSource", get_source_attributes(fault_source), nodes=source_nodes)
[ "def", "build_simple_fault_source_node", "(", "fault_source", ")", ":", "# Parse geometry", "source_nodes", "=", "[", "build_simple_fault_geometry", "(", "fault_source", ")", "]", "# Parse common fault source attributes", "source_nodes", ".", "extend", "(", "get_fault_source_...
Parses a simple fault source to a Node class :param fault_source: Simple fault source as instance of :class: `openquake.hazardlib.source.simple_fault.SimpleFaultSource` :returns: Instance of :class:`openquake.baselib.node.Node`
[ "Parses", "a", "simple", "fault", "source", "to", "a", "Node", "class" ]
python
train
bwohlberg/sporco
sporco/admm/admm.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/admm.py#L1234-L1240
def obfn_g(self, Y): r"""Compute :math:`g(\mathbf{y}) = g_0(\mathbf{y}_0) + g_1(\mathbf{y}_1)` component of ADMM objective function. """ return self.obfn_g0(self.obfn_g0var()) + \ self.obfn_g1(self.obfn_g1var())
[ "def", "obfn_g", "(", "self", ",", "Y", ")", ":", "return", "self", ".", "obfn_g0", "(", "self", ".", "obfn_g0var", "(", ")", ")", "+", "self", ".", "obfn_g1", "(", "self", ".", "obfn_g1var", "(", ")", ")" ]
r"""Compute :math:`g(\mathbf{y}) = g_0(\mathbf{y}_0) + g_1(\mathbf{y}_1)` component of ADMM objective function.
[ "r", "Compute", ":", "math", ":", "g", "(", "\\", "mathbf", "{", "y", "}", ")", "=", "g_0", "(", "\\", "mathbf", "{", "y", "}", "_0", ")", "+", "g_1", "(", "\\", "mathbf", "{", "y", "}", "_1", ")", "component", "of", "ADMM", "objective", "fun...
python
train
heitzmann/gdspy
gdspy/__init__.py
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L2184-L2215
def copy(self, name, exclude_from_current=False, deep_copy=False): """ Creates a copy of this cell. Parameters ---------- name : string The name of the cell. exclude_from_current : bool If ``True``, the cell will not be included in the global list of cells maintained by ``gdspy``. deep_copy : bool If ``False``, the new cell will contain only references to the existing elements. If ``True``, copies of all elements are also created. Returns ------- out : ``Cell`` The new copy of this cell. """ new_cell = Cell(name, exclude_from_current) if deep_copy: new_cell.elements = libCopy.deepcopy(self.elements) new_cell.labels = libCopy.deepcopy(self.labels) for ref in new_cell.get_dependencies(True): if ref._bb_valid: ref._bb_valid = False else: new_cell.elements = list(self.elements) new_cell.labels = list(self.labels) return new_cell
[ "def", "copy", "(", "self", ",", "name", ",", "exclude_from_current", "=", "False", ",", "deep_copy", "=", "False", ")", ":", "new_cell", "=", "Cell", "(", "name", ",", "exclude_from_current", ")", "if", "deep_copy", ":", "new_cell", ".", "elements", "=", ...
Creates a copy of this cell. Parameters ---------- name : string The name of the cell. exclude_from_current : bool If ``True``, the cell will not be included in the global list of cells maintained by ``gdspy``. deep_copy : bool If ``False``, the new cell will contain only references to the existing elements. If ``True``, copies of all elements are also created. Returns ------- out : ``Cell`` The new copy of this cell.
[ "Creates", "a", "copy", "of", "this", "cell", "." ]
python
train
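Minimal sketch of shallow versus deep copying with this method; the gdspy 1.x style API (Cell, Rectangle, Cell.add) is assumed.

import gdspy

original = gdspy.Cell('ORIGINAL')
original.add(gdspy.Rectangle((0, 0), (2, 1)))

# shallow copy: the new cell references the same elements
alias = original.copy('ALIAS')

# deep copy: every element is duplicated as well
clone = original.copy('CLONE', deep_copy=True)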
saltstack/salt
salt/utils/nacl.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/nacl.py#L374-L396
def secretbox_decrypt(data, **kwargs): ''' Decrypt data that was encrypted using `nacl.secretbox_encrypt` using the secret key that was generated from `nacl.keygen`. CLI Examples: .. code-block:: bash salt-call nacl.secretbox_decrypt pEXHQM6cuaF7A= salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo=' ''' if data is None: return None # ensure data is in bytes data = salt.utils.stringutils.to_bytes(data) key = _get_sk(**kwargs) b = libnacl.secret.SecretBox(key=key) return b.decrypt(base64.b64decode(data))
[ "def", "secretbox_decrypt", "(", "data", ",", "*", "*", "kwargs", ")", ":", "if", "data", "is", "None", ":", "return", "None", "# ensure data is in bytes", "data", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_bytes", "(", "data", ")", "key", "...
Decrypt data that was encrypted using `nacl.secretbox_encrypt` using the secret key that was generated from `nacl.keygen`. CLI Examples: .. code-block:: bash salt-call nacl.secretbox_decrypt pEXHQM6cuaF7A= salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
[ "Decrypt", "data", "that", "was", "encrypted", "using", "nacl", ".", "secretbox_encrypt", "using", "the", "secret", "key", "that", "was", "generated", "from", "nacl", ".", "keygen", "." ]
python
train
Morrolan/surrealism
surrealism.py
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L622-L638
def __replace_adjective(sentence, counts): """Lets find and replace all instances of #ADJECTIVE :param _sentence: :param counts: """ if sentence is not None: while sentence.find('#ADJECTIVE') != -1: sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1) if sentence.find('#ADJECTIVE') == -1: return sentence return sentence else: return sentence
[ "def", "__replace_adjective", "(", "sentence", ",", "counts", ")", ":", "if", "sentence", "is", "not", "None", ":", "while", "sentence", ".", "find", "(", "'#ADJECTIVE'", ")", "!=", "-", "1", ":", "sentence", "=", "sentence", ".", "replace", "(", "'#ADJE...
Let's find and replace all instances of #ADJECTIVE :param sentence: :param counts:
[ "Lets", "find", "and", "replace", "all", "instances", "of", "#ADJECTIVE", ":", "param", "_sentence", ":", ":", "param", "counts", ":" ]
python
train
jessamynsmith/paragres
paragres/command.py
https://github.com/jessamynsmith/paragres/blob/4e068cbfcafbe8f1b010741d38fb65d40de2c6aa/paragres/command.py#L238-L253
def capture_heroku_database(self): """ Capture Heroku database backup. """ self.print_message("Capturing database backup for app '%s'" % self.args.source_app) args = [ "heroku", "pg:backups:capture", "--app=%s" % self.args.source_app, ] if self.args.use_pgbackups: args = [ "heroku", "pgbackups:capture", "--app=%s" % self.args.source_app, "--expire", ] subprocess.check_call(args)
[ "def", "capture_heroku_database", "(", "self", ")", ":", "self", ".", "print_message", "(", "\"Capturing database backup for app '%s'\"", "%", "self", ".", "args", ".", "source_app", ")", "args", "=", "[", "\"heroku\"", ",", "\"pg:backups:capture\"", ",", "\"--app=%...
Capture Heroku database backup.
[ "Capture", "Heroku", "database", "backup", "." ]
python
train
googledatalab/pydatalab
solutionbox/ml_workbench/xgboost/trainer/task.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/xgboost/trainer/task.py#L462-L582
def make_export_strategy( args, keep_target, assets_extra, features, schema, stats): """Makes prediction graph that takes json input. Args: args: command line args keep_target: If ture, target column is returned in prediction graph. Target column must also exist in input data assets_extra: other fiels to copy to the output folder job_dir: root job folder features: features dict schema: schema list stats: stats dict """ target_name = feature_transforms.get_target_name(features) csv_header = [col['name'] for col in schema] if not keep_target: csv_header.remove(target_name) def export_fn(estimator, export_dir_base, checkpoint_path=None, eval_result=None): with ops.Graph().as_default() as g: contrib_variables.create_global_step(g) input_ops = feature_transforms.build_csv_serving_tensors_for_training_step( args.analysis, features, schema, stats, keep_target) model_fn_ops = estimator._call_model_fn(input_ops.features, None, model_fn_lib.ModeKeys.INFER) output_fetch_tensors = make_prediction_output_tensors( args=args, features=features, input_ops=input_ops, model_fn_ops=model_fn_ops, keep_target=keep_target) # Don't use signature_def_utils.predict_signature_def as that renames # tensor names if there is only 1 input/output tensor! signature_inputs = {key: tf.saved_model.utils.build_tensor_info(tensor) for key, tensor in six.iteritems(input_ops.default_inputs)} signature_outputs = {key: tf.saved_model.utils.build_tensor_info(tensor) for key, tensor in six.iteritems(output_fetch_tensors)} signature_def_map = { 'serving_default': signature_def_utils.build_signature_def( signature_inputs, signature_outputs, tf.saved_model.signature_constants.PREDICT_METHOD_NAME)} if not checkpoint_path: # Locate the latest checkpoint checkpoint_path = saver.latest_checkpoint(estimator._model_dir) if not checkpoint_path: raise ValueError("Couldn't find trained model at %s." % estimator._model_dir) export_dir = saved_model_export_utils.get_timestamped_export_dir( export_dir_base) if (model_fn_ops.scaffold is not None and model_fn_ops.scaffold.saver is not None): saver_for_restore = model_fn_ops.scaffold.saver else: saver_for_restore = saver.Saver(sharded=True) with tf_session.Session('') as session: saver_for_restore.restore(session, checkpoint_path) init_op = control_flow_ops.group( variables.local_variables_initializer(), resources.initialize_resources(resources.shared_resources()), tf.tables_initializer()) # Perform the export builder = saved_model_builder.SavedModelBuilder(export_dir) builder.add_meta_graph_and_variables( session, [tag_constants.SERVING], signature_def_map=signature_def_map, assets_collection=ops.get_collection( ops.GraphKeys.ASSET_FILEPATHS), legacy_init_op=init_op) builder.save(False) # Add the extra assets if assets_extra: assets_extra_path = os.path.join(compat.as_bytes(export_dir), compat.as_bytes('assets.extra')) for dest_relative, source in assets_extra.items(): dest_absolute = os.path.join(compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative)) dest_path = os.path.dirname(dest_absolute) file_io.recursive_create_dir(dest_path) file_io.copy(source, dest_absolute) # only keep the last 3 models saved_model_export_utils.garbage_collect_exports( export_dir_base, exports_to_keep=3) # save the last model to the model folder. 
# export_dir_base = A/B/intermediate_models/ if keep_target: final_dir = os.path.join(args.job_dir, 'evaluation_model') else: final_dir = os.path.join(args.job_dir, 'model') if file_io.is_directory(final_dir): file_io.delete_recursively(final_dir) file_io.recursive_create_dir(final_dir) recursive_copy(export_dir, final_dir) return export_dir if keep_target: intermediate_dir = 'intermediate_evaluation_models' else: intermediate_dir = 'intermediate_prediction_models' return export_strategy.ExportStrategy(intermediate_dir, export_fn)
[ "def", "make_export_strategy", "(", "args", ",", "keep_target", ",", "assets_extra", ",", "features", ",", "schema", ",", "stats", ")", ":", "target_name", "=", "feature_transforms", ".", "get_target_name", "(", "features", ")", "csv_header", "=", "[", "col", ...
Makes prediction graph that takes json input. Args: args: command line args keep_target: If true, target column is returned in prediction graph. Target column must also exist in input data assets_extra: other fields to copy to the output folder job_dir: root job folder features: features dict schema: schema list stats: stats dict
[ "Makes", "prediction", "graph", "that", "takes", "json", "input", "." ]
python
train
pytroll/satpy
satpy/node.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/node.py#L315-L344
def _get_compositor_prereqs(self, parent, prereq_names, skip=False, **dfilter): """Determine prerequisite Nodes for a composite. Args: parent (Node): Compositor node to add these prerequisites under prereq_names (sequence): Strings (names), floats (wavelengths), or DatasetIDs to analyze. skip (bool, optional): If True, prerequisites are considered optional if they can't be found and a debug message is logged. If False (default), the missing prerequisites are not logged and are expected to be handled by the caller. """ prereq_ids = [] unknowns = set() for prereq in prereq_names: n, u = self._find_dependencies(prereq, **dfilter) if u: unknowns.update(u) if skip: u_str = ", ".join([str(x) for x in u]) LOG.debug('Skipping optional %s: Unknown dataset %s', str(prereq), u_str) else: prereq_ids.append(n) self.add_child(parent, n) return prereq_ids, unknowns
[ "def", "_get_compositor_prereqs", "(", "self", ",", "parent", ",", "prereq_names", ",", "skip", "=", "False", ",", "*", "*", "dfilter", ")", ":", "prereq_ids", "=", "[", "]", "unknowns", "=", "set", "(", ")", "for", "prereq", "in", "prereq_names", ":", ...
Determine prerequisite Nodes for a composite. Args: parent (Node): Compositor node to add these prerequisites under prereq_names (sequence): Strings (names), floats (wavelengths), or DatasetIDs to analyze. skip (bool, optional): If True, prerequisites are considered optional if they can't be found and a debug message is logged. If False (default), the missing prerequisites are not logged and are expected to be handled by the caller.
[ "Determine", "prerequisite", "Nodes", "for", "a", "composite", "." ]
python
train
smarie/python-parsyfiles
parsyfiles/parsing_combining_parsers.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_combining_parsers.py#L588-L598
def _parse_singlefile(self, desired_type: Type[T], file_path: str, encoding: str, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """ Implementation of AnyParser API """ # first use the base parser to parse something compliant with the conversion chain first = self._base_parser._parse_singlefile(self._converter.from_type, file_path, encoding, logger, options) # then apply the conversion chain return self._converter.convert(desired_type, first, logger, options)
[ "def", "_parse_singlefile", "(", "self", ",", "desired_type", ":", "Type", "[", "T", "]", ",", "file_path", ":", "str", ",", "encoding", ":", "str", ",", "logger", ":", "Logger", ",", "options", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", ...
Implementation of AnyParser API
[ "Implementation", "of", "AnyParser", "API" ]
python
train
treycucco/bidon
bidon/db/access/model_access.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/model_access.py#L158-L166
def find_or_create(self, constructor, props, *, comp=None): """Looks for a model taht matches the given dictionary constraints. If it is not found, a new model of the given type is created and saved to the database, then returned. """ model = self.find_model(constructor, comp or props) if model is None: model = constructor(**props) self.insert_model(model) return model
[ "def", "find_or_create", "(", "self", ",", "constructor", ",", "props", ",", "*", ",", "comp", "=", "None", ")", ":", "model", "=", "self", ".", "find_model", "(", "constructor", ",", "comp", "or", "props", ")", "if", "model", "is", "None", ":", "mod...
Looks for a model that matches the given dictionary constraints. If it is not found, a new model of the given type is created and saved to the database, then returned.
[ "Looks", "for", "a", "model", "taht", "matches", "the", "given", "dictionary", "constraints", ".", "If", "it", "is", "not", "found", "a", "new", "model", "of", "the", "given", "type", "is", "created", "and", "saved", "to", "the", "database", "then", "ret...
python
train
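Illustrative call only: `model_access` (a ModelAccess instance), the `User` model class, and the column names are assumptions, not part of the record.

props = {'email': '[email protected]', 'name': 'Ada'}

# look up by email alone, but create with the full property dict if missing
user = model_access.find_or_create(User, props, comp={'email': '[email protected]'})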
lowandrew/OLCTools
coreGenome/core.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/core.py#L106-L164
def reporter(metadata, analysistype, reportpath): """ Create the core genome report :param metadata: type LIST: List of metadata objects :param analysistype: type STR: Current analysis type :param reportpath: type STR: Absolute path to folder in which the reports are to be created :return: """ header = 'Strain,ClosestRef,GenesPresent/Total,\n' data = str() for sample in metadata: try: if sample[analysistype].blastresults != 'NA': if sample.general.closestrefseqgenus == 'Listeria': # Write the sample name, closest ref genome, and the # of genes found / total # of genes closestref = list(sample[analysistype].blastresults.items())[0][0] coregenes = list(sample[analysistype].blastresults.items())[0][1][0] # Find the closest reference file try: ref = glob(os.path.join(sample[analysistype].targetpath, '{fasta}*' .format(fasta=closestref)))[0] except IndexError: # Replace underscores with dashes to find files closestref = closestref.replace('_', '-') ref = glob(os.path.join(sample[analysistype].targetpath, '{fasta}*' .format(fasta=closestref)))[0] # Determine the number of core genes present in the closest reference file totalcore = 0 for _ in SeqIO.parse(ref, 'fasta'): totalcore += 1 # Add the data to the object sample[analysistype].targetspresent = coregenes sample[analysistype].totaltargets = totalcore sample[analysistype].coreresults = '{cg}/{tc}'.format(cg=coregenes, tc=totalcore) row = '{sn},{cr},{cg}/{tc}\n'.format(sn=sample.name, cr=closestref, cg=coregenes, tc=totalcore) # Open the report with open(os.path.join(sample[analysistype].reportdir, '{sn}_{at}.csv'.format(sn=sample.name, at=analysistype)), 'w') as report: # Write the row to the report report.write(header) report.write(row) data += row else: sample[analysistype].targetspresent = 'NA' sample[analysistype].totaltargets = 'NA' sample[analysistype].coreresults = 'NA' except KeyError: sample[analysistype].targetspresent = 'NA' sample[analysistype].totaltargets = 'NA' sample[analysistype].coreresults = 'NA' with open(os.path.join(reportpath, 'coregenome.csv'), 'w') as report: # Write the data to the report report.write(header) report.write(data)
[ "def", "reporter", "(", "metadata", ",", "analysistype", ",", "reportpath", ")", ":", "header", "=", "'Strain,ClosestRef,GenesPresent/Total,\\n'", "data", "=", "str", "(", ")", "for", "sample", "in", "metadata", ":", "try", ":", "if", "sample", "[", "analysist...
Create the core genome report :param metadata: type LIST: List of metadata objects :param analysistype: type STR: Current analysis type :param reportpath: type STR: Absolute path to folder in which the reports are to be created :return:
[ "Create", "the", "core", "genome", "report", ":", "param", "metadata", ":", "type", "LIST", ":", "List", "of", "metadata", "objects", ":", "param", "analysistype", ":", "type", "STR", ":", "Current", "analysis", "type", ":", "param", "reportpath", ":", "ty...
python
train
bapakode/OmMongo
ommongo/fields/sequence.py
https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/fields/sequence.py#L117-L126
def validate_unwrap(self, value, session=None): ''' Checks that the type of ``value`` is correct as well as validating the elements of value''' self._validate_unwrap_type(value) self._length_valid(value) for v in value: if self.has_autoload: self._validate_child_unwrap(v, session=session) else: self._validate_child_unwrap(v)
[ "def", "validate_unwrap", "(", "self", ",", "value", ",", "session", "=", "None", ")", ":", "self", ".", "_validate_unwrap_type", "(", "value", ")", "self", ".", "_length_valid", "(", "value", ")", "for", "v", "in", "value", ":", "if", "self", ".", "ha...
Checks that the type of ``value`` is correct as well as validating the elements of value
[ "Checks", "that", "the", "type", "of", "value", "is", "correct", "as", "well", "as", "validating", "the", "elements", "of", "value" ]
python
train
nrcharles/caelum
caelum/forecast.py
https://github.com/nrcharles/caelum/blob/9a8e65806385978556d7bb2e6870f003ff82023e/caelum/forecast.py#L58-L65
def current(place): """return data as list of dicts with all data filled in.""" lat, lon = place url = "https://api.forecast.io/forecast/%s/%s,%s?solar" % (APIKEY, lat, lon) w_data = json.loads(urllib2.urlopen(url).read()) currently = w_data['currently'] return mangle(currently)
[ "def", "current", "(", "place", ")", ":", "lat", ",", "lon", "=", "place", "url", "=", "\"https://api.forecast.io/forecast/%s/%s,%s?solar\"", "%", "(", "APIKEY", ",", "lat", ",", "lon", ")", "w_data", "=", "json", ".", "loads", "(", "urllib2", ".", "urlope...
return data as list of dicts with all data filled in.
[ "return", "data", "as", "list", "of", "dicts", "with", "all", "data", "filled", "in", "." ]
python
train
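Usage sketch, assuming the module is importable as suggested by the path field and that APIKEY is set for the forecast.io service.

from caelum import forecast

place = (34.42, -119.70)      # (latitude, longitude)
conditions = forecast.current(place)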
google/apitools
apitools/gen/message_registry.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/message_registry.py#L311-L327
def __AddEntryType(self, entry_type_name, entry_schema, parent_name): """Add a type for a list entry.""" entry_schema.pop('description', None) description = 'Single entry in a %s.' % parent_name schema = { 'id': entry_type_name, 'type': 'object', 'description': description, 'properties': { 'entry': { 'type': 'array', 'items': entry_schema, }, }, } self.AddDescriptorFromSchema(entry_type_name, schema) return entry_type_name
[ "def", "__AddEntryType", "(", "self", ",", "entry_type_name", ",", "entry_schema", ",", "parent_name", ")", ":", "entry_schema", ".", "pop", "(", "'description'", ",", "None", ")", "description", "=", "'Single entry in a %s.'", "%", "parent_name", "schema", "=", ...
Add a type for a list entry.
[ "Add", "a", "type", "for", "a", "list", "entry", "." ]
python
train
equinor/segyio
python/segyio/field.py
https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/field.py#L453-L515
def update(self, *args, **kwargs): """d.update([E, ]**F) -> None. Update D from mapping/iterable E and F. Overwrite the values in `d` with the keys from `E` and `F`. If any key in `value` is invalid in `d`, ``KeyError`` is raised. This method is atomic - either all values in `value` are set in `d`, or none are. ``update`` does not commit a partially-updated version to disk. For kwargs, Seismic Unix-style names are supported. `BinField` and `TraceField` are not, because there are name collisions between them, although this restriction may be lifted in the future. Notes ----- .. versionchanged:: 1.3 Support for common dict operations (update, keys, values) .. versionchanged:: 1.6 Atomicity guarantee .. versionchanged:: 1.6 `**kwargs` support Examples -------- >>> e = { 1: 10, 9: 5 } >>> d.update(e) >>> l = [ (105, 11), (169, 4) ] >>> d.update(l) >>> d.update(e, iline=189, xline=193, hour=5) >>> d.update(sx=7) """ if len(args) > 1: msg = 'update expected at most 1 non-keyword argument, got {}' raise TypeError(msg.format(len(args))) buf = bytearray(self.buf) # Implementation largely borrowed from collections.mapping # If E present and has a .keys() method: for k in E: D[k] = E[k] # If E present and lacks .keys() method: for (k, v) in E: D[k] = v # In either case, this is followed by: for k, v in F.items(): D[k] = v if len(args) == 1: other = args[0] if isinstance(other, collections.Mapping): for key in other: self.putfield(buf, int(key), other[key]) elif hasattr(other, "keys"): for key in other.keys(): self.putfield(buf, int(key), other[key]) else: for key, value in other: self.putfield(buf, int(key), value) for key, value in kwargs.items(): self.putfield(buf, int(self._kwargs[key]), value) self.buf = buf self.flush()
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", ">", "1", ":", "msg", "=", "'update expected at most 1 non-keyword argument, got {}'", "raise", "TypeError", "(", "msg", ".", "format", "(", ...
d.update([E, ]**F) -> None. Update D from mapping/iterable E and F. Overwrite the values in `d` with the keys from `E` and `F`. If any key in `value` is invalid in `d`, ``KeyError`` is raised. This method is atomic - either all values in `value` are set in `d`, or none are. ``update`` does not commit a partially-updated version to disk. For kwargs, Seismic Unix-style names are supported. `BinField` and `TraceField` are not, because there are name collisions between them, although this restriction may be lifted in the future. Notes ----- .. versionchanged:: 1.3 Support for common dict operations (update, keys, values) .. versionchanged:: 1.6 Atomicity guarantee .. versionchanged:: 1.6 `**kwargs` support Examples -------- >>> e = { 1: 10, 9: 5 } >>> d.update(e) >>> l = [ (105, 11), (169, 4) ] >>> d.update(l) >>> d.update(e, iline=189, xline=193, hour=5) >>> d.update(sx=7)
[ "d", ".", "update", "(", "[", "E", "]", "**", "F", ")", "-", ">", "None", ".", "Update", "D", "from", "mapping", "/", "iterable", "E", "and", "F", "." ]
python
train
pazz/alot
alot/db/manager.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/manager.py#L240-L261
def untag(self, querystring, tags, afterwards=None): """ removes tags from messages that match `querystring`. This appends an untag operation to the write queue and raises :exc:`~errors.DatabaseROError` if in read only mode. :param querystring: notmuch search string :type querystring: str :param tags: a list of tags to be added :type tags: list of str :param afterwards: callback that gets called after successful application of this tagging operation :type afterwards: callable :exception: :exc:`~errors.DatabaseROError` .. note:: This only adds the requested operation to the write queue. You need to call :meth:`DBManager.flush` to actually write out. """ if self.ro: raise DatabaseROError() self.writequeue.append(('untag', afterwards, querystring, tags))
[ "def", "untag", "(", "self", ",", "querystring", ",", "tags", ",", "afterwards", "=", "None", ")", ":", "if", "self", ".", "ro", ":", "raise", "DatabaseROError", "(", ")", "self", ".", "writequeue", ".", "append", "(", "(", "'untag'", ",", "afterwards"...
removes tags from messages that match `querystring`. This appends an untag operation to the write queue and raises :exc:`~errors.DatabaseROError` if in read only mode. :param querystring: notmuch search string :type querystring: str :param tags: a list of tags to be added :type tags: list of str :param afterwards: callback that gets called after successful application of this tagging operation :type afterwards: callable :exception: :exc:`~errors.DatabaseROError` .. note:: This only adds the requested operation to the write queue. You need to call :meth:`DBManager.flush` to actually write out.
[ "removes", "tags", "from", "messages", "that", "match", "querystring", ".", "This", "appends", "an", "untag", "operation", "to", "the", "write", "queue", "and", "raises", ":", "exc", ":", "~errors", ".", "DatabaseROError", "if", "in", "read", "only", "mode",...
python
train
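Short sketch assuming an existing read/write DBManager instance `dbman`; the query string is illustrative.

# queue the untag operation, then write it out explicitly
dbman.untag('tag:unread and to:[email protected]', ['unread'])
dbman.flush()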
nugget/python-insteonplm
insteonplm/tools.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/tools.py#L613-L648
def do_set_hub_connection(self, args): """Set Hub connection parameters. Usage: set_hub_connection username password host [port] Arguments: username: Hub username password: Hub password host: host name or IP address port: IP port [default 25105] """ params = args.split() username = None password = None host = None port = None try: username = params[0] password = params[1] host = params[2] port = params[3] except IndexError: pass if username and password and host: if not port: port = 25105 self.tools.username = username self.tools.password = password self.tools.host = host self.tools.port = port else: _LOGGING.error('username password host are required') self.do_help('set_hub_connection')
[ "def", "do_set_hub_connection", "(", "self", ",", "args", ")", ":", "params", "=", "args", ".", "split", "(", ")", "username", "=", "None", "password", "=", "None", "host", "=", "None", "port", "=", "None", "try", ":", "username", "=", "params", "[", ...
Set Hub connection parameters. Usage: set_hub_connection username password host [port] Arguments: username: Hub username password: Hub password host: host name or IP address port: IP port [default 25105]
[ "Set", "Hub", "connection", "parameters", "." ]
python
train
Feneric/doxypypy
doxypypy/doxypypy.py
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L594-L614
def visit_Module(self, node, **kwargs): """ Handles the module-level docstring. Process the module-level docstring and create appropriate Doxygen tags if autobrief option is set. """ containingNodes=kwargs.get('containingNodes', []) if self.options.debug: stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace, linesep)) if get_docstring(node): if self.options.topLevelNamespace: fullPathNamespace = self._getFullPathName(containingNodes) contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace) tail = '@namespace {0}'.format(contextTag) else: tail = '' self._processDocstring(node, tail) # Visit any contained nodes (in this case pretty much everything). self.generic_visit(node, containingNodes=containingNodes)
[ "def", "visit_Module", "(", "self", ",", "node", ",", "*", "*", "kwargs", ")", ":", "containingNodes", "=", "kwargs", ".", "get", "(", "'containingNodes'", ",", "[", "]", ")", "if", "self", ".", "options", ".", "debug", ":", "stderr", ".", "write", "...
Handles the module-level docstring. Process the module-level docstring and create appropriate Doxygen tags if autobrief option is set.
[ "Handles", "the", "module", "-", "level", "docstring", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L10077-L10098
def radrec(inrange, re, dec): """ Convert from range, right ascension, and declination to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/radrec_c.html :param inrange: Distance of a point from the origin. :type inrange: float :param re: Right ascension of point in radians. :type re: float :param dec: Declination of point in radians. :type dec: float :return: Rectangular coordinates of the point. :rtype: 3-Element Array of floats """ inrange = ctypes.c_double(inrange) re = ctypes.c_double(re) dec = ctypes.c_double(dec) rectan = stypes.emptyDoubleVector(3) libspice.radrec_c(inrange, re, dec, rectan) return stypes.cVectorToPython(rectan)
[ "def", "radrec", "(", "inrange", ",", "re", ",", "dec", ")", ":", "inrange", "=", "ctypes", ".", "c_double", "(", "inrange", ")", "re", "=", "ctypes", ".", "c_double", "(", "re", ")", "dec", "=", "ctypes", ".", "c_double", "(", "dec", ")", "rectan"...
Convert from range, right ascension, and declination to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/radrec_c.html :param inrange: Distance of a point from the origin. :type inrange: float :param re: Right ascension of point in radians. :type re: float :param dec: Declination of point in radians. :type dec: float :return: Rectangular coordinates of the point. :rtype: 3-Element Array of floats
[ "Convert", "from", "range", "right", "ascension", "and", "declination", "to", "rectangular", "coordinates", "." ]
python
train
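Minimal sketch: the angles must be given in radians, so rpd() (radians per degree) is used for the conversion.

import spiceypy as spice

# unit range, RA = 90 degrees, DEC = 0 degrees -> approximately the +Y unit vector
rectan = spice.radrec(1.0, 90.0 * spice.rpd(), 0.0)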
materialsproject/pymatgen
pymatgen/vis/plotters.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/vis/plotters.py#L81-L95
def add_spectra(self, spectra_dict, key_sort_func=None): """ Add a dictionary of doses, with an optional sorting function for the keys. Args: dos_dict: dict of {label: Dos} key_sort_func: function used to sort the dos_dict keys. """ if key_sort_func: keys = sorted(spectra_dict.keys(), key=key_sort_func) else: keys = spectra_dict.keys() for label in keys: self.add_spectra(label, spectra_dict[label])
[ "def", "add_spectra", "(", "self", ",", "spectra_dict", ",", "key_sort_func", "=", "None", ")", ":", "if", "key_sort_func", ":", "keys", "=", "sorted", "(", "spectra_dict", ".", "keys", "(", ")", ",", "key", "=", "key_sort_func", ")", "else", ":", "keys"...
Add a dictionary of doses, with an optional sorting function for the keys. Args: dos_dict: dict of {label: Dos} key_sort_func: function used to sort the dos_dict keys.
[ "Add", "a", "dictionary", "of", "doses", "with", "an", "optional", "sorting", "function", "for", "the", "keys", "." ]
python
train
numenta/nupic
src/nupic/datafiles/extra/regression/makeDataset.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/datafiles/extra/regression/makeDataset.py#L93-L111
def _generateFile(filename, data): """ Parameters: ---------------------------------------------------------------- filename: name of .csv file to generate """ # Create the file print "Creating %s..." % (filename) numRecords, numFields = data.shape fields = [('field%d'%(i+1), 'float', '') for i in range(numFields)] outFile = File(filename, fields) for i in xrange(numRecords): outFile.write(data[i].tolist()) outFile.close()
[ "def", "_generateFile", "(", "filename", ",", "data", ")", ":", "# Create the file", "print", "\"Creating %s...\"", "%", "(", "filename", ")", "numRecords", ",", "numFields", "=", "data", ".", "shape", "fields", "=", "[", "(", "'field%d'", "%", "(", "i", "...
Parameters: ---------------------------------------------------------------- filename: name of .csv file to generate
[ "Parameters", ":", "----------------------------------------------------------------", "filename", ":", "name", "of", ".", "csv", "file", "to", "generate" ]
python
valid
aliyun/aliyun-odps-python-sdk
odps/ml/expr/mixin.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/ml/expr/mixin.py#L537-L590
def merge_data(*data_frames, **kwargs): """ Merge DataFrames by column. Number of rows in tables must be the same. This method can be called both outside and as a DataFrame method. :param list[DataFrame] data_frames: DataFrames to be merged. :param bool auto_rename: if True, fields in source DataFrames will be renamed in the output. :return: merged data frame. :rtype: DataFrame :Example: >>> merged1 = merge_data(df1, df2) >>> merged2 = df1.merge_with(df2, auto_rename_col=True) """ from .specialized import build_merge_expr from ..utils import ML_ARG_PREFIX if len(data_frames) <= 1: raise ValueError('Count of DataFrames should be at least 2.') norm_data_pairs = [] df_tuple = collections.namedtuple('MergeTuple', 'df cols exclude') for pair in data_frames: if isinstance(pair, tuple): if len(pair) == 2: df, cols = pair exclude = False else: df, cols, exclude = pair if isinstance(cols, six.string_types): cols = cols.split(',') else: df, cols, exclude = pair, None, False norm_data_pairs.append(df_tuple(df, cols, exclude)) auto_rename = kwargs.get('auto_rename', False) sel_cols_dict = dict((idx, tp.cols) for idx, tp in enumerate(norm_data_pairs) if tp.cols and not tp.exclude) ex_cols_dict = dict((idx, tp.cols) for idx, tp in enumerate(norm_data_pairs) if tp.cols and tp.exclude) merge_expr = build_merge_expr(len(norm_data_pairs)) arg_dict = dict(_params={'autoRenameCol': str(auto_rename)}, selected_cols=sel_cols_dict, excluded_cols=ex_cols_dict) for idx, dp in enumerate(norm_data_pairs): arg_dict[ML_ARG_PREFIX + 'input%d' % (1 + idx)] = dp.df out_df = merge_expr(register_expr=True, _exec_id=uuid.uuid4(), _output_name='output', **arg_dict) out_df._ml_uplink = [dp.df for dp in norm_data_pairs] out_df._perform_operation(op.MergeFieldsOperation(auto_rename, sel_cols_dict, ex_cols_dict)) out_df._rebuild_df_schema() return out_df
[ "def", "merge_data", "(", "*", "data_frames", ",", "*", "*", "kwargs", ")", ":", "from", ".", "specialized", "import", "build_merge_expr", "from", ".", ".", "utils", "import", "ML_ARG_PREFIX", "if", "len", "(", "data_frames", ")", "<=", "1", ":", "raise", ...
Merge DataFrames by column. Number of rows in tables must be the same. This method can be called both outside and as a DataFrame method. :param list[DataFrame] data_frames: DataFrames to be merged. :param bool auto_rename: if True, fields in source DataFrames will be renamed in the output. :return: merged data frame. :rtype: DataFrame :Example: >>> merged1 = merge_data(df1, df2) >>> merged2 = df1.merge_with(df2, auto_rename_col=True)
[ "Merge", "DataFrames", "by", "column", ".", "Number", "of", "rows", "in", "tables", "must", "be", "the", "same", "." ]
python
train
ucsb-cs-education/hairball
hairball/plugins/blocks.py
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/blocks.py#L23-L30
def analyze(self, scratch, **kwargs): """Run and return the results from the BlockCounts plugin.""" file_blocks = Counter() for script in self.iter_scripts(scratch): for name, _, _ in self.iter_blocks(script.blocks): file_blocks[name] += 1 self.blocks.update(file_blocks) # Update the overall count return {'types': file_blocks}
[ "def", "analyze", "(", "self", ",", "scratch", ",", "*", "*", "kwargs", ")", ":", "file_blocks", "=", "Counter", "(", ")", "for", "script", "in", "self", ".", "iter_scripts", "(", "scratch", ")", ":", "for", "name", ",", "_", ",", "_", "in", "self"...
Run and return the results from the BlockCounts plugin.
[ "Run", "and", "return", "the", "results", "from", "the", "BlockCounts", "plugin", "." ]
python
train
Telefonica/toolium
toolium/config_driver.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/config_driver.py#L128-L160
def _create_local_driver(self): """Create a driver in local machine :returns: a new local selenium driver """ driver_type = self.config.get('Driver', 'type') driver_name = driver_type.split('-')[0] if driver_name in ('android', 'ios', 'iphone'): # Create local appium driver driver = self._setup_appium() else: driver_setup = { 'firefox': self._setup_firefox, 'chrome': self._setup_chrome, 'safari': self._setup_safari, 'opera': self._setup_opera, 'iexplore': self._setup_explorer, 'edge': self._setup_edge, 'phantomjs': self._setup_phantomjs } driver_setup_method = driver_setup.get(driver_name) if not driver_setup_method: raise Exception('Unknown driver {0}'.format(driver_name)) # Get driver capabilities capabilities = self._get_capabilities_from_driver_type(driver_name) self._add_capabilities_from_properties(capabilities, 'Capabilities') # Create local selenium driver driver = driver_setup_method(capabilities) return driver
[ "def", "_create_local_driver", "(", "self", ")", ":", "driver_type", "=", "self", ".", "config", ".", "get", "(", "'Driver'", ",", "'type'", ")", "driver_name", "=", "driver_type", ".", "split", "(", "'-'", ")", "[", "0", "]", "if", "driver_name", "in", ...
Create a driver in local machine :returns: a new local selenium driver
[ "Create", "a", "driver", "in", "local", "machine" ]
python
train
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L676-L698
def get_from_dicts(dict1, key, default_value, dict2, extra=''): """ Input: dict1 - first check in this dict (and remove if there) key - key in dict1 default_value - default value if not found dict2 - then check from here Output: value """ value=default_value if key not in dict1: if dict2!=None: value=dict2.get(extra+key, default_value) else: value=dict1[key] del(dict1[key]) if dict2!=None: dict2[extra+key]=value return value
[ "def", "get_from_dicts", "(", "dict1", ",", "key", ",", "default_value", ",", "dict2", ",", "extra", "=", "''", ")", ":", "value", "=", "default_value", "if", "key", "not", "in", "dict1", ":", "if", "dict2", "!=", "None", ":", "value", "=", "dict2", ...
Input: dict1 - first check in this dict (and remove if there) key - key in dict1 default_value - default value if not found dict2 - then check from here Output: value
[ "Input", ":", "dict1", "-", "first", "check", "in", "this", "dict", "(", "and", "remove", "if", "there", ")", "key", "-", "key", "in", "dict1", "default_value", "-", "default", "value", "if", "not", "found", "dict2", "-", "then", "check", "from", "here...
python
train
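Small worked example of the move-and-default behaviour described in the docstring; the import path is assumed from the record's path field.

from ck.kernel import get_from_dicts

d1 = {'timeout': 30, 'other': 1}
d2 = {}

value = get_from_dicts(d1, 'timeout', 10, d2)
# value == 30; 'timeout' has been removed from d1 and copied into d2
missing = get_from_dicts(d1, 'retries', 3, d2)
# missing == 3 (the default), since 'retries' is in neither dict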
ionelmc/python-cogen
cogen/core/schedulers.py
https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/cogen/core/schedulers.py#L178-L212
def iter_run(self): """ The actual processing for the main loop is here. Running the main loop as a generator (where a iteration is a full sched, proactor and timers/timeouts run) is usefull for interleaving the main loop with other applications that have a blocking main loop and require cogen to run in the same thread. """ self.running = True urgent = None while self.running and (self.active or self.proactor or self.timeouts or urgent): if self.active or urgent: op, coro = urgent or self.active.popleft() urgent = None while True: op, coro = self.process_op(coro.run_op(op, self), coro) if not op and not coro: break if (self.proactor_greedy or not self.active) and self.proactor: try: urgent = self.proactor.run(timeout = self.next_timer_delta()) except (OSError, select.error, IOError), exc: if exc[0] != errno.EINTR: raise #~ if urgent:print '>urgent:', urgent if self.timeouts: self.handle_timeouts() yield # this could had beed a ordinary function and have the run() call #this repeatedly but the _urgent_ operation this is usefull (as it #saves us needlessly hammering the active coroutines queue with #append and pop calls on the same thing self.cleanup()
[ "def", "iter_run", "(", "self", ")", ":", "self", ".", "running", "=", "True", "urgent", "=", "None", "while", "self", ".", "running", "and", "(", "self", ".", "active", "or", "self", ".", "proactor", "or", "self", ".", "timeouts", "or", "urgent", ")...
The actual processing for the main loop is here. Running the main loop as a generator (where an iteration is a full sched, proactor and timers/timeouts run) is useful for interleaving the main loop with other applications that have a blocking main loop and require cogen to run in the same thread.
[ "The", "actual", "processing", "for", "the", "main", "loop", "is", "here", ".", "Running", "the", "main", "loop", "as", "a", "generator", "(", "where", "a", "iteration", "is", "a", "full", "sched", "proactor", "and", "timers", "/", "timeouts", "run", ")"...
python
train
CI-WATER/gsshapy
gsshapy/modeling/event.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L135-L139
def _update_card(self, card_name, new_value, add_quotes=False): """ Adds/updates card for gssha project file """ self.project_manager.setCard(card_name, new_value, add_quotes)
[ "def", "_update_card", "(", "self", ",", "card_name", ",", "new_value", ",", "add_quotes", "=", "False", ")", ":", "self", ".", "project_manager", ".", "setCard", "(", "card_name", ",", "new_value", ",", "add_quotes", ")" ]
Adds/updates card for gssha project file
[ "Adds", "/", "updates", "card", "for", "gssha", "project", "file" ]
python
train
jreinhardt/constraining-order
src/constrainingorder/sets.py
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/sets.py#L492-L509
def intersection(self,other): """ Return a new DiscreteSet with the intersection of the two sets, i.e. all elements that are in both self and other. :param DiscreteSet other: Set to intersect with :rtype: DiscreteSet """ if self.everything: if other.everything: return DiscreteSet() else: return DiscreteSet(other.elements) else: if other.everything: return DiscreteSet(self.elements) else: return DiscreteSet(self.elements.intersection(other.elements))
[ "def", "intersection", "(", "self", ",", "other", ")", ":", "if", "self", ".", "everything", ":", "if", "other", ".", "everything", ":", "return", "DiscreteSet", "(", ")", "else", ":", "return", "DiscreteSet", "(", "other", ".", "elements", ")", "else", ...
Return a new DiscreteSet with the intersection of the two sets, i.e. all elements that are in both self and other. :param DiscreteSet other: Set to intersect with :rtype: DiscreteSet
[ "Return", "a", "new", "DiscreteSet", "with", "the", "intersection", "of", "the", "two", "sets", "i", ".", "e", ".", "all", "elements", "that", "are", "in", "both", "self", "and", "other", "." ]
python
train
ihmeuw/vivarium
src/vivarium/config_tree.py
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/config_tree.py#L328-L363
def update(self, data: Union[Mapping, str, bytes], layer: str=None, source: str=None): """Adds additional data into the ConfigTree. Parameters ---------- data : source data layer : layer to load data into. If none is supplied the outermost one is used source : Source to attribute the values to See Also -------- read_dict """ if isinstance(data, dict): self._read_dict(data, layer, source) elif isinstance(data, ConfigTree): # TODO: set this to parse the other config tree including layer and source info. Maybe. self._read_dict(data.to_dict(), layer, source) elif isinstance(data, str): if data.endswith(('.yaml', '.yml')): source = source if source else data self._load(data, layer, source) else: try: self._loads(data, layer, source) except AttributeError: raise ValueError("The string data should be yaml formated string or path to .yaml/.yml file") elif data is None: pass else: raise ValueError(f"Update must be called with dictionary, string, or ConfigTree. " f"You passed in {type(data)}")
[ "def", "update", "(", "self", ",", "data", ":", "Union", "[", "Mapping", ",", "str", ",", "bytes", "]", ",", "layer", ":", "str", "=", "None", ",", "source", ":", "str", "=", "None", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ...
Adds additional data into the ConfigTree. Parameters ---------- data : source data layer : layer to load data into. If none is supplied the outermost one is used source : Source to attribute the values to See Also -------- read_dict
[ "Adds", "additional", "data", "into", "the", "ConfigTree", "." ]
python
train
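Hedged sketch assuming a ConfigTree instance `config` built with a 'model_override' layer; per the docstring, dictionaries, YAML strings, paths to .yaml files, and other ConfigTrees are all accepted.

config.update({'population': {'population_size': 1000}},
              layer='model_override', source='notebook')
config.update('output_root: /tmp/results', source='inline yaml')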
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_sanity_checks.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_sanity_checks.py#L83-L92
def _sanity_check_fold_scope_locations_are_unique(ir_blocks): """Assert that every FoldScopeLocation that exists on a Fold block is unique.""" observed_locations = dict() for block in ir_blocks: if isinstance(block, Fold): alternate = observed_locations.get(block.fold_scope_location, None) if alternate is not None: raise AssertionError(u'Found two Fold blocks with identical FoldScopeLocations: ' u'{} {} {}'.format(alternate, block, ir_blocks)) observed_locations[block.fold_scope_location] = block
[ "def", "_sanity_check_fold_scope_locations_are_unique", "(", "ir_blocks", ")", ":", "observed_locations", "=", "dict", "(", ")", "for", "block", "in", "ir_blocks", ":", "if", "isinstance", "(", "block", ",", "Fold", ")", ":", "alternate", "=", "observed_locations"...
Assert that every FoldScopeLocation that exists on a Fold block is unique.
[ "Assert", "that", "every", "FoldScopeLocation", "that", "exists", "on", "a", "Fold", "block", "is", "unique", "." ]
python
train
intake/intake
intake/catalog/base.py
https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/catalog/base.py#L267-L279
def serialize(self): """ Produce YAML version of this catalog. Note that this is not the same as ``.yaml()``, which produces a YAML block referring to this catalog. """ import yaml output = {"metadata": self.metadata, "sources": {}, "name": self.name} for key, entry in self.items(): output["sources"][key] = entry._captured_init_kwargs return yaml.dump(output)
[ "def", "serialize", "(", "self", ")", ":", "import", "yaml", "output", "=", "{", "\"metadata\"", ":", "self", ".", "metadata", ",", "\"sources\"", ":", "{", "}", ",", "\"name\"", ":", "self", ".", "name", "}", "for", "key", ",", "entry", "in", "self"...
Produce YAML version of this catalog. Note that this is not the same as ``.yaml()``, which produces a YAML block referring to this catalog.
[ "Produce", "YAML", "version", "of", "this", "catalog", "." ]
python
train
materialsproject/pymatgen
pymatgen/ext/matproj.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/ext/matproj.py#L1089-L1102
def get_reaction(self, reactants, products): """ Gets a reaction from the Materials Project. Args: reactants ([str]): List of formulas products ([str]): List of formulas Returns: rxn """ return self._make_request("/reaction", payload={"reactants[]": reactants, "products[]": products}, mp_decode=False)
[ "def", "get_reaction", "(", "self", ",", "reactants", ",", "products", ")", ":", "return", "self", ".", "_make_request", "(", "\"/reaction\"", ",", "payload", "=", "{", "\"reactants[]\"", ":", "reactants", ",", "\"products[]\"", ":", "products", "}", ",", "m...
Gets a reaction from the Materials Project. Args: reactants ([str]): List of formulas products ([str]): List of formulas Returns: rxn
[ "Gets", "a", "reaction", "from", "the", "Materials", "Project", "." ]
python
train
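Usage sketch; a Materials Project API key is required and the formulas are examples only.

from pymatgen.ext.matproj import MPRester

with MPRester("YOUR_API_KEY") as mpr:
    rxn = mpr.get_reaction(["Li", "O2"], ["Li2O2"])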
phn/angles
angles.py
https://github.com/phn/angles/blob/5c30ed7c3a7412177daaed180bf3b2351b287589/angles.py#L2215-L2233
def bear(self, p): """Find position angle between objects, in radians. Parameters ---------- p : AngularPosition The object to which bearing must be determined. Notes ----- This method calls the function bear(). See its docstring for details. See also -------- bear """ return bear(self.alpha.r, self.delta.r, p.alpha.r, p.delta.r)
[ "def", "bear", "(", "self", ",", "p", ")", ":", "return", "bear", "(", "self", ".", "alpha", ".", "r", ",", "self", ".", "delta", ".", "r", ",", "p", ".", "alpha", ".", "r", ",", "p", ".", "delta", ".", "r", ")" ]
Find position angle between objects, in radians. Parameters ---------- p : AngularPosition The object to which bearing must be determined. Notes ----- This method calls the function bear(). See its docstring for details. See also -------- bear
[ "Find", "position", "angle", "between", "objects", "in", "radians", "." ]
python
train
jlinn/pyflare
pyflare/hosting.py
https://github.com/jlinn/pyflare/blob/1108e82a9622d1aa6d92d4c4797744ff3cf41f68/pyflare/hosting.py#L196-L244
def zone_list( self, user_key, limit=100, offset=0, zone_name=None, sub_id=None, zone_status='ALL', sub_status='ALL', ): """ List zones for a user. :param user_key: key for authentication of user :type user_key: str :param limit: limit of zones shown :type limit: int :param offset: offset of zones to be shown :type offset: int :param zone_name: name of zone to lookup :type zone_name: str :param sub_id: subscription id of reseller (only for use by resellers) :type sub_id: str :param zone_status: status of zones to be shown :type zone_status: str (one of: V(active), D(deleted), ALL) :param sub_status: status of subscription of zones to be shown :type zone_name: str (one of: V(active), CNL(cancelled), ALL ) :returns: :rtype: dict """ if zone_status not in ['V', 'D', 'ALL']: raise ValueError('zone_status has to be V, D or ALL') if sub_status not in ['V', 'CNL', 'ALL']: raise ValueError('sub_status has to be V, CNL or ALL') params = { 'act': 'zone_list', 'user_key': user_key, 'limit': limit, 'offset': offset, 'zone_status': zone_status, 'sub_status': sub_status } if zone_name: params['zone_name'] = zone_name if sub_id: params['sub_id'] = sub_id return self._request(params)
[ "def", "zone_list", "(", "self", ",", "user_key", ",", "limit", "=", "100", ",", "offset", "=", "0", ",", "zone_name", "=", "None", ",", "sub_id", "=", "None", ",", "zone_status", "=", "'ALL'", ",", "sub_status", "=", "'ALL'", ",", ")", ":", "if", ...
List zones for a user. :param user_key: key for authentication of user :type user_key: str :param limit: limit of zones shown :type limit: int :param offset: offset of zones to be shown :type offset: int :param zone_name: name of zone to lookup :type zone_name: str :param sub_id: subscription id of reseller (only for use by resellers) :type sub_id: str :param zone_status: status of zones to be shown :type zone_status: str (one of: V(active), D(deleted), ALL) :param sub_status: status of subscription of zones to be shown :type sub_status: str (one of: V(active), CNL(cancelled), ALL ) :returns: :rtype: dict
[ "List", "zones", "for", "a", "user", "." ]
python
train
rlabbe/filterpy
filterpy/kalman/kalman_filter.py
https://github.com/rlabbe/filterpy/blob/8123214de798ffb63db968bb0b9492ee74e77950/filterpy/kalman/kalman_filter.py#L1100-L1106
def log_likelihood(self): """ log-likelihood of the last measurement. """ if self._log_likelihood is None: self._log_likelihood = logpdf(x=self.y, cov=self.S) return self._log_likelihood
[ "def", "log_likelihood", "(", "self", ")", ":", "if", "self", ".", "_log_likelihood", "is", "None", ":", "self", ".", "_log_likelihood", "=", "logpdf", "(", "x", "=", "self", ".", "y", ",", "cov", "=", "self", ".", "S", ")", "return", "self", ".", ...
log-likelihood of the last measurement.
[ "log", "-", "likelihood", "of", "the", "last", "measurement", "." ]
python
train
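Short sketch of when the cached value is available: it refers to the residual of the most recent update() call. The constant-velocity filter below is generic, not taken from the record.

import numpy as np
from filterpy.kalman import KalmanFilter

kf = KalmanFilter(dim_x=2, dim_z=1)
kf.F = np.array([[1., 1.], [0., 1.]])
kf.H = np.array([[1., 0.]])
kf.R *= 2.
kf.P *= 10.

kf.predict()
kf.update(1.2)
print(kf.log_likelihood)   # log-likelihood of the 1.2 measurement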
jaraco/tempora
tempora/schedule.py
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/schedule.py#L29-L36
def from_timestamp(ts): """ Convert a numeric timestamp to a timezone-aware datetime. A client may override this function to change the default behavior, such as to use local time or timezone-naïve times. """ return datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc)
[ "def", "from_timestamp", "(", "ts", ")", ":", "return", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "ts", ")", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")" ]
Convert a numeric timestamp to a timezone-aware datetime. A client may override this function to change the default behavior, such as to use local time or timezone-naïve times.
[ "Convert", "a", "numeric", "timestamp", "to", "a", "timezone", "-", "aware", "datetime", "." ]
python
valid
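A short usage sketch for the tempora record above, assuming `from_timestamp` is importable from `tempora.schedule` as indicated by the record's path; the epoch input is just an illustrative value.

```python
from tempora import schedule

# 0 seconds after the Unix epoch, returned as a timezone-aware datetime.
aware = schedule.from_timestamp(0)
print(aware)         # 1970-01-01 00:00:00+00:00
print(aware.tzinfo)  # UTC
```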
numenta/nupic
src/nupic/algorithms/fdrutilities.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L319-L411
def generateL2Sequences(nL1Patterns=10, l1Hubs=[2,6], l1SeqLength=[5,6,7], nL1SimpleSequences=50, nL1HubSequences=50, l1Pooling=4, perfectStability=False, spHysteresisFactor=1.0, patternLen=500, patternActivity=50): """ Generate the simulated output from a spatial pooler that's sitting on top of another spatial pooler / temporal memory pair. The average on-time of the outputs from the simulated TM is given by the l1Pooling argument. In this routine, L1 refers to the first spatial and temporal memory and L2 refers to the spatial pooler above that. Parameters: ----------------------------------------------- nL1Patterns: the number of patterns to use in the L1 sequences. l1Hubs: which of the elements will be used as hubs. l1SeqLength: a list of possible sequence lengths. The length of each sequence will be randomly chosen from here. nL1SimpleSequences: The number of simple sequences to generate for L1 nL1HubSequences: The number of hub sequences to generate for L1 l1Pooling: The number of time steps to pool over in the L1 temporal pooler perfectStability: If true, then the input patterns represented by the sequences generated will have perfect stability over l1Pooling time steps. This is the best case ideal input to a TM. In actual situations, with an actual SP providing input, the stability will always be less than this. spHystereisFactor: The hysteresisFactor to use in the L2 spatial pooler. Only used when perfectStability is False patternLen: The number of elements in each pattern output by L2 patternActivity: The number of elements that should be active in each pattern @retval: (seqList, patterns) seqList: a list of sequences output from L2. Each sequence is itself a list containing the input pattern indices for that sequence. patterns: the input patterns used in the L2 seqList. """ # First, generate the L1 sequences l1SeqList = generateSimpleSequences(nCoinc=nL1Patterns, seqLength=l1SeqLength, nSeq=nL1SimpleSequences) + \ generateHubSequences(nCoinc=nL1Patterns, hubs=l1Hubs, seqLength=l1SeqLength, nSeq=nL1HubSequences) # Generate the L2 SP output from those spOutput = generateSlowSPOutput(seqListBelow = l1SeqList, poolingTimeBelow=l1Pooling, outputWidth=patternLen, activity=patternActivity, perfectStability=perfectStability, spHysteresisFactor=spHysteresisFactor) # Map the spOutput patterns into indices into a pattern matrix which we # generate now. outSeq = None outSeqList = [] outPatterns = SM32(0, patternLen) for pattern in spOutput: # If we have a reset vector start a new sequence if pattern.sum() == 0: if outSeq is not None: outSeqList.append(outSeq) outSeq = [] continue # See if this vector matches a pattern we've already seen before patternIdx = None if outPatterns.nRows() > 0: # Find most matching 1's. matches = outPatterns.rightVecSumAtNZ(pattern) outCoinc = matches.argmax().astype('uint32') # See if its number of 1's is the same in the pattern and in the # coincidence row. If so, it is an exact match numOnes = pattern.sum() if matches[outCoinc] == numOnes \ and outPatterns.getRow(int(outCoinc)).sum() == numOnes: patternIdx = outCoinc # If no match, add this pattern to our matrix if patternIdx is None: outPatterns.addRow(pattern) patternIdx = outPatterns.nRows() - 1 # Store the pattern index into the sequence outSeq.append(patternIdx) # Put in last finished sequence if outSeq is not None: outSeqList.append(outSeq) # Return with the seqList and patterns matrix return (outSeqList, outPatterns)
[ "def", "generateL2Sequences", "(", "nL1Patterns", "=", "10", ",", "l1Hubs", "=", "[", "2", ",", "6", "]", ",", "l1SeqLength", "=", "[", "5", ",", "6", ",", "7", "]", ",", "nL1SimpleSequences", "=", "50", ",", "nL1HubSequences", "=", "50", ",", "l1Poo...
Generate the simulated output from a spatial pooler that's sitting on top of another spatial pooler / temporal memory pair. The average on-time of the outputs from the simulated TM is given by the l1Pooling argument. In this routine, L1 refers to the first spatial and temporal memory and L2 refers to the spatial pooler above that. Parameters: ----------------------------------------------- nL1Patterns: the number of patterns to use in the L1 sequences. l1Hubs: which of the elements will be used as hubs. l1SeqLength: a list of possible sequence lengths. The length of each sequence will be randomly chosen from here. nL1SimpleSequences: The number of simple sequences to generate for L1 nL1HubSequences: The number of hub sequences to generate for L1 l1Pooling: The number of time steps to pool over in the L1 temporal pooler perfectStability: If true, then the input patterns represented by the sequences generated will have perfect stability over l1Pooling time steps. This is the best case ideal input to a TM. In actual situations, with an actual SP providing input, the stability will always be less than this. spHysteresisFactor: The hysteresisFactor to use in the L2 spatial pooler. Only used when perfectStability is False patternLen: The number of elements in each pattern output by L2 patternActivity: The number of elements that should be active in each pattern @retval: (seqList, patterns) seqList: a list of sequences output from L2. Each sequence is itself a list containing the input pattern indices for that sequence. patterns: the input patterns used in the L2 seqList.
[ "Generate", "the", "simulated", "output", "from", "a", "spatial", "pooler", "that", "s", "sitting", "on", "top", "of", "another", "spatial", "pooler", "/", "temporal", "memory", "pair", ".", "The", "average", "on", "-", "time", "of", "the", "outputs", "fro...
python
valid
gem/oq-engine
openquake/hmtk/parsers/faults/fault_yaml_parser.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/parsers/faults/fault_yaml_parser.py#L89-L105
def parse_tect_region_dict_to_tuples(region_dict): ''' Parses the tectonic regionalisation dictionary attributes to tuples ''' output_region_dict = [] tuple_keys = ['Displacement_Length_Ratio', 'Shear_Modulus'] # Convert MSR string name to openquake.hazardlib.scalerel object for region in region_dict: for val_name in tuple_keys: region[val_name] = weight_list_to_tuple(region[val_name], val_name) # MSR works differently - so call get_scaling_relation_tuple region['Magnitude_Scaling_Relation'] = weight_list_to_tuple( region['Magnitude_Scaling_Relation'], 'Magnitude Scaling Relation') output_region_dict.append(region) return output_region_dict
[ "def", "parse_tect_region_dict_to_tuples", "(", "region_dict", ")", ":", "output_region_dict", "=", "[", "]", "tuple_keys", "=", "[", "'Displacement_Length_Ratio'", ",", "'Shear_Modulus'", "]", "# Convert MSR string name to openquake.hazardlib.scalerel object", "for", "region",...
Parses the tectonic regionalisation dictionary attributes to tuples
[ "Parses", "the", "tectonic", "regionalisation", "dictionary", "attributes", "to", "tuples" ]
python
train
esafak/mca
src/mca.py
https://github.com/esafak/mca/blob/f2b79ecbf37629902ccdbad2e1a556977c53d370/src/mca.py#L30-L34
def dummy(DF, cols=None): """Dummy code select columns of a DataFrame.""" dummies = (get_dummies(DF[col]) for col in (DF.columns if cols is None else cols)) return concat(dummies, axis=1, keys=DF.columns)
[ "def", "dummy", "(", "DF", ",", "cols", "=", "None", ")", ":", "dummies", "=", "(", "get_dummies", "(", "DF", "[", "col", "]", ")", "for", "col", "in", "(", "DF", ".", "columns", "if", "cols", "is", "None", "else", "cols", ")", ")", "return", "...
Dummy-code selected columns of a DataFrame.
[ "Dummy", "code", "select", "columns", "of", "a", "DataFrame", "." ]
python
train
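To make the pandas behaviour of the mca record above concrete without installing the package, here is the same two-line function applied to a small, made-up frame; the column names and values are assumptions chosen only for illustration.

```python
import pandas as pd
from pandas import get_dummies, concat

def dummy(DF, cols=None):
    """Dummy-code selected columns of a DataFrame (mirrors the record above)."""
    dummies = (get_dummies(DF[col])
               for col in (DF.columns if cols is None else cols))
    return concat(dummies, axis=1, keys=DF.columns)

df = pd.DataFrame({'color': ['red', 'blue', 'red'],
                   'size': ['S', 'M', 'S']})
print(dummy(df))
# Columns become a MultiIndex such as ('color', 'blue') ... ('size', 'S'),
# with one indicator column per original category value.
```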
awslabs/sockeye
sockeye/extract_parameters.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/extract_parameters.py#L34-L51
def _extract(param_names: List[str], params: Dict[str, mx.nd.NDArray], ext_params: Dict[str, np.ndarray]) -> List[str]: """ Extract specific parameters from a given base. :param param_names: Names of parameters to be extracted. :param params: Mapping from parameter names to the actual NDArrays parameters. :param ext_params: Extracted parameter dictionary. :return: Remaining names of parameters to be extracted. """ remaining_param_names = list(param_names) for name in param_names: if name in params: logger.info("\tFound '%s': shape=%s", name, str(params[name].shape)) ext_params[name] = params[name].asnumpy() remaining_param_names.remove(name) return remaining_param_names
[ "def", "_extract", "(", "param_names", ":", "List", "[", "str", "]", ",", "params", ":", "Dict", "[", "str", ",", "mx", ".", "nd", ".", "NDArray", "]", ",", "ext_params", ":", "Dict", "[", "str", ",", "np", ".", "ndarray", "]", ")", "->", "List",...
Extract specific parameters from a given base. :param param_names: Names of parameters to be extracted. :param params: Mapping from parameter names to the actual NDArrays parameters. :param ext_params: Extracted parameter dictionary. :return: Remaining names of parameters to be extracted.
[ "Extract", "specific", "parameters", "from", "a", "given", "base", "." ]
python
train
MacHu-GWU/pathlib_mate-project
pathlib_mate/mate_path_filters.py
https://github.com/MacHu-GWU/pathlib_mate-project/blob/f9fb99dd7cc9ea05d1bec8b9ce8f659e8d97b0f1/pathlib_mate/mate_path_filters.py#L144-L153
def n_subdir(self): """ Count how many folders in this directory (doesn't include folder in sub folders). """ self.assert_is_dir_and_exists() n = 0 for _ in self.select_dir(recursive=False): n += 1 return n
[ "def", "n_subdir", "(", "self", ")", ":", "self", ".", "assert_is_dir_and_exists", "(", ")", "n", "=", "0", "for", "_", "in", "self", ".", "select_dir", "(", "recursive", "=", "False", ")", ":", "n", "+=", "1", "return", "n" ]
Count how many folders are in this directory (doesn't include folders in sub-folders).
[ "Count", "how", "many", "folders", "in", "this", "directory", "(", "doesn", "t", "include", "folder", "in", "sub", "folders", ")", "." ]
python
valid
pallets/pallets-sphinx-themes
src/pallets_sphinx_themes/themes/click/domain.py
https://github.com/pallets/pallets-sphinx-themes/blob/1d4517d76dd492017f17acd7f72e82e40a1f1bc6/src/pallets_sphinx_themes/themes/click/domain.py#L199-L206
def get_example_runner(document): """Get or create the :class:`ExampleRunner` instance associated with a document. """ runner = getattr(document, "click_example_runner", None) if runner is None: runner = document.click_example_runner = ExampleRunner() return runner
[ "def", "get_example_runner", "(", "document", ")", ":", "runner", "=", "getattr", "(", "document", ",", "\"click_example_runner\"", ",", "None", ")", "if", "runner", "is", "None", ":", "runner", "=", "document", ".", "click_example_runner", "=", "ExampleRunner",...
Get or create the :class:`ExampleRunner` instance associated with a document.
[ "Get", "or", "create", "the", ":", "class", ":", "ExampleRunner", "instance", "associated", "with", "a", "document", "." ]
python
train
mardix/pylot
pylot/utils.py
https://github.com/mardix/pylot/blob/506a33a56ebdfc0925b94015e8cf98ccb16a143c/pylot/utils.py#L170-L183
def is_port_open(port, host="127.0.0.1"): """ Check if a port is open :param port: :param host: :return bool: """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((host, int(port))) s.shutdown(2) return True except Exception as e: return False
[ "def", "is_port_open", "(", "port", ",", "host", "=", "\"127.0.0.1\"", ")", ":", "s", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "try", ":", "s", ".", "connect", "(", "(", "host", ",", "int", ...
Check if a port is open :param port: :param host: :return bool:
[ "Check", "if", "a", "port", "is", "open", ":", "param", "port", ":", ":", "param", "host", ":", ":", "return", "bool", ":" ]
python
train
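The helper in the pylot record above is self-contained, so it can be exercised directly; the hosts and ports below are assumptions picked for illustration, and the results depend on the local network.

```python
import socket

def is_port_open(port, host="127.0.0.1"):
    # Same logic as the record above: try a TCP connect, report success.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, int(port)))
        s.shutdown(2)
        return True
    except Exception:
        return False

print(is_port_open(65000))               # almost certainly False locally
print(is_port_open(443, "example.com"))  # True only if outbound HTTPS works
```

A `s.settimeout(...)` call before connecting would keep the probe from hanging on filtered ports; the original omits one.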
kmerkmer/pymer
pymer/base.py
https://github.com/kmerkmer/pymer/blob/c22802436b3756a2e92829c9b234bde6217b683a/pymer/base.py#L27-L30
def consume(self, seq): '''Counts all k-mers in sequence.''' for kmer in iter_kmers(seq, self.k, canonical=self.canonical): self._incr(kmer)
[ "def", "consume", "(", "self", ",", "seq", ")", ":", "for", "kmer", "in", "iter_kmers", "(", "seq", ",", "self", ".", "k", ",", "canonical", "=", "self", ".", "canonical", ")", ":", "self", ".", "_incr", "(", "kmer", ")" ]
Counts all k-mers in sequence.
[ "Counts", "all", "k", "-", "mers", "in", "sequence", "." ]
python
train
glitchassassin/lackey
lackey/RegionMatching.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1486-L1492
def stopObserver(self): """ Stops this region's observer loop. If this is running in a subprocess, the subprocess will end automatically. """ self._observer.isStopped = True self._observer.isRunning = False
[ "def", "stopObserver", "(", "self", ")", ":", "self", ".", "_observer", ".", "isStopped", "=", "True", "self", ".", "_observer", ".", "isRunning", "=", "False" ]
Stops this region's observer loop. If this is running in a subprocess, the subprocess will end automatically.
[ "Stops", "this", "region", "s", "observer", "loop", "." ]
python
train
WebarchivCZ/WA-KAT
bin/wa_kat_build_keyword_index.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/bin/wa_kat_build_keyword_index.py#L99-L132
def _download_items(db, last_id): """ Download items from the aleph and store them in `db`. Start from `last_id` if specified. Args: db (obj): Dictionary-like object used as DB. last_id (int): Start from this id. """ MAX_RETRY = 20 # how many times to try till decision that this is an end MAX_DOC_ID = 10000000 # this is used for download iterator not_found_cnt = 0 # circuit breaker for doc_id in xrange(last_id, MAX_DOC_ID): doc_id += 1 print "Downloading %d.." % (doc_id) if not_found_cnt >= MAX_RETRY: print "It looks like this is an end:", doc_id - MAX_RETRY break try: record = _download(doc_id) except (DocumentNotFoundException, InvalidAlephBaseException): print "\tnot found, skipping" not_found_cnt += 1 continue not_found_cnt = 0 db["item_%d" % doc_id] = record db["last_id"] = doc_id - MAX_RETRY if doc_id > MAX_RETRY else 1 if doc_id % 100 == 0: db.commit()
[ "def", "_download_items", "(", "db", ",", "last_id", ")", ":", "MAX_RETRY", "=", "20", "# how many times to try till decision that this is an end", "MAX_DOC_ID", "=", "10000000", "# this is used for download iterator", "not_found_cnt", "=", "0", "# circuit breaker", "for", ...
Download items from the aleph and store them in `db`. Start from `last_id` if specified. Args: db (obj): Dictionary-like object used as DB. last_id (int): Start from this id.
[ "Download", "items", "from", "the", "aleph", "and", "store", "them", "in", "db", ".", "Start", "from", "last_id", "if", "specified", "." ]
python
train
fabioz/PyDev.Debugger
third_party/pep8/pycodestyle.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/pycodestyle.py#L1397-L1419
def parse_udiff(diff, patterns=None, parent='.'): """Return a dictionary of matching lines.""" # For each file of the diff, the entry key is the filename, # and the value is a set of row numbers to consider. rv = {} path = nrows = None for line in diff.splitlines(): if nrows: if line[:1] != '-': nrows -= 1 continue if line[:3] == '@@ ': hunk_match = HUNK_REGEX.match(line) (row, nrows) = [int(g or '1') for g in hunk_match.groups()] rv[path].update(range(row, row + nrows)) elif line[:3] == '+++': path = line[4:].split('\t', 1)[0] if path[:2] == 'b/': path = path[2:] rv[path] = set() return dict([(os.path.join(parent, path), rows) for (path, rows) in rv.items() if rows and filename_match(path, patterns)])
[ "def", "parse_udiff", "(", "diff", ",", "patterns", "=", "None", ",", "parent", "=", "'.'", ")", ":", "# For each file of the diff, the entry key is the filename,", "# and the value is a set of row numbers to consider.", "rv", "=", "{", "}", "path", "=", "nrows", "=", ...
Return a dictionary of matching lines.
[ "Return", "a", "dictionary", "of", "matching", "lines", "." ]
python
train
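A usage sketch for the pycodestyle record above: feeding a small unified diff to `pycodestyle.parse_udiff` yields the set of changed line numbers per file. The diff text is a made-up example, and the exact printed set depends on the hunk header.

```python
import pycodestyle

diff = """\
--- a/foo.py
+++ b/foo.py
@@ -1,3 +1,4 @@
 import os
+import sys
 import re
 import json
"""

selected = pycodestyle.parse_udiff(diff, patterns=['*.py'], parent='.')
print(selected)  # roughly {'./foo.py': {1, 2, 3, 4}}
```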
Spinmob/spinmob
_functions.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_functions.py#L1395-L1403
def sort_matrix(a,n=0): """ This will rearrange the array a[n] from lowest to highest, and rearrange the rest of a[i]'s in the same way. It is dumb and slow. Returns a numpy array. """ a = _n.array(a) return a[:,a[n,:].argsort()]
[ "def", "sort_matrix", "(", "a", ",", "n", "=", "0", ")", ":", "a", "=", "_n", ".", "array", "(", "a", ")", "return", "a", "[", ":", ",", "a", "[", "n", ",", ":", "]", ".", "argsort", "(", ")", "]" ]
This will rearrange the array a[n] from lowest to highest, and rearrange the rest of a[i]'s in the same way. It is dumb and slow. Returns a numpy array.
[ "This", "will", "rearrange", "the", "array", "a", "[", "n", "]", "from", "lowest", "to", "highest", "and", "rearrange", "the", "rest", "of", "a", "[", "i", "]", "s", "in", "the", "same", "way", ".", "It", "is", "dumb", "and", "slow", "." ]
python
train
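The spinmob record above boils down to a single numpy indexing trick, shown here on a made-up 2x3 array so the reordering is visible without importing spinmob.

```python
import numpy as np

a = np.array([[3., 1., 2.],      # row 0: the values to sort by
              [30., 10., 20.]])  # row 1: reordered with the same permutation

# Equivalent to sort_matrix(a, n=0): argsort row n, apply to every row.
print(a[:, a[0, :].argsort()])
# [[ 1.  2.  3.]
#  [10. 20. 30.]]
```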
olitheolix/qtmacs
qtmacs/applets/logviewer.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/applets/logviewer.py#L196-L246
def qteUpdateLogSlot(self): """ Fetch and display the next batch of log messages. """ # Fetch all log records that have arrived since the last # fetch() call and update the record counter. log = self.logHandler.fetch(start=self.qteLogCnt) self.qteLogCnt += len(log) # Return immediately if no log message is available (this case # should be impossible). if not len(log): return # Remove all duplicate entries and count their repetitions. log_pruned = [] last_entry = log[0] num_rep = -1 for cur_entry in log: # If the previous log message is identical to the current # one increase its repetition counter. If the two log # messages differ, add the last message to the output log # and reset the repetition counter. if last_entry.msg == cur_entry.msg: num_rep += 1 else: log_pruned.append([last_entry, num_rep]) num_rep = 0 last_entry = cur_entry # The very last entry must be added by hand. log_pruned.append([cur_entry, num_rep]) # Format the log entries (eg. color coding etc.) log_formatted = "" for cur_entry in log_pruned: log_formatted += self.qteFormatMessage(cur_entry[0], cur_entry[1]) log_formatted + '\n' # Insert the formatted text all at once as calls to insertHtml # are expensive. self.qteText.insertHtml(log_formatted) self.qteMoveToEndOfBuffer() # If the log contained an error (or something else of interest # to the user) then switch to the messages buffer (ie. switch # to this very applet). if self.qteAutoActivate: self.qteAutoActivate = False self.qteMain.qteMakeAppletActive(self)
[ "def", "qteUpdateLogSlot", "(", "self", ")", ":", "# Fetch all log records that have arrived since the last", "# fetch() call and update the record counter.", "log", "=", "self", ".", "logHandler", ".", "fetch", "(", "start", "=", "self", ".", "qteLogCnt", ")", "self", ...
Fetch and display the next batch of log messages.
[ "Fetch", "and", "display", "the", "next", "batch", "of", "log", "messages", "." ]
python
train
google/grr
grr/server/grr_response_server/frontend_lib.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/frontend_lib.py#L424-L493
def DrainTaskSchedulerQueueForClient(self, client, max_count=None): """Drains the client's Task Scheduler queue. 1) Get all messages in the client queue. 2) Sort these into a set of session_ids. 3) Use data_store.DB.ResolvePrefix() to query all requests. 4) Delete all responses for retransmitted messages (if needed). Args: client: The ClientURN object specifying this client. max_count: The maximum number of messages we will issue for the client. If not given, uses self.max_queue_size . Returns: The tasks respresenting the messages returned. If we can not send them, we can reschedule them for later. """ if max_count is None: max_count = self.max_queue_size if max_count <= 0: return [] client = rdf_client.ClientURN(client) start_time = time.time() # Drain the queue for this client if data_store.RelationalDBEnabled(): action_requests = data_store.REL_DB.LeaseClientActionRequests( client.Basename(), lease_time=rdfvalue.Duration.FromSeconds(self.message_expiry_time), limit=max_count) result = [ rdf_flow_objects.GRRMessageFromClientActionRequest(r) for r in action_requests ] else: new_tasks = queue_manager.QueueManager(token=self.token).QueryAndOwn( queue=client.Queue(), limit=max_count, lease_seconds=self.message_expiry_time) initial_ttl = rdf_flows.GrrMessage().task_ttl check_before_sending = [] result = [] for task in new_tasks: if task.task_ttl < initial_ttl - 1: # This message has been leased before. check_before_sending.append(task) else: result.append(task) if check_before_sending: with queue_manager.QueueManager(token=self.token) as manager: status_found = manager.MultiCheckStatus(check_before_sending) # All messages that don't have a status yet should be sent again. for task in check_before_sending: if task not in status_found: result.append(task) else: manager.DeQueueClientRequest(task) stats_collector_instance.Get().IncrementCounter("grr_messages_sent", len(result)) if result: logging.debug("Drained %d messages for %s in %s seconds.", len(result), client, time.time() - start_time) return result
[ "def", "DrainTaskSchedulerQueueForClient", "(", "self", ",", "client", ",", "max_count", "=", "None", ")", ":", "if", "max_count", "is", "None", ":", "max_count", "=", "self", ".", "max_queue_size", "if", "max_count", "<=", "0", ":", "return", "[", "]", "c...
Drains the client's Task Scheduler queue. 1) Get all messages in the client queue. 2) Sort these into a set of session_ids. 3) Use data_store.DB.ResolvePrefix() to query all requests. 4) Delete all responses for retransmitted messages (if needed). Args: client: The ClientURN object specifying this client. max_count: The maximum number of messages we will issue for the client. If not given, uses self.max_queue_size. Returns: The tasks representing the messages returned. If we can not send them, we can reschedule them for later.
[ "Drains", "the", "client", "s", "Task", "Scheduler", "queue", "." ]
python
train
Kortemme-Lab/klab
klab/stats/misc.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/stats/misc.py#L100-L102
def fraction_correct_pandas(dataframe, x_series, y_series, x_cutoff = 1.0, y_cutoff = 1.0, ignore_null_values = False): '''A little (<6%) slower than fraction_correct due to the data extraction overhead.''' return fraction_correct(dataframe[x_series].values.tolist(), dataframe[y_series].values.tolist(), x_cutoff = x_cutoff, y_cutoff = y_cutoff, ignore_null_values = ignore_null_values)
[ "def", "fraction_correct_pandas", "(", "dataframe", ",", "x_series", ",", "y_series", ",", "x_cutoff", "=", "1.0", ",", "y_cutoff", "=", "1.0", ",", "ignore_null_values", "=", "False", ")", ":", "return", "fraction_correct", "(", "dataframe", "[", "x_series", ...
A little (<6%) slower than fraction_correct due to the data extraction overhead.
[ "A", "little", "(", "<6%", ")", "slower", "than", "fraction_correct", "due", "to", "the", "data", "extraction", "overhead", "." ]
python
train
jsmits/django-logutils
django_logutils/middleware.py
https://github.com/jsmits/django-logutils/blob/e88f6e0a08c6f3df9e61f96cfb6cd79bc5ea8a88/django_logutils/middleware.py#L102-L129
def process_response(self, request, response): """ Create the logging message.. """ try: log_dict = create_log_dict(request, response) # add the request time to the log_dict; if no start time is # available, use -1 as NA value request_time = ( time.time() - self.start_time if hasattr(self, 'start_time') and self.start_time else -1) log_dict.update({'request_time': request_time}) is_request_time_too_high = ( request_time > float(settings.LOGUTILS_REQUEST_TIME_THRESHOLD)) use_sql_info = settings.DEBUG or is_request_time_too_high log_msg = create_log_message(log_dict, use_sql_info, fmt=False) if is_request_time_too_high: logger.warning(log_msg, log_dict, extra=log_dict) else: logger.info(log_msg, log_dict, extra=log_dict) except Exception as e: logger.exception(e) return response
[ "def", "process_response", "(", "self", ",", "request", ",", "response", ")", ":", "try", ":", "log_dict", "=", "create_log_dict", "(", "request", ",", "response", ")", "# add the request time to the log_dict; if no start time is", "# available, use -1 as NA value", "requ...
Create the logging message.
[ "Create", "the", "logging", "message", ".." ]
python
train
jeffrimko/Qprompt
lib/qprompt.py
https://github.com/jeffrimko/Qprompt/blob/1887c53656dfecac49e0650e0f912328801cbb83/lib/qprompt.py#L144-L147
def enum(self, desc, func=None, args=None, krgs=None): """Add a menu entry whose name will be an auto indexed number.""" name = str(len(self.entries)+1) self.entries.append(MenuEntry(name, desc, func, args or [], krgs or {}))
[ "def", "enum", "(", "self", ",", "desc", ",", "func", "=", "None", ",", "args", "=", "None", ",", "krgs", "=", "None", ")", ":", "name", "=", "str", "(", "len", "(", "self", ".", "entries", ")", "+", "1", ")", "self", ".", "entries", ".", "ap...
Add a menu entry whose name will be an auto indexed number.
[ "Add", "a", "menu", "entry", "whose", "name", "will", "be", "an", "auto", "indexed", "number", "." ]
python
train
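A hedged usage sketch for the Qprompt record above, assuming the method is exposed on `qprompt.Menu` and that `show()` is the prompting call; the callback bodies are placeholders.

```python
import qprompt

menu = qprompt.Menu()
menu.enum("Say hello", lambda: print("hello"))      # becomes entry "1"
menu.enum("Say goodbye", lambda: print("goodbye"))  # becomes entry "2"
# menu.show() would then prompt the user to pick entry 1 or 2.
```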
j0057/github-release
github_release.py
https://github.com/j0057/github-release/blob/5421d1ad3e49eaad50c800e548f889d55e159b9d/github_release.py#L148-L161
def _recursive_gh_get(href, items): """Recursively get list of GitHub objects. See https://developer.github.com/v3/guides/traversing-with-pagination/ """ response = _request('GET', href) response.raise_for_status() items.extend(response.json()) if "link" not in response.headers: return links = link_header.parse(response.headers["link"]) rels = {link.rel: link.href for link in links.links} if "next" in rels: _recursive_gh_get(rels["next"], items)
[ "def", "_recursive_gh_get", "(", "href", ",", "items", ")", ":", "response", "=", "_request", "(", "'GET'", ",", "href", ")", "response", ".", "raise_for_status", "(", ")", "items", ".", "extend", "(", "response", ".", "json", "(", ")", ")", "if", "\"l...
Recursively get list of GitHub objects. See https://developer.github.com/v3/guides/traversing-with-pagination/
[ "Recursively", "get", "list", "of", "GitHub", "objects", "." ]
python
train
markovmodel/msmtools
msmtools/analysis/api.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/api.py#L93-L133
def is_transition_matrix(T, tol=1e-12): r"""Check if the given matrix is a transition matrix. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Matrix to check tol : float (optional) Floating point tolerance to check with Returns ------- is_transition_matrix : bool True, if T is a valid transition matrix, False otherwise Notes ----- A valid transition matrix :math:`P=(p_{ij})` has non-negative elements, :math:`p_{ij} \geq 0`, and elements of each row sum up to one, :math:`\sum_j p_{ij} = 1`. Matrices wit this property are also called stochastic matrices. Examples -------- >>> import numpy as np >>> from msmtools.analysis import is_transition_matrix >>> A = np.array([[0.4, 0.5, 0.3], [0.2, 0.4, 0.4], [-1, 1, 1]]) >>> is_transition_matrix(A) False >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> is_transition_matrix(T) True """ T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric') if _issparse(T): return sparse.assessment.is_transition_matrix(T, tol) else: return dense.assessment.is_transition_matrix(T, tol)
[ "def", "is_transition_matrix", "(", "T", ",", "tol", "=", "1e-12", ")", ":", "T", "=", "_types", ".", "ensure_ndarray_or_sparse", "(", "T", ",", "ndim", "=", "2", ",", "uniform", "=", "True", ",", "kind", "=", "'numeric'", ")", "if", "_issparse", "(", ...
r"""Check if the given matrix is a transition matrix. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Matrix to check tol : float (optional) Floating point tolerance to check with Returns ------- is_transition_matrix : bool True, if T is a valid transition matrix, False otherwise Notes ----- A valid transition matrix :math:`P=(p_{ij})` has non-negative elements, :math:`p_{ij} \geq 0`, and elements of each row sum up to one, :math:`\sum_j p_{ij} = 1`. Matrices wit this property are also called stochastic matrices. Examples -------- >>> import numpy as np >>> from msmtools.analysis import is_transition_matrix >>> A = np.array([[0.4, 0.5, 0.3], [0.2, 0.4, 0.4], [-1, 1, 1]]) >>> is_transition_matrix(A) False >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> is_transition_matrix(T) True
[ "r", "Check", "if", "the", "given", "matrix", "is", "a", "transition", "matrix", "." ]
python
train
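The docstring in the msmtools record above already carries dense-matrix doctests; as a small complement, the same call also accepts a scipy.sparse matrix, which is the branch guarded by `_issparse`. The matrix values repeat the record's own example.

```python
import numpy as np
import scipy.sparse as sps
from msmtools.analysis import is_transition_matrix

# Same stochastic matrix as in the record, wrapped in a CSR sparse matrix.
T = sps.csr_matrix(np.array([[0.9, 0.1, 0.0],
                             [0.5, 0.0, 0.5],
                             [0.0, 0.1, 0.9]]))
print(is_transition_matrix(T))  # True
```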
spyder-ide/spyder
spyder/widgets/findreplace.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/findreplace.py#L446-L533
def replace_find(self, focus_replace_text=False, replace_all=False): """Replace and find""" if (self.editor is not None): replace_text = to_text_string(self.replace_text.currentText()) search_text = to_text_string(self.search_text.currentText()) re_pattern = None # Check regexp before proceeding if self.re_button.isChecked(): try: re_pattern = re.compile(search_text) # Check if replace_text can be substituted in re_pattern # Fixes issue #7177 re_pattern.sub(replace_text, '') except re.error: # Do nothing with an invalid regexp return case = self.case_button.isChecked() first = True cursor = None while True: if first: # First found seltxt = to_text_string(self.editor.get_selected_text()) cmptxt1 = search_text if case else search_text.lower() cmptxt2 = seltxt if case else seltxt.lower() if re_pattern is None: has_selected = self.editor.has_selected_text() if has_selected and cmptxt1 == cmptxt2: # Text was already found, do nothing pass else: if not self.find(changed=False, forward=True, rehighlight=False): break else: if len(re_pattern.findall(cmptxt2)) > 0: pass else: if not self.find(changed=False, forward=True, rehighlight=False): break first = False wrapped = False position = self.editor.get_position('cursor') position0 = position cursor = self.editor.textCursor() cursor.beginEditBlock() else: position1 = self.editor.get_position('cursor') if is_position_inf(position1, position0 + len(replace_text) - len(search_text) + 1): # Identify wrapping even when the replace string # includes part of the search string wrapped = True if wrapped: if position1 == position or \ is_position_sup(position1, position): # Avoid infinite loop: replace string includes # part of the search string break if position1 == position0: # Avoid infinite loop: single found occurrence break position0 = position1 if re_pattern is None: cursor.removeSelectedText() cursor.insertText(replace_text) else: seltxt = to_text_string(cursor.selectedText()) cursor.removeSelectedText() cursor.insertText(re_pattern.sub(replace_text, seltxt)) if self.find_next(): found_cursor = self.editor.textCursor() cursor.setPosition(found_cursor.selectionStart(), QTextCursor.MoveAnchor) cursor.setPosition(found_cursor.selectionEnd(), QTextCursor.KeepAnchor) else: break if not replace_all: break if cursor is not None: cursor.endEditBlock() if focus_replace_text: self.replace_text.setFocus()
[ "def", "replace_find", "(", "self", ",", "focus_replace_text", "=", "False", ",", "replace_all", "=", "False", ")", ":", "if", "(", "self", ".", "editor", "is", "not", "None", ")", ":", "replace_text", "=", "to_text_string", "(", "self", ".", "replace_text...
Replace and find
[ "Replace", "and", "find" ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L8950-L8974
def nplnpt(linpt, lindir, point): """ Find the nearest point on a line to a specified point, and find the distance between the two points. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/nplnpt_c.html :param linpt: Point on a line :type linpt: 3-Element Array of floats :param lindir: line's direction vector :type lindir: 3-Element Array of floats :param point: A second point. :type point: 3-Element Array of floats :return: Nearest point on the line to point, Distance between point and pnear :rtype: tuple """ linpt = stypes.toDoubleVector(linpt) lindir = stypes.toDoubleVector(lindir) point = stypes.toDoubleVector(point) pnear = stypes.emptyDoubleVector(3) dist = ctypes.c_double() libspice.nplnpt_c(linpt, lindir, point, pnear, ctypes.byref(dist)) return stypes.cVectorToPython(pnear), dist.value
[ "def", "nplnpt", "(", "linpt", ",", "lindir", ",", "point", ")", ":", "linpt", "=", "stypes", ".", "toDoubleVector", "(", "linpt", ")", "lindir", "=", "stypes", ".", "toDoubleVector", "(", "lindir", ")", "point", "=", "stypes", ".", "toDoubleVector", "("...
Find the nearest point on a line to a specified point, and find the distance between the two points. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/nplnpt_c.html :param linpt: Point on a line :type linpt: 3-Element Array of floats :param lindir: line's direction vector :type lindir: 3-Element Array of floats :param point: A second point. :type point: 3-Element Array of floats :return: Nearest point on the line to point, Distance between point and pnear :rtype: tuple
[ "Find", "the", "nearest", "point", "on", "a", "line", "to", "a", "specified", "point", "and", "find", "the", "distance", "between", "the", "two", "points", "." ]
python
train
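A usage sketch for the SpiceyPy record above: `nplnpt` is pure geometry, so it needs no SPICE kernels, only an installed spiceypy with its bundled CSPICE library. The line and point below are illustrative values.

```python
import spiceypy as spice

linpt = [0.0, 0.0, 0.0]   # a point on the line
lindir = [1.0, 0.0, 0.0]  # line direction: the +X axis
point = [0.0, 1.0, 0.0]   # query point, one unit up the Y axis

pnear, dist = spice.nplnpt(linpt, lindir, point)
print(pnear)  # (0, 0, 0): foot of the perpendicular from the query point
print(dist)   # 1.0
```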
flowersteam/explauto
explauto/agent/agent.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/agent/agent.py#L31-L59
def from_classes(cls, im_model_cls, im_model_config, expl_dims, sm_model_cls, sm_model_config, inf_dims, m_mins, m_maxs, s_mins, s_maxs, n_bootstrap=0, context_mode=None): """Initialize agent class :param class im_model_cls: a subclass of InterestedModel, as those registered in the interest_model package :param dict im_model_config: a configuration dict as those registered in the interest_model package :param list expl_dims: the sensorimotor dimensions where exploration is driven in the interest model :param list inf_dims: the output sensorimotor dimensions of the sensorimotor model (input being expl_dims) :param class sm_model_cls: a subclass of SensorimotorModel, as those registered in the sensorimotor_model package :param dict sensorimotor_model_config: a configuration dict as those registered in the sensorimotor_model package :param list m_mins, m_maxs, s_mins, s_max: lower and upper bounds of motor and sensory values on each dimension """ conf = make_configuration(m_mins, m_maxs, s_mins, s_maxs) sm_model = sm_model_cls(conf, **sm_model_config) im_model = im_model_cls(conf, expl_dims, **im_model_config) return cls(conf, sm_model, im_model, n_bootstrap, context_mode)
[ "def", "from_classes", "(", "cls", ",", "im_model_cls", ",", "im_model_config", ",", "expl_dims", ",", "sm_model_cls", ",", "sm_model_config", ",", "inf_dims", ",", "m_mins", ",", "m_maxs", ",", "s_mins", ",", "s_maxs", ",", "n_bootstrap", "=", "0", ",", "co...
Initialize agent class :param class im_model_cls: a subclass of InterestedModel, as those registered in the interest_model package :param dict im_model_config: a configuration dict as those registered in the interest_model package :param list expl_dims: the sensorimotor dimensions where exploration is driven in the interest model :param list inf_dims: the output sensorimotor dimensions of the sensorimotor model (input being expl_dims) :param class sm_model_cls: a subclass of SensorimotorModel, as those registered in the sensorimotor_model package :param dict sm_model_config: a configuration dict as those registered in the sensorimotor_model package :param list m_mins, m_maxs, s_mins, s_maxs: lower and upper bounds of motor and sensory values on each dimension
[ "Initialize", "agent", "class" ]
python
train