repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
fictorial/pygameui
pygameui/view.py
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/view.py#L74-L91
def layout(self): """Call to have the view layout itself. Subclasses should invoke this after laying out child views and/or updating its own frame. """ if self.shadowed: shadow_size = theme.current.shadow_size shadowed_frame_size = (self.frame.w + shadow_size, self.frame.h + shadow_size) self.surface = pygame.Surface( shadowed_frame_size, pygame.SRCALPHA, 32) shadow_image = resource.get_image('shadow') self.shadow_image = resource.scale_image(shadow_image, shadowed_frame_size) else: self.surface = pygame.Surface(self.frame.size, pygame.SRCALPHA, 32) self.shadow_image = None
[ "def", "layout", "(", "self", ")", ":", "if", "self", ".", "shadowed", ":", "shadow_size", "=", "theme", ".", "current", ".", "shadow_size", "shadowed_frame_size", "=", "(", "self", ".", "frame", ".", "w", "+", "shadow_size", ",", "self", ".", "frame", ...
Call to have the view layout itself. Subclasses should invoke this after laying out child views and/or updating its own frame.
[ "Call", "to", "have", "the", "view", "layout", "itself", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xorbrecordbox.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbrecordbox.py#L282-L290
def currentRecord( self ): """ Returns the record found at the current index for this combo box. :rerturn <orb.Table> || None """ if self._currentRecord is None and self.isRequired(): self._currentRecord = self.recordAt(self.currentIndex()) return self._currentRecord
[ "def", "currentRecord", "(", "self", ")", ":", "if", "self", ".", "_currentRecord", "is", "None", "and", "self", ".", "isRequired", "(", ")", ":", "self", ".", "_currentRecord", "=", "self", ".", "recordAt", "(", "self", ".", "currentIndex", "(", ")", ...
Returns the record found at the current index for this combo box. :rerturn <orb.Table> || None
[ "Returns", "the", "record", "found", "at", "the", "current", "index", "for", "this", "combo", "box", ".", ":", "rerturn", "<orb", ".", "Table", ">", "||", "None" ]
python
train
wbond/certvalidator
certvalidator/validate.py
https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/validate.py#L53-L107
def validate_tls_hostname(validation_context, cert, hostname): """ Validates the end-entity certificate from a certvalidator.path.ValidationPath object to ensure that the certificate is valid for the hostname provided and that the certificate is valid for the purpose of a TLS connection. THE CERTIFICATE PATH MUST BE VALIDATED SEPARATELY VIA validate_path()! :param validation_context: A certvalidator.context.ValidationContext object to use for configuring validation behavior :param cert: An asn1crypto.x509.Certificate object returned from validate_path() :param hostname: A unicode string of the TLS server hostname :raises: certvalidator.errors.InvalidCertificateError - when the certificate is not valid for TLS or the hostname """ if not isinstance(validation_context, ValidationContext): raise TypeError(pretty_message( ''' validation_context must be an instance of certvalidator.context.ValidationContext, not %s ''', type_name(validation_context) )) if validation_context.is_whitelisted(cert): return if not cert.is_valid_domain_ip(hostname): raise InvalidCertificateError(pretty_message( ''' The X.509 certificate provided is not valid for %s. Valid hostnames include: %s ''', hostname, ', '.join(cert.valid_domains) )) bad_key_usage = cert.key_usage_value and 'digital_signature' not in cert.key_usage_value.native bad_ext_key_usage = cert.extended_key_usage_value and 'server_auth' not in cert.extended_key_usage_value.native if bad_key_usage or bad_ext_key_usage: raise InvalidCertificateError(pretty_message( ''' The X.509 certificate provided is not valid for securing TLS connections ''' ))
[ "def", "validate_tls_hostname", "(", "validation_context", ",", "cert", ",", "hostname", ")", ":", "if", "not", "isinstance", "(", "validation_context", ",", "ValidationContext", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n validation_...
Validates the end-entity certificate from a certvalidator.path.ValidationPath object to ensure that the certificate is valid for the hostname provided and that the certificate is valid for the purpose of a TLS connection. THE CERTIFICATE PATH MUST BE VALIDATED SEPARATELY VIA validate_path()! :param validation_context: A certvalidator.context.ValidationContext object to use for configuring validation behavior :param cert: An asn1crypto.x509.Certificate object returned from validate_path() :param hostname: A unicode string of the TLS server hostname :raises: certvalidator.errors.InvalidCertificateError - when the certificate is not valid for TLS or the hostname
[ "Validates", "the", "end", "-", "entity", "certificate", "from", "a", "certvalidator", ".", "path", ".", "ValidationPath", "object", "to", "ensure", "that", "the", "certificate", "is", "valid", "for", "the", "hostname", "provided", "and", "that", "the", "certi...
python
train
openknowledge-archive/flexidate
flexidate/__init__.py
https://github.com/openknowledge-archive/flexidate/blob/d4fb7d6c7786725bd892fbccd8c3837ac45bcb67/flexidate/__init__.py#L126-L142
def as_float(self): '''Get as a float (year being the integer part). Replace '?' in year with 9 so as to be conservative (e.g. 19?? becomes 1999) and elsewhere (month, day) with 0 @return: float. ''' if not self.year: return None out = float(self.year.replace('?', '9')) if self.month: # TODO: we are assuming months are of equal length out += float(self.month.replace('?', '0')) / 12.0 if self.day: out += float(self.day.replace('?', '0')) / 365.0 return out
[ "def", "as_float", "(", "self", ")", ":", "if", "not", "self", ".", "year", ":", "return", "None", "out", "=", "float", "(", "self", ".", "year", ".", "replace", "(", "'?'", ",", "'9'", ")", ")", "if", "self", ".", "month", ":", "# TODO: we are ass...
Get as a float (year being the integer part). Replace '?' in year with 9 so as to be conservative (e.g. 19?? becomes 1999) and elsewhere (month, day) with 0 @return: float.
[ "Get", "as", "a", "float", "(", "year", "being", "the", "integer", "part", ")", "." ]
python
train
wmayner/pyphi
pyphi/compute/subsystem.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/compute/subsystem.py#L39-L52
def compute(mechanism, subsystem, purviews, cause_purviews, effect_purviews): """Compute a |Concept| for a mechanism, in this |Subsystem| with the provided purviews. """ concept = subsystem.concept(mechanism, purviews=purviews, cause_purviews=cause_purviews, effect_purviews=effect_purviews) # Don't serialize the subsystem. # This is replaced on the other side of the queue, and ensures # that all concepts in the CES reference the same subsystem. concept.subsystem = None return concept
[ "def", "compute", "(", "mechanism", ",", "subsystem", ",", "purviews", ",", "cause_purviews", ",", "effect_purviews", ")", ":", "concept", "=", "subsystem", ".", "concept", "(", "mechanism", ",", "purviews", "=", "purviews", ",", "cause_purviews", "=", "cause_...
Compute a |Concept| for a mechanism, in this |Subsystem| with the provided purviews.
[ "Compute", "a", "|Concept|", "for", "a", "mechanism", "in", "this", "|Subsystem|", "with", "the", "provided", "purviews", "." ]
python
train
gem/oq-engine
openquake/calculators/views.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/views.py#L397-L407
def view_portfolio_loss(token, dstore): """ The mean and stddev loss for the full portfolio for each loss type, extracted from the event loss table, averaged over the realizations """ data = portfolio_loss(dstore) # shape (R, L) loss_types = list(dstore['oqparam'].loss_dt().names) header = ['portfolio_loss'] + loss_types mean = ['mean'] + [row.mean() for row in data.T] stddev = ['stddev'] + [row.std(ddof=1) for row in data.T] return rst_table([mean, stddev], header)
[ "def", "view_portfolio_loss", "(", "token", ",", "dstore", ")", ":", "data", "=", "portfolio_loss", "(", "dstore", ")", "# shape (R, L)", "loss_types", "=", "list", "(", "dstore", "[", "'oqparam'", "]", ".", "loss_dt", "(", ")", ".", "names", ")", "header"...
The mean and stddev loss for the full portfolio for each loss type, extracted from the event loss table, averaged over the realizations
[ "The", "mean", "and", "stddev", "loss", "for", "the", "full", "portfolio", "for", "each", "loss", "type", "extracted", "from", "the", "event", "loss", "table", "averaged", "over", "the", "realizations" ]
python
train
jaysonsantos/python-binary-memcached
bmemcached/client/mixin.py
https://github.com/jaysonsantos/python-binary-memcached/blob/6a792829349c69204d9c5045e5c34b4231216dd6/bmemcached/client/mixin.py#L48-L70
def set_servers(self, servers): """ Iter to a list of servers and instantiate Protocol class. :param servers: A list of servers :type servers: list :return: Returns nothing :rtype: None """ if isinstance(servers, six.string_types): servers = [servers] assert servers, "No memcached servers supplied" self._servers = [Protocol( server=server, username=self.username, password=self.password, compression=self.compression, socket_timeout=self.socket_timeout, pickle_protocol=self.pickle_protocol, pickler=self.pickler, unpickler=self.unpickler, ) for server in servers]
[ "def", "set_servers", "(", "self", ",", "servers", ")", ":", "if", "isinstance", "(", "servers", ",", "six", ".", "string_types", ")", ":", "servers", "=", "[", "servers", "]", "assert", "servers", ",", "\"No memcached servers supplied\"", "self", ".", "_ser...
Iter to a list of servers and instantiate Protocol class. :param servers: A list of servers :type servers: list :return: Returns nothing :rtype: None
[ "Iter", "to", "a", "list", "of", "servers", "and", "instantiate", "Protocol", "class", "." ]
python
train
dropbox/stone
stone/frontend/parser.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/parser.py#L478-L480
def p_tag_ref(self, p): 'tag_ref : ID' p[0] = AstTagRef(self.path, p.lineno(1), p.lexpos(1), p[1])
[ "def", "p_tag_ref", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "AstTagRef", "(", "self", ".", "path", ",", "p", ".", "lineno", "(", "1", ")", ",", "p", ".", "lexpos", "(", "1", ")", ",", "p", "[", "1", "]", ")" ]
tag_ref : ID
[ "tag_ref", ":", "ID" ]
python
train
ryanjdillon/pyotelem
pyotelem/plots/plotutils.py
https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotutils.py#L104-L179
def add_alpha_labels(axes, xpos=0.03, ypos=0.95, suffix='', color=None, fontsize=14, fontweight='normal', boxstyle='square', facecolor='white', edgecolor='white', alpha=1.0): '''Add sequential alphbet labels to subplot axes Args ---- axes: list of pyplot.ax A list of matplotlib axes to add the label labels to xpos: float or array_like X position(s) of labels in figure coordinates ypos: float or array_like Y position(s) of labels in figure coordinates suffix: str String to append to labels (e.g. '.' or ' name) color: matplotlib color Color of labels fontsize: int Alppa fontsize fontweight: matplotlib fontweight Alpha fontweight boxstyle: matplotlib boxstyle Alpha boxstyle facecolor: matplotlib facecolor Color of box containing label edgecolor: matplotlib edgecolor Color of box'es border containing label alpha: float Transparency of label Returns ------- axes: list of pyplot.ax A list of matplotlib axes objects with alpha labels added ''' import seaborn import string import numpy if not numpy.iterable(xpos): xpos = [xpos,]*len(axes) ypos = [ypos,]*len(axes) if (len(xpos) > 1) or (len(ypos) > 1): try: assert (len(axes) == len(xpos)) except AssertionError as e: e.args += 'xpos iterable must be same length as axes' raise try: assert (len(axes) == len(ypos)) except AssertionError as e: e.args += 'ypos iterable must be same length as axes' raise else: xpos = [xpos,] ypos = [ypos,] colors = seaborn.color_palette() abc = string.ascii_uppercase for i, (label, ax) in enumerate(zip(abc[:len(axes)], axes)): if color is None: color = colors[i] kwargs = dict(color=color, fontweight=fontweight,) bbox = dict(boxstyle=boxstyle, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha) ax.text(xpos[i], ypos[i], '{}{}'.format(label, suffix), transform=ax.transAxes, fontsize=fontsize, verticalalignment='top', bbox=bbox, **kwargs) return axes
[ "def", "add_alpha_labels", "(", "axes", ",", "xpos", "=", "0.03", ",", "ypos", "=", "0.95", ",", "suffix", "=", "''", ",", "color", "=", "None", ",", "fontsize", "=", "14", ",", "fontweight", "=", "'normal'", ",", "boxstyle", "=", "'square'", ",", "f...
Add sequential alphbet labels to subplot axes Args ---- axes: list of pyplot.ax A list of matplotlib axes to add the label labels to xpos: float or array_like X position(s) of labels in figure coordinates ypos: float or array_like Y position(s) of labels in figure coordinates suffix: str String to append to labels (e.g. '.' or ' name) color: matplotlib color Color of labels fontsize: int Alppa fontsize fontweight: matplotlib fontweight Alpha fontweight boxstyle: matplotlib boxstyle Alpha boxstyle facecolor: matplotlib facecolor Color of box containing label edgecolor: matplotlib edgecolor Color of box'es border containing label alpha: float Transparency of label Returns ------- axes: list of pyplot.ax A list of matplotlib axes objects with alpha labels added
[ "Add", "sequential", "alphbet", "labels", "to", "subplot", "axes" ]
python
train
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L336-L346
def save(self, filename=None): """ saves the configuration in the given filename, if it is none the filename at load time is used. :param filename: the file name :type filename: string :return: """ content = self.data.yaml() with open(Config.path_expand(ConfigDict.filename), 'w') as f: f.write(content)
[ "def", "save", "(", "self", ",", "filename", "=", "None", ")", ":", "content", "=", "self", ".", "data", ".", "yaml", "(", ")", "with", "open", "(", "Config", ".", "path_expand", "(", "ConfigDict", ".", "filename", ")", ",", "'w'", ")", "as", "f", ...
saves the configuration in the given filename, if it is none the filename at load time is used. :param filename: the file name :type filename: string :return:
[ "saves", "the", "configuration", "in", "the", "given", "filename", "if", "it", "is", "none", "the", "filename", "at", "load", "time", "is", "used", ".", ":", "param", "filename", ":", "the", "file", "name", ":", "type", "filename", ":", "string", ":", ...
python
train
spyder-ide/spyder
spyder/config/user.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/config/user.py#L417-L425
def set_default(self, section, option, default_value): """ Set Default value for a given (section, option) -> called when a new (section, option) is set and no default exists """ section = self._check_section_option(section, option) for sec, options in self.defaults: if sec == section: options[ option ] = default_value
[ "def", "set_default", "(", "self", ",", "section", ",", "option", ",", "default_value", ")", ":", "section", "=", "self", ".", "_check_section_option", "(", "section", ",", "option", ")", "for", "sec", ",", "options", "in", "self", ".", "defaults", ":", ...
Set Default value for a given (section, option) -> called when a new (section, option) is set and no default exists
[ "Set", "Default", "value", "for", "a", "given", "(", "section", "option", ")", "-", ">", "called", "when", "a", "new", "(", "section", "option", ")", "is", "set", "and", "no", "default", "exists" ]
python
train
InfoAgeTech/django-core
django_core/forms/widgets.py
https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/forms/widgets.py#L78-L85
def get_widget_css_class(self, attrs): """Gets the class for the widget.""" size_class = 'size-{0}'.format(self.num_inputs) if 'class' in attrs: attrs['class'] += ' {0}'.format(size_class) else: attrs['class'] = size_class
[ "def", "get_widget_css_class", "(", "self", ",", "attrs", ")", ":", "size_class", "=", "'size-{0}'", ".", "format", "(", "self", ".", "num_inputs", ")", "if", "'class'", "in", "attrs", ":", "attrs", "[", "'class'", "]", "+=", "' {0}'", ".", "format", "("...
Gets the class for the widget.
[ "Gets", "the", "class", "for", "the", "widget", "." ]
python
train
neighbordog/deviantart
deviantart/api.py
https://github.com/neighbordog/deviantart/blob/5612f1d5e2139a48c9d793d7fd19cde7e162d7b1/deviantart/api.py#L299-L312
def get_categories(self, catpath="/"): """Fetch the categorytree :param catpath: The category to list children of """ response = self._req('/browse/categorytree', { "catpath":catpath }) categories = response['categories'] return categories
[ "def", "get_categories", "(", "self", ",", "catpath", "=", "\"/\"", ")", ":", "response", "=", "self", ".", "_req", "(", "'/browse/categorytree'", ",", "{", "\"catpath\"", ":", "catpath", "}", ")", "categories", "=", "response", "[", "'categories'", "]", "...
Fetch the categorytree :param catpath: The category to list children of
[ "Fetch", "the", "categorytree" ]
python
train
rflamary/POT
ot/lp/cvx.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/lp/cvx.py#L22-L26
def scipy_sparse_to_spmatrix(A): """Efficient conversion from scipy sparse matrix to cvxopt sparse matrix""" coo = A.tocoo() SP = spmatrix(coo.data.tolist(), coo.row.tolist(), coo.col.tolist(), size=A.shape) return SP
[ "def", "scipy_sparse_to_spmatrix", "(", "A", ")", ":", "coo", "=", "A", ".", "tocoo", "(", ")", "SP", "=", "spmatrix", "(", "coo", ".", "data", ".", "tolist", "(", ")", ",", "coo", ".", "row", ".", "tolist", "(", ")", ",", "coo", ".", "col", "....
Efficient conversion from scipy sparse matrix to cvxopt sparse matrix
[ "Efficient", "conversion", "from", "scipy", "sparse", "matrix", "to", "cvxopt", "sparse", "matrix" ]
python
train
bitcraze/crazyflie-lib-python
cflib/crazyflie/__init__.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/__init__.py#L356-L359
def add_port_callback(self, port, cb): """Add a callback for data that comes on a specific port""" logger.debug('Adding callback on port [%d] to [%s]', port, cb) self.add_header_callback(cb, port, 0, 0xff, 0x0)
[ "def", "add_port_callback", "(", "self", ",", "port", ",", "cb", ")", ":", "logger", ".", "debug", "(", "'Adding callback on port [%d] to [%s]'", ",", "port", ",", "cb", ")", "self", ".", "add_header_callback", "(", "cb", ",", "port", ",", "0", ",", "0xff"...
Add a callback for data that comes on a specific port
[ "Add", "a", "callback", "for", "data", "that", "comes", "on", "a", "specific", "port" ]
python
train
dottedmag/pychm
chm/chm.py
https://github.com/dottedmag/pychm/blob/fd87831a8c23498e65304fce341718bd2968211b/chm/chm.py#L238-L319
def GetArchiveInfo(self): '''Obtains information on CHM archive. This function checks the /#SYSTEM file inside the CHM archive to obtain the index, home page, topics, encoding and title. It is called from LoadCHM. ''' self.searchable = extra.is_searchable(self.file) self.lcid = None result, ui = chmlib.chm_resolve_object(self.file, '/#SYSTEM') if (result != chmlib.CHM_RESOLVE_SUCCESS): sys.stderr.write('GetArchiveInfo: #SYSTEM does not exist\n') return 0 size, text = chmlib.chm_retrieve_object(self.file, ui, 4l, ui.length) if (size == 0): sys.stderr.write('GetArchiveInfo: file size = 0\n') return 0 buff = array.array('B', text) index = 0 while (index < size): cursor = buff[index] + (buff[index+1] * 256) if (cursor == 0): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.topics = '/' + text[index:index+cursor-1] elif (cursor == 1): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.index = '/' + text[index:index+cursor-1] elif (cursor == 2): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.home = '/' + text[index:index+cursor-1] elif (cursor == 3): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.title = text[index:index+cursor-1] elif (cursor == 4): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.lcid = buff[index] + (buff[index+1] * 256) elif (cursor == 6): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 tmp = text[index:index+cursor-1] if not self.topics: tmp1 = '/' + tmp + '.hhc' tmp2 = '/' + tmp + '.hhk' res1, ui1 = chmlib.chm_resolve_object(self.file, tmp1) res2, ui2 = chmlib.chm_resolve_object(self.file, tmp2) if not self.topics and res1 == chmlib.CHM_RESOLVE_SUCCESS: self.topics = '/' + tmp + '.hhc' if not self.index and res2 == chmlib.CHM_RESOLVE_SUCCESS: self.index = '/' + tmp + '.hhk' elif (cursor == 16): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.encoding = text[index:index+cursor-1] else: index 
+= 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 index += cursor self.GetWindowsInfo() if not self.lcid: self.lcid = extra.get_lcid(self.file) return 1
[ "def", "GetArchiveInfo", "(", "self", ")", ":", "self", ".", "searchable", "=", "extra", ".", "is_searchable", "(", "self", ".", "file", ")", "self", ".", "lcid", "=", "None", "result", ",", "ui", "=", "chmlib", ".", "chm_resolve_object", "(", "self", ...
Obtains information on CHM archive. This function checks the /#SYSTEM file inside the CHM archive to obtain the index, home page, topics, encoding and title. It is called from LoadCHM.
[ "Obtains", "information", "on", "CHM", "archive", ".", "This", "function", "checks", "the", "/", "#SYSTEM", "file", "inside", "the", "CHM", "archive", "to", "obtain", "the", "index", "home", "page", "topics", "encoding", "and", "title", ".", "It", "is", "c...
python
train
vaexio/vaex
packages/vaex-astro/vaex/astro/export.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-astro/vaex/astro/export.py#L23-L107
def export_hdf5_v1(dataset, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True): """ :param DatasetLocal dataset: dataset to export :param str path: path for file :param lis[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param: bool virtual: When True, export virtual columns :return: """ if selection: if selection == True: # easier to work with the name selection = "default" # first open file using h5py api with h5py.File(path, "w") as h5file_output: h5data_output = h5file_output.require_group("data") # i1, i2 = dataset.current_slice N = len(dataset) if not selection else dataset.selected_length(selection) if N == 0: raise ValueError("Cannot export empty table") logger.debug("virtual=%r", virtual) logger.debug("exporting %d rows to file %s" % (N, path)) # column_names = column_names or (dataset.get_column_names() + (list(dataset.virtual_columns.keys()) if virtual else [])) column_names = column_names or dataset.get_column_names(virtual=virtual, strings=True) logger.debug("exporting columns(hdf5): %r" % column_names) for column_name in column_names: if column_name in dataset.get_column_names(strings=True): column = dataset.columns[column_name] shape = (N,) + column.shape[1:] dtype = column.dtype else: dtype = np.float64().dtype shape = (N,) if dtype.type == np.datetime64: array = h5file_output.require_dataset("/data/%s" % column_name, shape=shape, dtype=np.int64) array.attrs["dtype"] = dtype.name else: try: array = h5file_output.require_dataset("/data/%s" % column_name, shape=shape, dtype=dtype.newbyteorder(byteorder)) except: logging.exception("error creating 
dataset for %r, with type %r " % (column_name, dtype)) array[0] = array[0] # make sure the array really exists random_index_name = None column_order = list(column_names) # copy if shuffle: random_index_name = "random_index" while random_index_name in dataset.get_column_names(): random_index_name += "_new" shuffle_array = h5file_output.require_dataset("/data/" + random_index_name, shape=(N,), dtype=byteorder + "i8") shuffle_array[0] = shuffle_array[0] column_order.append(random_index_name) # last item h5data_output.attrs["column_order"] = ",".join(column_order) # keep track or the ordering of columns # after this the file is closed,, and reopen it using out class dataset_output = vaex.hdf5.dataset.Hdf5MemoryMapped(path, write=True) column_names = vaex.export._export(dataset_input=dataset, dataset_output=dataset_output, path=path, random_index_column=random_index_name, column_names=column_names, selection=selection, shuffle=shuffle, byteorder=byteorder, progress=progress) import getpass import datetime user = getpass.getuser() date = str(datetime.datetime.now()) source = dataset.path description = "file exported by vaex, by user %s, on date %s, from source %s" % (user, date, source) if dataset.description: description += "previous description:\n" + dataset.description dataset_output.copy_metadata(dataset) dataset_output.description = description logger.debug("writing meta information") dataset_output.write_meta() dataset_output.close_files() return
[ "def", "export_hdf5_v1", "(", "dataset", ",", "path", ",", "column_names", "=", "None", ",", "byteorder", "=", "\"=\"", ",", "shuffle", "=", "False", ",", "selection", "=", "False", ",", "progress", "=", "None", ",", "virtual", "=", "True", ")", ":", "...
:param DatasetLocal dataset: dataset to export :param str path: path for file :param lis[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param: bool virtual: When True, export virtual columns :return:
[ ":", "param", "DatasetLocal", "dataset", ":", "dataset", "to", "export", ":", "param", "str", "path", ":", "path", "for", "file", ":", "param", "lis", "[", "str", "]", "column_names", ":", "list", "of", "column", "names", "to", "export", "or", "None", ...
python
test
DLR-RM/RAFCON
source/rafcon/gui/utils/notification_overview.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/utils/notification_overview.py#L114-L269
def get_nice_info_dict_string(self, info, level='\t', overview=None): """ Inserts all elements of a notification info-dictionary of gtkmvc3 or a Signal into one string and indicates levels of calls defined by 'kwargs'. Additionally, the elements get structured into a dict that holds all levels of the general notification key-value pairs in faster accessible lists. The dictionary has the element 'type' and the general elements {'model': [], 'prop_name': [], 'instance': [], 'method_name': [], 'args': [], 'kwargs': []}) plus specific elements according the type. Type is always one of the following list ['before', 'after', 'signal']. """ def get_nice_meta_signal_msg_tuple_string(meta_signal_msg_tuple, level, overview): meta_signal_dict = {} # origin s = "\n{0}origin={1}".format(level + "\t", meta_signal_msg_tuple.origin) meta_signal_dict['origin'] = meta_signal_msg_tuple.origin # change s += "\n{0}change={1}".format(level + "\t", meta_signal_msg_tuple.change) meta_signal_dict['change'] = meta_signal_msg_tuple.change # affects_children s += "\n{0}affects_children={1}".format(level + "\t", meta_signal_msg_tuple.affects_children) meta_signal_dict['affects_children'] = meta_signal_msg_tuple.affects_children overview['signal'].append(meta_signal_dict) # notification (tuple) notification_dict = {} meta_signal_dict['notification'] = notification_dict if meta_signal_msg_tuple.notification is None: s += "\n{0}notification={1}".format(level + "\t", meta_signal_msg_tuple.notification) else: s += "\n{0}notification=Notification(".format(level + "\t") # model notification_dict['model'] = meta_signal_msg_tuple.notification.model s += "\n{0}model={1}".format(level + "\t\t", meta_signal_msg_tuple.notification.model) # prop_name notification_dict['prop_name'] = meta_signal_msg_tuple.notification.prop_name s += "\n{0}prop_name={1}".format(level + "\t\t", meta_signal_msg_tuple.notification.prop_name) # info notification_dict['info'] = meta_signal_msg_tuple.notification.info 
overview['kwargs'].append(meta_signal_msg_tuple.notification.info) s += "\n{0}info=\n{1}{0}\n".format(level + "\t\t", self.get_nice_info_dict_string(meta_signal_msg_tuple.notification.info, level+'\t\t\t', overview)) return s def get_nice_action_signal_msg_tuple_string(meta_signal_msg_tuple, level, overview): meta_signal_dict = {} # after s = "\n{0}after={1}".format(level + "\t", meta_signal_msg_tuple.after) meta_signal_dict['after'] = meta_signal_msg_tuple.after # action s += "\n{0}action={1}".format(level + "\t", meta_signal_msg_tuple.action) meta_signal_dict['action'] = meta_signal_msg_tuple.action # origin s += "\n{0}origin={1}".format(level + "\t", meta_signal_msg_tuple.origin) meta_signal_dict['origin'] = meta_signal_msg_tuple.origin # origin s += "\n{0}action_parent_m={1}".format(level + "\t", meta_signal_msg_tuple.action_parent_m) meta_signal_dict['action_parent_m'] = meta_signal_msg_tuple.origin # change s += "\n{0}affected_models={1}".format(level + "\t", meta_signal_msg_tuple.affected_models) meta_signal_dict['affected_models'] = meta_signal_msg_tuple.affected_models if meta_signal_msg_tuple.after: s += "\n{0}result={1}".format(level + "\t", meta_signal_msg_tuple.result) meta_signal_dict['result'] = meta_signal_msg_tuple.result return s overview_was_none = False if overview is None: overview_was_none = True overview = dict({'model': [], 'prop_name': [], 'instance': [], 'method_name': [], 'args': [], 'kwargs': []}) overview['others'] = [] overview['info'] = [] if 'before' in info: overview['type'] = 'before' elif 'after' in info: overview['type'] = 'after' overview['result'] = [] else: # 'signal' in info: overview['type'] = 'signal' overview['signal'] = [] if ('after' in info or 'before' in info or 'signal' in info) and 'model' in info: if 'before' in info: s = "{0}'before': {1}".format(level, info['before']) elif 'after' in info: s = "{0}'after': {1}".format(level, info['after']) else: s = "{0}'signal': {1}".format(level, info['signal']) else: return 
str(info) overview['info'].append(info) # model s += "\n{0}'model': {1}".format(level, info['model']) overview['model'].append(info['model']) # prop_name s += "\n{0}'prop_name': {1}".format(level, info['prop_name']) overview['prop_name'].append(info['prop_name']) if not overview['type'] == 'signal': # instance s += "\n{0}'instance': {1}".format(level, info['instance']) overview['instance'].append(info['instance']) # method_name s += "\n{0}'method_name': {1}".format(level, info['method_name']) overview['method_name'].append(info['method_name']) # args s += "\n{0}'args': {1}".format(level, info['args']) overview['args'].append(info['args']) overview['kwargs'].append(info['kwargs']) if overview['type'] == 'after': overview['result'].append(info['result']) # kwargs s += "\n{0}'kwargs': {1}".format(level, self.get_nice_info_dict_string(info['kwargs'], level + "\t", overview)) if overview['type'] == 'after': s += "\n{0}'result': {1}".format(level, info['result']) # additional elements not created by gtkmvc3 or common function calls overview['others'].append({}) for key, value in info.items(): if key in ['before', 'after', 'model', 'prop_name', 'instance', 'method_name', 'args', 'kwargs', 'result']: pass else: s += "\n{0}'{2}': {1}".format(level, info[key], key) overview['others'][len(overview['others'])-1][key] = info[key] else: overview['kwargs'].append({}) # print(info) # print(info['arg']) if isinstance(info['arg'], MetaSignalMsg): overview['signal'].append(info['arg']) s += "\n{0}'arg': MetaSignalMsg({1}".format(level, get_nice_meta_signal_msg_tuple_string(info['arg'], level, overview)) elif isinstance(info['arg'], ActionSignalMsg): overview['instance'].append(info['arg'].action_parent_m.core_element) overview['method_name'].append(info['arg'].action) overview['signal'].append(info['arg']) overview['kwargs'].append(info['arg'].kwargs) # TODO check again this stuff args = [info['arg'].action_parent_m.core_element, ] args.extend(info['arg'].kwargs.values()) 
overview['args'].append(args) s += "\n{0}'arg': ActionSignalMsg({1}".format(level, get_nice_action_signal_msg_tuple_string(info['arg'], level, overview)) else: raise str(info) if overview_was_none: return s, overview else: return s
[ "def", "get_nice_info_dict_string", "(", "self", ",", "info", ",", "level", "=", "'\\t'", ",", "overview", "=", "None", ")", ":", "def", "get_nice_meta_signal_msg_tuple_string", "(", "meta_signal_msg_tuple", ",", "level", ",", "overview", ")", ":", "meta_signal_di...
Inserts all elements of a notification info-dictionary of gtkmvc3 or a Signal into one string and indicates levels of calls defined by 'kwargs'. Additionally, the elements get structured into a dict that holds all levels of the general notification key-value pairs in faster accessible lists. The dictionary has the element 'type' and the general elements {'model': [], 'prop_name': [], 'instance': [], 'method_name': [], 'args': [], 'kwargs': []}) plus specific elements according the type. Type is always one of the following list ['before', 'after', 'signal'].
[ "Inserts", "all", "elements", "of", "a", "notification", "info", "-", "dictionary", "of", "gtkmvc3", "or", "a", "Signal", "into", "one", "string", "and", "indicates", "levels", "of", "calls", "defined", "by", "kwargs", ".", "Additionally", "the", "elements", ...
python
train
gwastro/pycbc
pycbc/tmpltbank/calc_moments.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/calc_moments.py#L23-L149
def determine_eigen_directions(metricParams, preserveMoments=False, vary_fmax=False, vary_density=None): """ This function will calculate the coordinate transfomations that are needed to rotate from a coordinate system described by the various Lambda components in the frequency expansion, to a coordinate system where the metric is Cartesian. Parameters ----------- metricParams : metricParameters instance Structure holding all the options for construction of the metric. preserveMoments : boolean, optional (default False) Currently only used for debugging. If this is given then if the moments structure is already set within metricParams then they will not be recalculated. vary_fmax : boolean, optional (default False) If set to False the metric and rotations are calculated once, for the full range of frequency [f_low,f_upper). If set to True the metric and rotations are calculated multiple times, for frequency ranges [f_low,f_low + i*vary_density), where i starts at 1 and runs up until f_low + (i+1)*vary_density > f_upper. Thus values greater than f_upper are *not* computed. The calculation for the full range [f_low,f_upper) is also done. vary_density : float, optional If vary_fmax is True, this will be used in computing the frequency ranges as described for vary_fmax. Returns -------- metricParams : metricParameters instance Structure holding all the options for construction of the metric. **THIS FUNCTION ONLY RETURNS THE CLASS** The following will be **added** to this structure metricParams.evals : Dictionary of numpy.array Each entry in the dictionary corresponds to the different frequency ranges described in vary_fmax. If vary_fmax = False, the only entry will be f_upper, this corresponds to integrals in [f_low,f_upper). This entry is always present. Each other entry will use floats as keys to the dictionary. These floats give the upper frequency cutoff when it is varying. 
Each numpy.array contains the eigenvalues which, with the eigenvectors in evecs, are needed to rotate the coordinate system to one in which the metric is the identity matrix. metricParams.evecs : Dictionary of numpy.matrix Each entry in the dictionary is as described under evals. Each numpy.matrix contains the eigenvectors which, with the eigenvalues in evals, are needed to rotate the coordinate system to one in which the metric is the identity matrix. metricParams.metric : Dictionary of numpy.matrix Each entry in the dictionary is as described under evals. Each numpy.matrix contains the metric of the parameter space in the Lambda_i coordinate system. metricParams.moments : Moments structure See the structure documentation for a description of this. This contains the result of all the integrals used in computing the metrics above. It can be used for the ethinca components calculation, or other similar calculations. """ evals = {} evecs = {} metric = {} unmax_metric = {} # First step is to get the moments needed to calculate the metric if not (metricParams.moments and preserveMoments): get_moments(metricParams, vary_fmax=vary_fmax, vary_density=vary_density) # What values are going to be in the moments # J7 is the normalization factor so it *MUST* be present list = metricParams.moments['J7'].keys() # We start looping over every item in the list of metrics for item in list: # Here we convert the moments into a form easier to use here Js = {} for i in range(-7,18): Js[i] = metricParams.moments['J%d'%(i)][item] logJs = {} for i in range(-1,18): logJs[i] = metricParams.moments['log%d'%(i)][item] loglogJs = {} for i in range(-1,18): loglogJs[i] = metricParams.moments['loglog%d'%(i)][item] logloglogJs = {} for i in range(-1,18): logloglogJs[i] = metricParams.moments['logloglog%d'%(i)][item] loglogloglogJs = {} for i in range(-1,18): loglogloglogJs[i] = metricParams.moments['loglogloglog%d'%(i)][item] mapping = generate_mapping(metricParams.pnOrder) # Calculate the metric 
gs, unmax_metric_curr = calculate_metric(Js, logJs, loglogJs, logloglogJs, loglogloglogJs, mapping) metric[item] = numpy.matrix(gs) unmax_metric[item] = unmax_metric_curr # And the eigenvalues evals[item],evecs[item] = numpy.linalg.eig(gs) # Numerical error can lead to small negative eigenvalues. for i in range(len(evals[item])): if evals[item][i] < 0: # Due to numerical imprecision the very small eigenvalues can # be negative. Make these positive. evals[item][i] = -evals[item][i] if evecs[item][i,i] < 0: # We demand a convention that all diagonal terms in the matrix # of eigenvalues are positive. # This is done to help visualization of the spaces (increasing # mchirp always goes the same way) evecs[item][:,i] = - evecs[item][:,i] metricParams.evals = evals metricParams.evecs = evecs metricParams.metric = metric metricParams.time_unprojected_metric = unmax_metric return metricParams
[ "def", "determine_eigen_directions", "(", "metricParams", ",", "preserveMoments", "=", "False", ",", "vary_fmax", "=", "False", ",", "vary_density", "=", "None", ")", ":", "evals", "=", "{", "}", "evecs", "=", "{", "}", "metric", "=", "{", "}", "unmax_metr...
This function will calculate the coordinate transfomations that are needed to rotate from a coordinate system described by the various Lambda components in the frequency expansion, to a coordinate system where the metric is Cartesian. Parameters ----------- metricParams : metricParameters instance Structure holding all the options for construction of the metric. preserveMoments : boolean, optional (default False) Currently only used for debugging. If this is given then if the moments structure is already set within metricParams then they will not be recalculated. vary_fmax : boolean, optional (default False) If set to False the metric and rotations are calculated once, for the full range of frequency [f_low,f_upper). If set to True the metric and rotations are calculated multiple times, for frequency ranges [f_low,f_low + i*vary_density), where i starts at 1 and runs up until f_low + (i+1)*vary_density > f_upper. Thus values greater than f_upper are *not* computed. The calculation for the full range [f_low,f_upper) is also done. vary_density : float, optional If vary_fmax is True, this will be used in computing the frequency ranges as described for vary_fmax. Returns -------- metricParams : metricParameters instance Structure holding all the options for construction of the metric. **THIS FUNCTION ONLY RETURNS THE CLASS** The following will be **added** to this structure metricParams.evals : Dictionary of numpy.array Each entry in the dictionary corresponds to the different frequency ranges described in vary_fmax. If vary_fmax = False, the only entry will be f_upper, this corresponds to integrals in [f_low,f_upper). This entry is always present. Each other entry will use floats as keys to the dictionary. These floats give the upper frequency cutoff when it is varying. Each numpy.array contains the eigenvalues which, with the eigenvectors in evecs, are needed to rotate the coordinate system to one in which the metric is the identity matrix. 
metricParams.evecs : Dictionary of numpy.matrix Each entry in the dictionary is as described under evals. Each numpy.matrix contains the eigenvectors which, with the eigenvalues in evals, are needed to rotate the coordinate system to one in which the metric is the identity matrix. metricParams.metric : Dictionary of numpy.matrix Each entry in the dictionary is as described under evals. Each numpy.matrix contains the metric of the parameter space in the Lambda_i coordinate system. metricParams.moments : Moments structure See the structure documentation for a description of this. This contains the result of all the integrals used in computing the metrics above. It can be used for the ethinca components calculation, or other similar calculations.
[ "This", "function", "will", "calculate", "the", "coordinate", "transfomations", "that", "are", "needed", "to", "rotate", "from", "a", "coordinate", "system", "described", "by", "the", "various", "Lambda", "components", "in", "the", "frequency", "expansion", "to", ...
python
train
andy-z/ged4py
ged4py/parser.py
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/parser.py#L60-L129
def guess_codec(file, errors="strict", require_char=False): """Look at file contents and guess its correct encoding. File must be open in binary mode and positioned at offset 0. If BOM record is present then it is assumed to be UTF-8 or UTF-16 encoded file. GEDCOM header is searched for CHAR record and encoding name is extracted from it, if BOM record is present then CHAR record must match BOM-defined encoding. :param file: File object, must be open in binary mode. :param str errors: Controls error handling behavior during string decoding, accepts same values as standard `codecs.decode` method. :param bool require_char: If True then exception is thrown if CHAR record is not found in a header, if False and CHAR is not in the header then codec determined from BOM or "gedcom" is returned. :returns: Tuple (codec_name, bom_size) :raises: :py:class:`CodecError` when codec name in file is unknown or when codec name in file contradicts codec determined from BOM. :raises: :py:class:`UnicodeDecodeError` when codec fails to decode input lines and `errors` is set to "strict" (default). """ # mapping of gedcom character set specifiers to Python encoding names gedcom_char_to_codec = { 'ansel': 'gedcom', } # check BOM first bom_codec = check_bom(file) bom_size = file.tell() codec = bom_codec or 'gedcom' # scan header until CHAR or end of header while True: # this stops at '\n' line = file.readline() if not line: raise IOError("Unexpected EOF while reading GEDCOM header") # do not decode bytes to strings here, reason is that some # stupid apps split CONC record at byte level (in middle of # of multi-byte characters). This implies that we can only # work with encodings that have ASCII as single-byte subset. 
line = line.lstrip().rstrip(b"\r\n") words = line.split() if len(words) >= 2 and words[0] == b"0" and words[1] != b"HEAD": # past header but have not seen CHAR if require_char: raise CodecError("GEDCOM header does not have CHAR record") else: break elif len(words) >= 3 and words[0] == b"1" and words[1] == b"CHAR": try: encoding = words[2].decode(codec, errors) encoding = gedcom_char_to_codec.get(encoding.lower(), encoding.lower()) new_codec = codecs.lookup(encoding).name except LookupError: raise CodecError("Unknown codec name {0}".format(encoding)) if bom_codec is None: codec = new_codec elif new_codec != bom_codec: raise CodecError("CHAR codec {0} is different from BOM " "codec {1}".format(new_codec, bom_codec)) break return codec, bom_size
[ "def", "guess_codec", "(", "file", ",", "errors", "=", "\"strict\"", ",", "require_char", "=", "False", ")", ":", "# mapping of gedcom character set specifiers to Python encoding names", "gedcom_char_to_codec", "=", "{", "'ansel'", ":", "'gedcom'", ",", "}", "# check BO...
Look at file contents and guess its correct encoding. File must be open in binary mode and positioned at offset 0. If BOM record is present then it is assumed to be UTF-8 or UTF-16 encoded file. GEDCOM header is searched for CHAR record and encoding name is extracted from it, if BOM record is present then CHAR record must match BOM-defined encoding. :param file: File object, must be open in binary mode. :param str errors: Controls error handling behavior during string decoding, accepts same values as standard `codecs.decode` method. :param bool require_char: If True then exception is thrown if CHAR record is not found in a header, if False and CHAR is not in the header then codec determined from BOM or "gedcom" is returned. :returns: Tuple (codec_name, bom_size) :raises: :py:class:`CodecError` when codec name in file is unknown or when codec name in file contradicts codec determined from BOM. :raises: :py:class:`UnicodeDecodeError` when codec fails to decode input lines and `errors` is set to "strict" (default).
[ "Look", "at", "file", "contents", "and", "guess", "its", "correct", "encoding", "." ]
python
train
numenta/nupic
src/nupic/database/client_jobs_dao.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/client_jobs_dao.py#L1005-L1033
def _getOneMatchingRowNoRetries(self, tableInfo, conn, fieldsToMatch, selectFieldNames): """ Return a single matching row with the requested field values from the the requested table or None if nothing matched. tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance conn: Owned connection acquired from ConnectionFactory.get() fieldsToMatch: Dictionary of internal fieldName/value mappings that identify the desired rows. If a value is an instance of ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the operator 'IN' will be used in the corresponding SQL predicate; if the value is bool: "IS TRUE/FALSE"; if the value is None: "IS NULL"; '=' will be used for all other cases. selectFieldNames: list of fields to return, using internal field names retval: A sequence of field values of the matching row in the order of the given field names; or None if there was no match. """ rows = self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch, selectFieldNames, maxRows=1) if rows: assert len(rows) == 1, repr(len(rows)) result = rows[0] else: result = None return result
[ "def", "_getOneMatchingRowNoRetries", "(", "self", ",", "tableInfo", ",", "conn", ",", "fieldsToMatch", ",", "selectFieldNames", ")", ":", "rows", "=", "self", ".", "_getMatchingRowsNoRetries", "(", "tableInfo", ",", "conn", ",", "fieldsToMatch", ",", "selectField...
Return a single matching row with the requested field values from the the requested table or None if nothing matched. tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance conn: Owned connection acquired from ConnectionFactory.get() fieldsToMatch: Dictionary of internal fieldName/value mappings that identify the desired rows. If a value is an instance of ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the operator 'IN' will be used in the corresponding SQL predicate; if the value is bool: "IS TRUE/FALSE"; if the value is None: "IS NULL"; '=' will be used for all other cases. selectFieldNames: list of fields to return, using internal field names retval: A sequence of field values of the matching row in the order of the given field names; or None if there was no match.
[ "Return", "a", "single", "matching", "row", "with", "the", "requested", "field", "values", "from", "the", "the", "requested", "table", "or", "None", "if", "nothing", "matched", "." ]
python
valid
polysquare/polysquare-generic-file-linter
polysquarelinter/spelling.py
https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L467-L480
def get_transition(self, # suppress(too-many-arguments) line, line_index, column, is_escaped, comment_system_transitions, eof=False): """Return a parser state, a move-ahead amount, and an append range. If this parser state should terminate and return back to the TEXT state, then return that state and also any corresponding chunk that would have been yielded as a result. """ raise NotImplementedError("""Cannot instantiate base ParserState""")
[ "def", "get_transition", "(", "self", ",", "# suppress(too-many-arguments)", "line", ",", "line_index", ",", "column", ",", "is_escaped", ",", "comment_system_transitions", ",", "eof", "=", "False", ")", ":", "raise", "NotImplementedError", "(", "\"\"\"Cannot instanti...
Return a parser state, a move-ahead amount, and an append range. If this parser state should terminate and return back to the TEXT state, then return that state and also any corresponding chunk that would have been yielded as a result.
[ "Return", "a", "parser", "state", "a", "move", "-", "ahead", "amount", "and", "an", "append", "range", "." ]
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/WSDLTools.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/WSDLTools.py#L1473-L1477
def addOutParameter(self, name, type, namespace=None, element_type=0): """Add an output parameter description to the call info.""" parameter = ParameterInfo(name, type, namespace, element_type) self.outparams.append(parameter) return parameter
[ "def", "addOutParameter", "(", "self", ",", "name", ",", "type", ",", "namespace", "=", "None", ",", "element_type", "=", "0", ")", ":", "parameter", "=", "ParameterInfo", "(", "name", ",", "type", ",", "namespace", ",", "element_type", ")", "self", ".",...
Add an output parameter description to the call info.
[ "Add", "an", "output", "parameter", "description", "to", "the", "call", "info", "." ]
python
train
maxzheng/localconfig
localconfig/manager.py
https://github.com/maxzheng/localconfig/blob/636087f2489295d9dae2693dda8a86e4daa4ff9d/localconfig/manager.py#L109-L117
def _add_dot_key(self, section, key=None): """ :param str section: Config section :param str key: Config key """ if key: self._dot_keys[self._to_dot_key(section, key)] = (section, key) else: self._dot_keys[self._to_dot_key(section)] = section
[ "def", "_add_dot_key", "(", "self", ",", "section", ",", "key", "=", "None", ")", ":", "if", "key", ":", "self", ".", "_dot_keys", "[", "self", ".", "_to_dot_key", "(", "section", ",", "key", ")", "]", "=", "(", "section", ",", "key", ")", "else", ...
:param str section: Config section :param str key: Config key
[ ":", "param", "str", "section", ":", "Config", "section", ":", "param", "str", "key", ":", "Config", "key" ]
python
train
fabioz/PyDev.Debugger
third_party/pep8/lib2to3/lib2to3/refactor.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/refactor.py#L339-L360
def refactor_file(self, filename, write=False, doctests_only=False): """Refactors a file.""" input, encoding = self._read_python_source(filename) if input is None: # Reading the file failed. return input += u"\n" # Silence certain parse errors if doctests_only: self.log_debug("Refactoring doctests in %s", filename) output = self.refactor_docstring(input, filename) if self.write_unchanged_files or output != input: self.processed_file(output, filename, input, write, encoding) else: self.log_debug("No doctest changes in %s", filename) else: tree = self.refactor_string(input, filename) if self.write_unchanged_files or (tree and tree.was_changed): # The [:-1] is to take off the \n we added earlier self.processed_file(unicode(tree)[:-1], filename, write=write, encoding=encoding) else: self.log_debug("No changes in %s", filename)
[ "def", "refactor_file", "(", "self", ",", "filename", ",", "write", "=", "False", ",", "doctests_only", "=", "False", ")", ":", "input", ",", "encoding", "=", "self", ".", "_read_python_source", "(", "filename", ")", "if", "input", "is", "None", ":", "# ...
Refactors a file.
[ "Refactors", "a", "file", "." ]
python
train
tBuLi/symfit
symfit/core/fit.py
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L851-L871
def eval_hessian(self, *args, **kwargs): """ :return: Hessian evaluated at the specified point. """ # Evaluate the hessian model and use the resulting Ans namedtuple as a # dict. From this, take the relevant components. eval_hess_dict = self.hessian_model(*args, **kwargs)._asdict() hess = [[[np.broadcast_to(eval_hess_dict.get(D(var, p1, p2), 0), eval_hess_dict[var].shape) for p2 in self.params] for p1 in self.params] for var in self ] # Use numpy to broadcast these arrays together and then stack them along # the parameter dimension. We do not include the component direction in # this, because the components can have independent shapes. for idx, comp in enumerate(hess): hess[idx] = np.stack(np.broadcast_arrays(*comp)) Ans = variabletuple('Ans', self.keys()) return Ans(*hess)
[ "def", "eval_hessian", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Evaluate the hessian model and use the resulting Ans namedtuple as a", "# dict. From this, take the relevant components.", "eval_hess_dict", "=", "self", ".", "hessian_model", "(", "...
:return: Hessian evaluated at the specified point.
[ ":", "return", ":", "Hessian", "evaluated", "at", "the", "specified", "point", "." ]
python
train
joshspeagle/dynesty
dynesty/sampler.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/sampler.py#L863-L899
def add_final_live(self, print_progress=True, print_func=None): """ **A wrapper that executes the loop adding the final live points.** Adds the final set of live points to the pre-existing sequence of dead points from the current nested sampling run. Parameters ---------- print_progress : bool, optional Whether or not to output a simple summary of the current run that updates with each iteration. Default is `True`. print_func : function, optional A function that prints out the current state of the sampler. If not provided, the default :meth:`results.print_fn` is used. """ # Initialize quantities/ if print_func is None: print_func = print_fn # Add remaining live points to samples. ncall = self.ncall it = self.it - 1 for i, results in enumerate(self.add_live_points()): (worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar, h, nc, worst_it, boundidx, bounditer, eff, delta_logz) = results if delta_logz > 1e6: delta_logz = np.inf if logz <= -1e6: logz = -np.inf # Print progress. if print_progress: print_func(results, it, ncall, add_live_it=i+1, dlogz=0.01)
[ "def", "add_final_live", "(", "self", ",", "print_progress", "=", "True", ",", "print_func", "=", "None", ")", ":", "# Initialize quantities/", "if", "print_func", "is", "None", ":", "print_func", "=", "print_fn", "# Add remaining live points to samples.", "ncall", ...
**A wrapper that executes the loop adding the final live points.** Adds the final set of live points to the pre-existing sequence of dead points from the current nested sampling run. Parameters ---------- print_progress : bool, optional Whether or not to output a simple summary of the current run that updates with each iteration. Default is `True`. print_func : function, optional A function that prints out the current state of the sampler. If not provided, the default :meth:`results.print_fn` is used.
[ "**", "A", "wrapper", "that", "executes", "the", "loop", "adding", "the", "final", "live", "points", ".", "**", "Adds", "the", "final", "set", "of", "live", "points", "to", "the", "pre", "-", "existing", "sequence", "of", "dead", "points", "from", "the",...
python
train
SCIP-Interfaces/PySCIPOpt
examples/unfinished/portfolio_soco.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/unfinished/portfolio_soco.py#L27-L55
def p_portfolio(I,sigma,r,alpha,beta): """p_portfolio -- modified markowitz model for portfolio optimization. Parameters: - I: set of items - sigma[i]: standard deviation of item i - r[i]: revenue of item i - alpha: acceptance threshold - beta: desired confidence level Returns a model, ready to be solved. """ model = Model("p_portfolio") x = {} for i in I: x[i] = model.addVar(vtype="C", name="x(%s)"%i) # quantity of i to buy rho = model.addVar(vtype="C", name="rho") rhoaux = model.addVar(vtype="C", name="rhoaux") model.addCons(rho == quicksum(r[i]*x[i] for i in I)) model.addCons(quicksum(x[i] for i in I) == 1) model.addCons(rhoaux == (alpha - rho)*(1/phi_inv(beta))) #todo model.addCons(quicksum(sigma[i]**2 * x[i] * x[i] for i in I) <= rhoaux * rhoaux) model.setObjective(rho, "maximize") model.data = x return model
[ "def", "p_portfolio", "(", "I", ",", "sigma", ",", "r", ",", "alpha", ",", "beta", ")", ":", "model", "=", "Model", "(", "\"p_portfolio\"", ")", "x", "=", "{", "}", "for", "i", "in", "I", ":", "x", "[", "i", "]", "=", "model", ".", "addVar", ...
p_portfolio -- modified markowitz model for portfolio optimization. Parameters: - I: set of items - sigma[i]: standard deviation of item i - r[i]: revenue of item i - alpha: acceptance threshold - beta: desired confidence level Returns a model, ready to be solved.
[ "p_portfolio", "--", "modified", "markowitz", "model", "for", "portfolio", "optimization", ".", "Parameters", ":", "-", "I", ":", "set", "of", "items", "-", "sigma", "[", "i", "]", ":", "standard", "deviation", "of", "item", "i", "-", "r", "[", "i", "]...
python
train
pytorch/vision
torchvision/datasets/mnist.py
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/mnist.py#L262-L304
def download(self):
    """Download the EMNIST data if it doesn't exist in processed_folder already."""
    # Local imports keep these modules off the hot module-import path.
    import shutil
    import zipfile

    if self._check_exists():
        return

    makedir_exist_ok(self.raw_folder)
    makedir_exist_ok(self.processed_folder)

    # download files
    filename = self.url.rpartition('/')[2]
    file_path = os.path.join(self.raw_folder, filename)
    download_url(self.url, root=self.raw_folder, filename=filename, md5=None)

    print('Extracting zip archive')
    with zipfile.ZipFile(file_path) as zip_f:
        zip_f.extractall(self.raw_folder)
    # The zip is no longer needed once extracted.
    os.unlink(file_path)
    gzip_folder = os.path.join(self.raw_folder, 'gzip')
    for gzip_file in os.listdir(gzip_folder):
        if gzip_file.endswith('.gz'):
            self.extract_gzip(gzip_path=os.path.join(gzip_folder, gzip_file))

    # process and save as torch files
    for split in self.splits:
        print('Processing ' + split)
        training_set = (
            read_image_file(os.path.join(gzip_folder, 'emnist-{}-train-images-idx3-ubyte'.format(split))),
            read_label_file(os.path.join(gzip_folder, 'emnist-{}-train-labels-idx1-ubyte'.format(split)))
        )
        test_set = (
            read_image_file(os.path.join(gzip_folder, 'emnist-{}-test-images-idx3-ubyte'.format(split))),
            read_label_file(os.path.join(gzip_folder, 'emnist-{}-test-labels-idx1-ubyte'.format(split)))
        )
        with open(os.path.join(self.processed_folder, self._training_file(split)), 'wb') as f:
            torch.save(training_set, f)
        with open(os.path.join(self.processed_folder, self._test_file(split)), 'wb') as f:
            torch.save(test_set, f)
    # Remove the intermediate extracted files after all splits are saved.
    shutil.rmtree(gzip_folder)

    print('Done!')
[ "def", "download", "(", "self", ")", ":", "import", "shutil", "import", "zipfile", "if", "self", ".", "_check_exists", "(", ")", ":", "return", "makedir_exist_ok", "(", "self", ".", "raw_folder", ")", "makedir_exist_ok", "(", "self", ".", "processed_folder", ...
Download the EMNIST data if it doesn't exist in processed_folder already.
[ "Download", "the", "EMNIST", "data", "if", "it", "doesn", "t", "exist", "in", "processed_folder", "already", "." ]
python
test
wakatime/wakatime
wakatime/packages/urllib3/util/connection.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/urllib3/util/connection.py#L7-L29
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    # 'False' sentinel distinguishes "no sock attribute at all" (AppEngine)
    # from "sock attribute is None" (closed); the order of checks matters.
    sock = getattr(conn, 'sock', False)
    if sock is False:  # Platform-specific: AppEngine
        return False
    if sock is None:  # Connection already closed (such as by httplib).
        return True

    # Without selector support we cannot probe the socket, so assume alive.
    if not HAS_SELECT:
        return False

    try:
        # Non-blocking readability probe: any readiness on an idle connection
        # is treated as dropped.
        return bool(wait_for_read(sock, timeout=0.0))
    except SelectorError:
        return True
[ "def", "is_connection_dropped", "(", "conn", ")", ":", "# Platform-specific", "sock", "=", "getattr", "(", "conn", ",", "'sock'", ",", "False", ")", "if", "sock", "is", "False", ":", "# Platform-specific: AppEngine", "return", "False", "if", "sock", "is", "Non...
Returns True if the connection is dropped and should be closed. :param conn: :class:`httplib.HTTPConnection` object. Note: For platforms like AppEngine, this will always return ``False`` to let the platform handle connection recycling transparently for us.
[ "Returns", "True", "if", "the", "connection", "is", "dropped", "and", "should", "be", "closed", "." ]
python
train
Mindwerks/worldengine
worldengine/draw.py
https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/draw.py#L323-L353
def draw_simple_elevation(world, sea_level, target):
    """ This function can be used on a generic canvas (either an image to save
        on disk or a canvas part of a GUI)

    Land elevations are rescaled into [1, 12] (11 bands above sea level) and,
    when an ocean layer exists, ocean elevations into [0, 1], before each cell
    is mapped to a color via elevation_color() and written pixel-by-pixel.
    """
    e = world.layers['elevation'].data
    # FIX: numpy.float was a deprecated alias of the builtin float and was
    # removed in NumPy 1.24; the builtin gives the same float64 dtype.
    c = numpy.empty(e.shape, dtype=float)

    has_ocean = not (sea_level is None or world.layers['ocean'].data is None or not world.layers['ocean'].data.any())  # or 'not any ocean'
    mask_land = numpy.ma.array(e, mask=world.layers['ocean'].data if has_ocean else False)  # only land

    min_elev_land = mask_land.min()
    max_elev_land = mask_land.max()
    # 11 bands between min and max land elevation.
    elev_delta_land = (max_elev_land - min_elev_land) / 11.0

    if has_ocean:
        land = numpy.logical_not(world.layers['ocean'].data)
        mask_ocean = numpy.ma.array(e, mask=land)  # only ocean
        min_elev_sea = mask_ocean.min()
        max_elev_sea = mask_ocean.max()
        elev_delta_sea = max_elev_sea - min_elev_sea

        # Ocean cells -> [0, 1]; land cells -> [1, 12].
        c[world.layers['ocean'].data] = ((e[world.layers['ocean'].data] - min_elev_sea) / elev_delta_sea)
        c[land] = ((e[land] - min_elev_land) / elev_delta_land) + 1
    else:
        c = ((e - min_elev_land) / elev_delta_land) + 1

    for y in range(world.height):
        for x in range(world.width):
            r, g, b = elevation_color(c[y, x], sea_level)
            target.set_pixel(x, y, (int(r * 255), int(g * 255),
                                    int(b * 255), 255))
[ "def", "draw_simple_elevation", "(", "world", ",", "sea_level", ",", "target", ")", ":", "e", "=", "world", ".", "layers", "[", "'elevation'", "]", ".", "data", "c", "=", "numpy", ".", "empty", "(", "e", ".", "shape", ",", "dtype", "=", "numpy", ".",...
This function can be used on a generic canvas (either an image to save on disk or a canvas part of a GUI)
[ "This", "function", "can", "be", "used", "on", "a", "generic", "canvas", "(", "either", "an", "image", "to", "save", "on", "disk", "or", "a", "canvas", "part", "of", "a", "GUI", ")" ]
python
train
Shapeways/coyote_framework
coyote_framework/mixins/filesystem.py
https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/mixins/filesystem.py#L7-L19
def create_directory(directory):
    """Creates a directory if it does not exist (in a thread-safe way)

    @param directory: The directory to create
    @return: The directory specified
    @raise OSError: if creation fails for any reason other than the
        directory already existing (e.g. permission denied)
    """
    try:
        os.makedirs(directory)
    except OSError as e:  # 'as' syntax works on Python 2.6+ and Python 3
        # Benign race: another thread/process created the directory between
        # our check and the makedirs call.
        if e.errno == errno.EEXIST and os.path.isdir(directory):
            pass
        else:
            # FIX: the original fell through and silently swallowed every
            # OSError (permission denied, read-only FS, ...); re-raise those.
            raise

    return directory
[ "def", "create_directory", "(", "directory", ")", ":", "try", ":", "os", ".", "makedirs", "(", "directory", ")", "except", "OSError", ",", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EEXIST", "and", "os", ".", "path", ".", "isdir", "(", ...
Creates a directory if it does not exist (in a thread-safe way) @param directory: The directory to create @return: The directory specified
[ "Creates", "a", "directory", "if", "it", "does", "not", "exist", "(", "in", "a", "thread", "-", "safe", "way", ")" ]
python
train
tmux-python/libtmux
libtmux/window.py
https://github.com/tmux-python/libtmux/blob/8eb2f8bbea3a025c1567b1516653414dbc24e1fc/libtmux/window.py#L356-L378
def select_pane(self, target_pane):
    """
    Return selected :class:`Pane` through ``$ tmux select-pane``.

    Parameters
    ----------
    target_pane : str
        'target_pane', '-U' ,'-D', '-L', '-R', or '-l'.

    Return
    ------
    :class:`Pane`

    Raises
    ------
    exc.LibTmuxException
        If the underlying tmux command writes to stderr.
    """
    # Directional / last-pane flags are relative to a window, so the window
    # id is the -t target and the flag is passed through; otherwise the
    # argument itself names the target pane.
    if target_pane in ['-l', '-U', '-D', '-L', '-R']:
        proc = self.cmd('select-pane', '-t%s' % self.id, target_pane)
    else:
        proc = self.cmd('select-pane', '-t%s' % target_pane)

    if proc.stderr:
        raise exc.LibTmuxException(proc.stderr)

    return self.attached_pane
[ "def", "select_pane", "(", "self", ",", "target_pane", ")", ":", "if", "target_pane", "in", "[", "'-l'", ",", "'-U'", ",", "'-D'", ",", "'-L'", ",", "'-R'", "]", ":", "proc", "=", "self", ".", "cmd", "(", "'select-pane'", ",", "'-t%s'", "%", "self", ...
Return selected :class:`Pane` through ``$ tmux select-pane``. Parameters ---------- target_pane : str 'target_pane', '-U' ,'-D', '-L', '-R', or '-l'. Return ------ :class:`Pane`
[ "Return", "selected", ":", "class", ":", "Pane", "through", "$", "tmux", "select", "-", "pane", "." ]
python
train
universalcore/unicore.distribute
unicore/distribute/utils.py
https://github.com/universalcore/unicore.distribute/blob/f3216fefd9df5aef31b3d1b666eb3f79db032d98/unicore/distribute/utils.py#L50-L69
def get_dict(self, section, option):
    """
    This allows for loading of Pyramid dictionary style configuration
    options:

        [foo]
        bar =
            baz=qux
            zap=paz

    ``get_dict('foo', 'bar')`` returns ``{'baz': 'qux', 'zap': 'paz'}``

    :param str section: The section to read.
    :param str option: The option to read from the section.
    :returns: dict
    """
    # FIX: use a raw string for the regex -- '\s' in a plain literal is an
    # invalid escape sequence (DeprecationWarning today, a SyntaxError in
    # future Python versions).
    return dict(re.split(r'\s*=\s*', value)
                for value in self.get_list(section, option))
[ "def", "get_dict", "(", "self", ",", "section", ",", "option", ")", ":", "return", "dict", "(", "re", ".", "split", "(", "'\\s*=\\s*'", ",", "value", ")", "for", "value", "in", "self", ".", "get_list", "(", "section", ",", "option", ")", ")" ]
This allows for loading of Pyramid dictionary style configuration options: [foo] bar = baz=qux zap=paz ``get_dict('foo', 'bar')`` returns ``{'baz': 'qux', 'zap': 'paz'}`` :param str section: The section to read. :param str option: The option to read from the section. :returns: dict
[ "This", "allows", "for", "loading", "of", "Pyramid", "dictionary", "style", "configuration", "options", ":" ]
python
train
numenta/nupic
src/nupic/algorithms/spatial_pooler.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L1476-L1492
def _updateBoostFactorsGlobal(self): """ Update boost factors when global inhibition is used """ # When global inhibition is enabled, the target activation level is # the sparsity of the spatial pooler if (self._localAreaDensity > 0): targetDensity = self._localAreaDensity else: inhibitionArea = ((2 * self._inhibitionRadius + 1) ** self._columnDimensions.size) inhibitionArea = min(self._numColumns, inhibitionArea) targetDensity = float(self._numActiveColumnsPerInhArea) / inhibitionArea targetDensity = min(targetDensity, 0.5) self._boostFactors = numpy.exp( (targetDensity - self._activeDutyCycles) * self._boostStrength)
[ "def", "_updateBoostFactorsGlobal", "(", "self", ")", ":", "# When global inhibition is enabled, the target activation level is", "# the sparsity of the spatial pooler", "if", "(", "self", ".", "_localAreaDensity", ">", "0", ")", ":", "targetDensity", "=", "self", ".", "_lo...
Update boost factors when global inhibition is used
[ "Update", "boost", "factors", "when", "global", "inhibition", "is", "used" ]
python
valid
scoutapp/scout_apm_python
src/scout_apm/api/context.py
https://github.com/scoutapp/scout_apm_python/blob/e5539ee23b8129be9b75d5007c88b6158b51294f/src/scout_apm/api/context.py#L8-L18
def add(key, value):
    """Adds context to the currently executing request.

    :key: Any String identifying the request context.
        Example: "user_ip", "plan", "alert_count"
    :value: Any json-serializable type.
        Example: "1.1.1.1", "free", 100
    :returns: nothing.
    """
    TrackedRequest.instance().tag(key, value)
[ "def", "add", "(", "key", ",", "value", ")", ":", "tr", "=", "TrackedRequest", ".", "instance", "(", ")", "tr", ".", "tag", "(", "key", ",", "value", ")" ]
Adds context to the currently executing request. :key: Any String identifying the request context. Example: "user_ip", "plan", "alert_count" :value: Any json-serializable type. Example: "1.1.1.1", "free", 100 :returns: nothing.
[ "Adds", "context", "to", "the", "currently", "executing", "request", "." ]
python
train
Jammy2211/PyAutoLens
autolens/plotters/array_plotters.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/plotters/array_plotters.py#L495-L520
def plot_mask(mask, units, kpc_per_arcsec, pointsize, zoom_offset_pixels):
    """Plot the mask of the array on the figure.

    Parameters
    -----------
    mask : ndarray of data.array.mask.Mask
        The mask applied to the array, the edge of which is plotted as a set of points over the plotted array.
    units : str
        The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').
    kpc_per_arcsec : float or None
        The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.
    pointsize : int
        The size of the points plotted to show the mask.
    zoom_offset_pixels :
        Presumably the pixel offset of a zoomed/extracted sub-array, which is
        subtracted from the edge coordinates; None leaves them unchanged.
        TODO confirm against callers.
    """
    if mask is not None:
        plt.gca()
        # +0.5: presumably shifts pixel indices to pixel centres — TODO confirm.
        edge_pixels = mask.masked_grid_index_to_pixel[mask.edge_pixels] + 0.5
        if zoom_offset_pixels is not None:
            edge_pixels -= zoom_offset_pixels
        edge_arcsec = mask.grid_pixels_to_grid_arcsec(grid_pixels=edge_pixels)
        edge_units = convert_grid_units(array=mask, grid_arcsec=edge_arcsec, units=units,
                                        kpc_per_arcsec=kpc_per_arcsec)

        # Column 0 is y, column 1 is x, as used here when scattering.
        plt.scatter(y=edge_units[:,0], x=edge_units[:,1], s=pointsize, c='k')
[ "def", "plot_mask", "(", "mask", ",", "units", ",", "kpc_per_arcsec", ",", "pointsize", ",", "zoom_offset_pixels", ")", ":", "if", "mask", "is", "not", "None", ":", "plt", ".", "gca", "(", ")", "edge_pixels", "=", "mask", ".", "masked_grid_index_to_pixel", ...
Plot the mask of the array on the figure. Parameters ----------- mask : ndarray of data.array.mask.Mask The mask applied to the array, the edge of which is plotted as a set of points over the plotted array. units : str The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc'). kpc_per_arcsec : float or None The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc. pointsize : int The size of the points plotted to show the mask.
[ "Plot", "the", "mask", "of", "the", "array", "on", "the", "figure", "." ]
python
valid
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py#L898-L909
def ovsdb_server_port(self, **kwargs):
    """Auto Generated Code

    Build an ``ovsdb-server``/``port`` configuration element in the
    brocade-tunnels namespace and pass it to the callback for execution.
    Expects ``name`` and ``port`` in kwargs; ``callback`` is optional and
    defaults to the instance callback.
    """
    config_elem = ET.Element("config")
    server_elem = ET.SubElement(config_elem, "ovsdb-server",
                                xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(server_elem, "name").text = kwargs.pop('name')
    ET.SubElement(server_elem, "port").text = kwargs.pop('port')

    # Use the caller-supplied callback if given, else the instance default.
    callback = kwargs.pop('callback', self._callback)
    return callback(config_elem)
[ "def", "ovsdb_server_port", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ovsdb_server", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ovsdb-server\"", ",", "xmlns", "=", "\"urn:brocade...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
Vagrants/blackbird
blackbird/utils/configread.py
https://github.com/Vagrants/blackbird/blob/3b38cd5650caae362e0668dbd38bf8f88233e079/blackbird/utils/configread.py#L218-L227
def notify(self, name, job):
    """
    Concrete method of Subject.notify().
    Broadcast a status change to every registered observer by calling its
    update() method with the given name and job.
    In this program, ConfigReader.notify() calls JobObserver.update(),
    e.g. to register threads.redis.ConcreteJob with JobObserver.jobs.
    """
    for registered_observer in self._observers:
        registered_observer.update(name, job)
[ "def", "notify", "(", "self", ",", "name", ",", "job", ")", ":", "for", "observer", "in", "self", ".", "_observers", ":", "observer", ".", "update", "(", "name", ",", "job", ")" ]
Concrete method of Subject.notify(). Notify to change the status of Subject for observer. This method call Observer.update(). In this program, ConfigReader.notify() call JobObserver.update(). For exmaple, register threads.redis.ConcreateJob to JobObserver.jobs.
[ "Concrete", "method", "of", "Subject", ".", "notify", "()", ".", "Notify", "to", "change", "the", "status", "of", "Subject", "for", "observer", ".", "This", "method", "call", "Observer", ".", "update", "()", ".", "In", "this", "program", "ConfigReader", "....
python
train
spyder-ide/spyder
spyder/utils/qthelpers.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/qthelpers.py#L396-L411
def show(self, dialog):
    """Generic method to show a non-modal dialog and keep reference
    to the Qt C++ object"""
    # If a dialog with the same window title is already tracked, re-show
    # and raise it instead of opening a duplicate.
    for dlg in list(self.dialogs.values()):
        if to_text_string(dlg.windowTitle()) \
           == to_text_string(dialog.windowTitle()):
            dlg.show()
            dlg.raise_()
            break
    else:
        dialog.show()
        # Keep a Python reference so the Qt C++ object is not collected.
        self.dialogs[id(dialog)] = dialog
        # Drop the reference when the dialog finishes; binding id(dialog)
        # as a default argument freezes the key per connection.
        dialog.accepted.connect(
            lambda eid=id(dialog): self.dialog_finished(eid))
        dialog.rejected.connect(
            lambda eid=id(dialog): self.dialog_finished(eid))
[ "def", "show", "(", "self", ",", "dialog", ")", ":", "for", "dlg", "in", "list", "(", "self", ".", "dialogs", ".", "values", "(", ")", ")", ":", "if", "to_text_string", "(", "dlg", ".", "windowTitle", "(", ")", ")", "==", "to_text_string", "(", "di...
Generic method to show a non-modal dialog and keep reference to the Qt C++ object
[ "Generic", "method", "to", "show", "a", "non", "-", "modal", "dialog", "and", "keep", "reference", "to", "the", "Qt", "C", "++", "object" ]
python
train
crytic/slither
slither/core/declarations/function.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/core/declarations/function.py#L782-L792
def apply_visitor(self, Visitor):
    """
    Apply a visitor to all the function expressions

    Args:
        Visitor: slither.visitors
    Returns
        list(): results of the visit, flattened into a single list
    """
    flattened = []
    for expression in self.expressions:
        flattened.extend(Visitor(expression).result())
    return flattened
[ "def", "apply_visitor", "(", "self", ",", "Visitor", ")", ":", "expressions", "=", "self", ".", "expressions", "v", "=", "[", "Visitor", "(", "e", ")", ".", "result", "(", ")", "for", "e", "in", "expressions", "]", "return", "[", "item", "for", "subl...
Apply a visitor to all the function expressions Args: Visitor: slither.visitors Returns list(): results of the visit
[ "Apply", "a", "visitor", "to", "all", "the", "function", "expressions", "Args", ":", "Visitor", ":", "slither", ".", "visitors", "Returns", "list", "()", ":", "results", "of", "the", "visit" ]
python
train
MillionIntegrals/vel
vel/optimizers/sgd.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/optimizers/sgd.py#L32-L34
def create(lr, weight_decay=0, momentum=0, layer_groups=False):
    """ Vel factory function: build an SGD optimizer factory with the
    given learning rate, weight decay, momentum and layer-group flag. """
    return SgdFactory(
        lr=lr,
        weight_decay=weight_decay,
        momentum=momentum,
        layer_groups=layer_groups,
    )
[ "def", "create", "(", "lr", ",", "weight_decay", "=", "0", ",", "momentum", "=", "0", ",", "layer_groups", "=", "False", ")", ":", "return", "SgdFactory", "(", "lr", "=", "lr", ",", "weight_decay", "=", "weight_decay", ",", "momentum", "=", "momentum", ...
Vel factory function
[ "Vel", "factory", "function" ]
python
train
cloudant/python-cloudant
src/cloudant/database.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L1403-L1413
def shards(self):
    """
    Retrieves information about the shards in the current remote database.

    :returns: Shard information retrieval status in JSON format
    """
    shards_url = '/'.join((self.database_url, '_shards'))
    response = self.r_session.get(shards_url)
    # Surface HTTP errors (4xx/5xx) instead of returning a bad payload.
    response.raise_for_status()
    return response_to_json_dict(response)
[ "def", "shards", "(", "self", ")", ":", "url", "=", "'/'", ".", "join", "(", "(", "self", ".", "database_url", ",", "'_shards'", ")", ")", "resp", "=", "self", ".", "r_session", ".", "get", "(", "url", ")", "resp", ".", "raise_for_status", "(", ")"...
Retrieves information about the shards in the current remote database. :returns: Shard information retrieval status in JSON format
[ "Retrieves", "information", "about", "the", "shards", "in", "the", "current", "remote", "database", "." ]
python
train
openpermissions/perch
perch/model.py
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L246-L267
def validate_state_transition(self, user, start_state, end_state, **kwargs):
    """
    Validate whether user can transition resource from start state to end state

    Tornado-style coroutine: the boolean result is delivered by raising
    ``Return``.

    :param user
    :param start_state
    :param end_state
    :return: bool
    """
    # A no-op transition is always allowed.
    if start_state == end_state:
        raise Return(True)

    transitions = self.state_transitions.get(start_state, [])

    # Users with approval rights additionally get the approval-only
    # transitions for this start state.
    approved_transitions = []
    can_approve = yield self.can_approve(user, **kwargs)
    if can_approve:
        approved_transitions = self.approval_state_transitions.get(start_state, [])

    if end_state not in transitions and end_state not in approved_transitions:
        raise Return(False)

    raise Return(True)
[ "def", "validate_state_transition", "(", "self", ",", "user", ",", "start_state", ",", "end_state", ",", "*", "*", "kwargs", ")", ":", "if", "start_state", "==", "end_state", ":", "raise", "Return", "(", "True", ")", "transitions", "=", "self", ".", "state...
Validate whether user can transition resource from start state to end state :param user :param start_state :param end_state :return: bool
[ "Validate", "whether", "user", "can", "transition", "resource", "from", "start", "state", "to", "end", "state", ":", "param", "user", ":", "param", "start_state", ":", "param", "end_state", ":", "return", ":", "bool" ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/agent/iptables_driver.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/iptables_driver.py#L66-L75
def remove_rule_entry(self, rule_info):
    """Remove host data object from rule_info list."""
    wanted_ip = rule_info.get('ip')
    wanted_mac = rule_info.get('mac')
    wanted_port = rule_info.get('port')
    # Iterate over a snapshot so in-place removal does not disturb iteration.
    for rule in list(self.rule_info):
        if (rule.ip == wanted_ip and rule.mac == wanted_mac and
                rule.port == wanted_port):
            LOG.debug('Removed rule info %s from the list', rule_info)
            self.rule_info.remove(rule)
[ "def", "remove_rule_entry", "(", "self", ",", "rule_info", ")", ":", "temp_list", "=", "list", "(", "self", ".", "rule_info", ")", "for", "rule", "in", "temp_list", ":", "if", "(", "rule", ".", "ip", "==", "rule_info", ".", "get", "(", "'ip'", ")", "...
Remove host data object from rule_info list.
[ "Remove", "host", "data", "object", "from", "rule_info", "list", "." ]
python
train
pandas-dev/pandas
pandas/tseries/offsets.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1474-L1490
def _get_offset_day(self, other):
    """
    Find the day in the same month as other that has the same
    weekday as self.weekday and is the self.week'th such day in the month.

    Parameters
    ----------
    other : datetime

    Returns
    -------
    day : int
    """
    month_start = datetime(other.year, other.month, 1)
    # Days from the 1st to the first occurrence of self.weekday.
    first_match_offset = (self.weekday - month_start.weekday()) % 7
    # Each subsequent occurrence is exactly one week later.
    return 1 + first_match_offset + self.week * 7
[ "def", "_get_offset_day", "(", "self", ",", "other", ")", ":", "mstart", "=", "datetime", "(", "other", ".", "year", ",", "other", ".", "month", ",", "1", ")", "wday", "=", "mstart", ".", "weekday", "(", ")", "shift_days", "=", "(", "self", ".", "w...
Find the day in the same month as other that has the same weekday as self.weekday and is the self.week'th such day in the month. Parameters ---------- other : datetime Returns ------- day : int
[ "Find", "the", "day", "in", "the", "same", "month", "as", "other", "that", "has", "the", "same", "weekday", "as", "self", ".", "weekday", "and", "is", "the", "self", ".", "week", "th", "such", "day", "in", "the", "month", "." ]
python
train
awslabs/sockeye
sockeye/arguments.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/arguments.py#L192-L222
def simple_dict() -> Callable:
    """
    A simple dictionary format that does not require spaces or quoting.

    Supported types: bool, int, float

    :return: A method that can be used as a type in argparse.
    """

    def parse(dict_str: str):

        def _coerce(raw: str):
            # Booleans are spelled exactly "True"/"False"; a '.' marks a
            # float, anything else must parse as an int.
            if raw == "True":
                return True
            if raw == "False":
                return False
            return float(raw) if "." in raw else int(raw)

        parsed = {}
        try:
            for entry in dict_str.split(","):
                key, value = entry.split(":")
                parsed[key] = _coerce(value)
        except ValueError:
            raise argparse.ArgumentTypeError("Specify argument dictionary as key1:value1,key2:value2,..."
                                             " Supported types: bool, int, float.")
        return parsed

    return parse
[ "def", "simple_dict", "(", ")", "->", "Callable", ":", "def", "parse", "(", "dict_str", ":", "str", ")", ":", "def", "_parse", "(", "value", ":", "str", ")", ":", "if", "value", "==", "\"True\"", ":", "return", "True", "if", "value", "==", "\"False\"...
A simple dictionary format that does not require spaces or quoting. Supported types: bool, int, float :return: A method that can be used as a type in argparse.
[ "A", "simple", "dictionary", "format", "that", "does", "not", "require", "spaces", "or", "quoting", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/distributions/mixture.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mixture.py#L497-L502
def _cat_probs(self, log_probs):
    """Get a list of num_components batchwise probabilities."""
    # Normalize the categorical logits; in log space when log_probs is set.
    which_softmax = tf.nn.log_softmax if log_probs else tf.nn.softmax
    cat_probs = which_softmax(self.cat.logits)
    # Split the last axis into a Python list of per-component tensors.
    cat_probs = tf.unstack(cat_probs, num=self.num_components, axis=-1)
    return cat_probs
[ "def", "_cat_probs", "(", "self", ",", "log_probs", ")", ":", "which_softmax", "=", "tf", ".", "nn", ".", "log_softmax", "if", "log_probs", "else", "tf", ".", "nn", ".", "softmax", "cat_probs", "=", "which_softmax", "(", "self", ".", "cat", ".", "logits"...
Get a list of num_components batchwise probabilities.
[ "Get", "a", "list", "of", "num_components", "batchwise", "probabilities", "." ]
python
test
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/run_filter.py
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/run_filter.py#L35-L68
def df_filter_col_sum(df, threshold, take_abs=True):
    ''' filter columns in matrix at some threshold
    and remove rows that have all zero values

    df is a dict holding DataFrames under 'mat' (and optionally 'mat_up',
    'mat_dn', 'mat_orig'); it is modified in place and returned.
    '''

    from copy import deepcopy
    from .__init__ import Network
    net = Network()  # NOTE(review): instantiated but never used here

    # Optionally filter on absolute values so negative entries count too.
    if take_abs is True:
        df_copy = deepcopy(df['mat'].abs())
    else:
        df_copy = deepcopy(df['mat'])

    # Transpose so the column filter can be expressed as a row-sum filter,
    # then transpose back and drop all-zero rows.
    df_copy = df_copy.transpose()
    df_copy = df_copy[df_copy.sum(axis=1) > threshold]
    df_copy = df_copy.transpose()
    df_copy = df_copy[df_copy.sum(axis=1) > 0]

    if take_abs is True:
        # Filtering was computed on abs values: re-subset the original
        # (signed) matrices to the surviving rows/columns.
        inst_rows = df_copy.index.tolist()
        inst_cols = df_copy.columns.tolist()
        df['mat'] = grab_df_subset(df['mat'], inst_rows, inst_cols)

        if 'mat_up' in df:
            df['mat_up'] = grab_df_subset(df['mat_up'], inst_rows, inst_cols)
            df['mat_dn'] = grab_df_subset(df['mat_dn'], inst_rows, inst_cols)

        if 'mat_orig' in df:
            df['mat_orig'] = grab_df_subset(df['mat_orig'], inst_rows, inst_cols)
    else:
        df['mat'] = df_copy

    return df
[ "def", "df_filter_col_sum", "(", "df", ",", "threshold", ",", "take_abs", "=", "True", ")", ":", "from", "copy", "import", "deepcopy", "from", ".", "__init__", "import", "Network", "net", "=", "Network", "(", ")", "if", "take_abs", "is", "True", ":", "df...
filter columns in matrix at some threshold and remove rows that have all zero values
[ "filter", "columns", "in", "matrix", "at", "some", "threshold", "and", "remove", "rows", "that", "have", "all", "zero", "values" ]
python
train
GeorgeArgyros/symautomata
symautomata/pdastring.py
https://github.com/GeorgeArgyros/symautomata/blob/f5d66533573b27e155bec3f36b8c00b8e3937cb3/symautomata/pdastring.py#L503-L509
def printer(self):
    """Visualizes the current state"""
    # Python 2 print statements: for every state that has outgoing
    # transitions, dump a header with its id, type and symbol, then the
    # transitions mapping itself.
    for key in self.statediag:
        if key.trans is not None and len(key.trans) > 0:
            print '****** ' + repr(key.id) + '(' + repr(key.type)\
                + ' on sym ' + repr(key.sym) + ') ******'
            print key.trans
[ "def", "printer", "(", "self", ")", ":", "for", "key", "in", "self", ".", "statediag", ":", "if", "key", ".", "trans", "is", "not", "None", "and", "len", "(", "key", ".", "trans", ")", ">", "0", ":", "print", "'****** '", "+", "repr", "(", "key",...
Visualizes the current state
[ "Visualizes", "the", "current", "state" ]
python
train
benley/butcher
butcher/targets/pkgfilegroup.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/pkgfilegroup.py#L62-L73
def output_files(self):
    """Returns the list of output files from this rule.

    Paths are generated from the outputs of this rule's dependencies, with
    their paths translated based on prefix and strip_prefix.

    Returned paths are relative to buildroot.
    """
    for dep_addr in self.subgraph.successors(self.address):
        dep_rule = self.subgraph.node[dep_addr]['target_obj']
        for produced in dep_rule.output_files:
            # Translate into this rule's prefix space, then strip the
            # leading slash so the path is buildroot-relative.
            yield self.translate_path(produced, dep_rule).lstrip('/')
[ "def", "output_files", "(", "self", ")", ":", "for", "dep", "in", "self", ".", "subgraph", ".", "successors", "(", "self", ".", "address", ")", ":", "dep_rule", "=", "self", ".", "subgraph", ".", "node", "[", "dep", "]", "[", "'target_obj'", "]", "fo...
Returns the list of output files from this rule. Paths are generated from the outputs of this rule's dependencies, with their paths translated based on prefix and strip_prefix. Returned paths are relative to buildroot.
[ "Returns", "the", "list", "of", "output", "files", "from", "this", "rule", "." ]
python
train
delfick/harpoon
harpoon/task_finder.py
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/task_finder.py#L26-L40
def find_tasks(self, overrides):
    """Find the custom tasks and record the associated image with each task"""
    tasks = self.default_tasks()
    configuration = self.collector.configuration

    # Merge in each image's task definitions, if any are configured.
    for image in list(configuration["images"].keys()):
        tasks_path = configuration.path(
            ["images", image, "tasks"],
            joined="images.{0}.tasks".format(image),
        )
        tasks.update(configuration.get(tasks_path, {}))

    # Caller-supplied overrides win over everything else.
    if overrides:
        tasks.update(overrides)

    self.tasks = tasks
    return tasks
[ "def", "find_tasks", "(", "self", ",", "overrides", ")", ":", "tasks", "=", "self", ".", "default_tasks", "(", ")", "configuration", "=", "self", ".", "collector", ".", "configuration", "for", "image", "in", "list", "(", "configuration", "[", "\"images\"", ...
Find the custom tasks and record the associated image with each task
[ "Find", "the", "custom", "tasks", "and", "record", "the", "associated", "image", "with", "each", "task" ]
python
train
mayhewj/greenstalk
greenstalk.py
https://github.com/mayhewj/greenstalk/blob/765a5e7321a101a08e400a66e88df06c57406f58/greenstalk.py#L263-L270
def bury(self, job: Job, priority: int = DEFAULT_PRIORITY) -> None:
    """Buries a reserved job.

    :param job: The job to bury.
    :param priority: An integer between 0 and 4,294,967,295 where 0 is the
                     most urgent.
    """
    command = b'bury %d %d' % (job.id, priority)
    self._send_cmd(command, b'BURIED')
[ "def", "bury", "(", "self", ",", "job", ":", "Job", ",", "priority", ":", "int", "=", "DEFAULT_PRIORITY", ")", "->", "None", ":", "self", ".", "_send_cmd", "(", "b'bury %d %d'", "%", "(", "job", ".", "id", ",", "priority", ")", ",", "b'BURIED'", ")" ...
Buries a reserved job. :param job: The job to bury. :param priority: An integer between 0 and 4,294,967,295 where 0 is the most urgent.
[ "Buries", "a", "reserved", "job", "." ]
python
train
EpistasisLab/scikit-mdr
mdr/mdr.py
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/mdr.py#L191-L208
def fit_predict(self, features, class_labels):
    """Convenience function that fits the provided data then constructs
    predictions from the provided features.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix
    class_labels: array-like {n_samples}
        List of true class labels

    Returns
    ----------
    array-like: {n_samples}
        Constructed features from the provided feature matrix
    """
    # Fit on the labelled data, then predict on the very same features.
    self.fit(features, class_labels)
    return self.predict(features)
[ "def", "fit_predict", "(", "self", ",", "features", ",", "class_labels", ")", ":", "self", ".", "fit", "(", "features", ",", "class_labels", ")", "return", "self", ".", "predict", "(", "features", ")" ]
Convenience function that fits the provided data then constructs predictions from the provided features. Parameters ---------- features: array-like {n_samples, n_features} Feature matrix class_labels: array-like {n_samples} List of true class labels Returns ---------- array-like: {n_samples} Constructed features from the provided feature matrix
[ "Convenience", "function", "that", "fits", "the", "provided", "data", "then", "constructs", "predictions", "from", "the", "provided", "features", "." ]
python
test
hotdoc/hotdoc
hotdoc/core/project.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/project.py#L304-L311
def __get_formatter(self, extension_name): """ Banana banana """ ext = self.extensions.get(extension_name) if ext: return ext.formatter return None
[ "def", "__get_formatter", "(", "self", ",", "extension_name", ")", ":", "ext", "=", "self", ".", "extensions", ".", "get", "(", "extension_name", ")", "if", "ext", ":", "return", "ext", ".", "formatter", "return", "None" ]
Banana banana
[ "Banana", "banana" ]
python
train
ray-project/ray
python/ray/rllib/models/preprocessors.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/models/preprocessors.py#L105-L124
def transform(self, observation): """Downsamples images from (210, 160, 3) by the configured factor.""" self.check_shape(observation) scaled = observation[25:-25, :, :] if self._dim < 84: scaled = cv2.resize(scaled, (84, 84)) # OpenAI: Resize by half, then down to 42x42 (essentially mipmapping). # If we resize directly we lose pixels that, when mapped to 42x42, # aren't close enough to the pixel boundary. scaled = cv2.resize(scaled, (self._dim, self._dim)) if self._grayscale: scaled = scaled.mean(2) scaled = scaled.astype(np.float32) # Rescale needed for maintaining 1 channel scaled = np.reshape(scaled, [self._dim, self._dim, 1]) if self._zero_mean: scaled = (scaled - 128) / 128 else: scaled *= 1.0 / 255.0 return scaled
[ "def", "transform", "(", "self", ",", "observation", ")", ":", "self", ".", "check_shape", "(", "observation", ")", "scaled", "=", "observation", "[", "25", ":", "-", "25", ",", ":", ",", ":", "]", "if", "self", ".", "_dim", "<", "84", ":", "scaled...
Downsamples images from (210, 160, 3) by the configured factor.
[ "Downsamples", "images", "from", "(", "210", "160", "3", ")", "by", "the", "configured", "factor", "." ]
python
train
LLNL/scraper
scraper/tfs/__init__.py
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/tfs/__init__.py#L33-L51
def create_tfs_project_analysis_client(url, token=None): """ Create a project_analysis_client.py client for a Team Foundation Server Enterprise connection instance. This is helpful for understanding project languages, but currently blank for all our test conditions. If token is not provided, will attempt to use the TFS_API_TOKEN environment variable if present. """ if token is None: token = os.environ.get('TFS_API_TOKEN', None) tfs_connection = create_tfs_connection(url, token) project_analysis_client = tfs_connection.get_client('vsts.project_analysis.v4_1.project_analysis_client.ProjectAnalysisClient') if project_analysis_client is None: msg = 'Unable to connect to TFS Enterprise (%s) with provided token.' raise RuntimeError(msg, url) return project_analysis_client
[ "def", "create_tfs_project_analysis_client", "(", "url", ",", "token", "=", "None", ")", ":", "if", "token", "is", "None", ":", "token", "=", "os", ".", "environ", ".", "get", "(", "'TFS_API_TOKEN'", ",", "None", ")", "tfs_connection", "=", "create_tfs_conne...
Create a project_analysis_client.py client for a Team Foundation Server Enterprise connection instance. This is helpful for understanding project languages, but currently blank for all our test conditions. If token is not provided, will attempt to use the TFS_API_TOKEN environment variable if present.
[ "Create", "a", "project_analysis_client", ".", "py", "client", "for", "a", "Team", "Foundation", "Server", "Enterprise", "connection", "instance", ".", "This", "is", "helpful", "for", "understanding", "project", "languages", "but", "currently", "blank", "for", "al...
python
test
pipermerriam/flex
flex/validation/common.py
https://github.com/pipermerriam/flex/blob/233f8149fb851a6255753bcec948cb6fefb2723b/flex/validation/common.py#L248-L264
def validate_unique_items(value, **kwargs): """ Validator for ARRAY types to enforce that all array items must be unique. """ # we can't just look at the items themselves since 0 and False are treated # the same as dictionary keys, and objects aren't hashable. counter = collections.Counter(( json.dumps(v, sort_keys=True) for v in value )) dupes = [json.loads(v) for v, count in counter.items() if count > 1] if dupes: raise ValidationError( MESSAGES['unique_items']['invalid'].format( repr(dupes), ), )
[ "def", "validate_unique_items", "(", "value", ",", "*", "*", "kwargs", ")", ":", "# we can't just look at the items themselves since 0 and False are treated", "# the same as dictionary keys, and objects aren't hashable.", "counter", "=", "collections", ".", "Counter", "(", "(", ...
Validator for ARRAY types to enforce that all array items must be unique.
[ "Validator", "for", "ARRAY", "types", "to", "enforce", "that", "all", "array", "items", "must", "be", "unique", "." ]
python
train
pallets/werkzeug
examples/coolmagic/utils.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/examples/coolmagic/utils.py#L33-L55
def export(string, template=None, **extra): """ Decorator for registering view functions and adding templates to it. """ def wrapped(f): endpoint = (f.__module__ + "." + f.__name__)[16:] if template is not None: old_f = f def f(**kwargs): rv = old_f(**kwargs) if not isinstance(rv, Response): rv = TemplateResponse(template, **(rv or {})) return rv f.__name__ = old_f.__name__ f.__doc__ = old_f.__doc__ exported_views[endpoint] = (f, string, extra) return f return wrapped
[ "def", "export", "(", "string", ",", "template", "=", "None", ",", "*", "*", "extra", ")", ":", "def", "wrapped", "(", "f", ")", ":", "endpoint", "=", "(", "f", ".", "__module__", "+", "\".\"", "+", "f", ".", "__name__", ")", "[", "16", ":", "]...
Decorator for registering view functions and adding templates to it.
[ "Decorator", "for", "registering", "view", "functions", "and", "adding", "templates", "to", "it", "." ]
python
train
MaxHalford/prince
prince/svd.py
https://github.com/MaxHalford/prince/blob/714c9cdfc4d9f8823eabf550a23ad01fe87c50d7/prince/svd.py#L10-L35
def compute_svd(X, n_components, n_iter, random_state, engine): """Computes an SVD with k components.""" # Determine what SVD engine to use if engine == 'auto': engine = 'sklearn' # Compute the SVD if engine == 'fbpca': if FBPCA_INSTALLED: U, s, V = fbpca.pca(X, k=n_components, n_iter=n_iter) else: raise ValueError('fbpca is not installed; please install it if you want to use it') elif engine == 'sklearn': U, s, V = extmath.randomized_svd( X, n_components=n_components, n_iter=n_iter, random_state=random_state ) else: raise ValueError("engine has to be one of ('auto', 'fbpca', 'sklearn')") U, V = extmath.svd_flip(U, V) return U, s, V
[ "def", "compute_svd", "(", "X", ",", "n_components", ",", "n_iter", ",", "random_state", ",", "engine", ")", ":", "# Determine what SVD engine to use", "if", "engine", "==", "'auto'", ":", "engine", "=", "'sklearn'", "# Compute the SVD", "if", "engine", "==", "'...
Computes an SVD with k components.
[ "Computes", "an", "SVD", "with", "k", "components", "." ]
python
train
yograterol/zoort
zoort.py
https://github.com/yograterol/zoort/blob/ed6669ab945007c20a83f6d468856c4eb585c752/zoort.py#L760-L801
def backup_database(args): ''' Backup one database from CLI ''' username = args.get('<user>') password = args.get('<password>') database = args['<database>'] host = args.get('<host>') or '127.0.0.1' path = args.get('--path') or os.getcwd() s3 = args.get('--upload_s3') glacier = args.get('--upload_glacier') dropbox = args.get('--upload_dropbox') swift = args.get('--upload_swift') encrypt = args.get('--encrypt') or 'Y' if not database: raise SystemExit(_error_codes.get(101)) if path and not os.path.isdir(path): raise SystemExit(_error_codes.get(105)) query = 'mongodump -d {database} --host {host} ' if username: query += '-u {username} ' if password: query += '-p {password} ' if path: query += '-o {path}/dump' local(query.format(username=username, password=password, database=database, host=host, path=path)) compress_file = compress_folder_dump( normalize_path(path) + 'dump', normalize_path(path)) shutil.rmtree(normalize_path(path) + 'dump') optional_actions(encrypt, path, compress_file, s3=s3, glacier=glacier, dropbox=dropbox, swift=swift)
[ "def", "backup_database", "(", "args", ")", ":", "username", "=", "args", ".", "get", "(", "'<user>'", ")", "password", "=", "args", ".", "get", "(", "'<password>'", ")", "database", "=", "args", "[", "'<database>'", "]", "host", "=", "args", ".", "get...
Backup one database from CLI
[ "Backup", "one", "database", "from", "CLI" ]
python
train
saltstack/salt
salt/spm/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L1015-L1077
def _build(self, args): ''' Build a package ''' if len(args) < 2: raise SPMInvocationError('A path to a formula must be specified') self.abspath = args[1].rstrip('/') comps = self.abspath.split('/') self.relpath = comps[-1] formula_path = '{0}/FORMULA'.format(self.abspath) if not os.path.exists(formula_path): raise SPMPackageError('Formula file {0} not found'.format(formula_path)) with salt.utils.files.fopen(formula_path) as fp_: formula_conf = salt.utils.yaml.safe_load(fp_) for field in ('name', 'version', 'release', 'summary', 'description'): if field not in formula_conf: raise SPMPackageError('Invalid package: a {0} must be defined'.format(field)) out_path = '{0}/{1}-{2}-{3}.spm'.format( self.opts['spm_build_dir'], formula_conf['name'], formula_conf['version'], formula_conf['release'], ) if not os.path.exists(self.opts['spm_build_dir']): os.mkdir(self.opts['spm_build_dir']) self.formula_conf = formula_conf formula_tar = tarfile.open(out_path, 'w:bz2') if 'files' in formula_conf: # This allows files to be added to the SPM file in a specific order. # It also allows for files to be tagged as a certain type, as with # RPM files. This tag is ignored here, but is used when installing # the SPM file. if isinstance(formula_conf['files'], list): formula_dir = tarfile.TarInfo(formula_conf['name']) formula_dir.type = tarfile.DIRTYPE formula_tar.addfile(formula_dir) for file_ in formula_conf['files']: for ftype in FILE_TYPES: if file_.startswith('{0}|'.format(ftype)): file_ = file_.lstrip('{0}|'.format(ftype)) formula_tar.add( os.path.join(os.getcwd(), file_), os.path.join(formula_conf['name'], file_), ) else: # If no files are specified, then the whole directory will be added. 
try: formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude) formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude) except TypeError: formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude) formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude) formula_tar.close() self.ui.status('Built package {0}'.format(out_path))
[ "def", "_build", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "<", "2", ":", "raise", "SPMInvocationError", "(", "'A path to a formula must be specified'", ")", "self", ".", "abspath", "=", "args", "[", "1", "]", ".", "rstrip", "(",...
Build a package
[ "Build", "a", "package" ]
python
train
gccxml/pygccxml
pygccxml/parser/directory_cache.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/directory_cache.py#L466-L484
def acquire_filename(self, name): """Acquire a file name and return its id and its signature. """ id_ = self.__id_lut.get(name) # Is this a new entry? if id_ is None: # then create one... id_ = self.__next_id self.__next_id += 1 self.__id_lut[name] = id_ entry = filename_entry_t(name) self.__entries[id_] = entry else: # otherwise obtain the entry... entry = self.__entries[id_] entry.inc_ref_count() return id_, self._get_signature(entry)
[ "def", "acquire_filename", "(", "self", ",", "name", ")", ":", "id_", "=", "self", ".", "__id_lut", ".", "get", "(", "name", ")", "# Is this a new entry?", "if", "id_", "is", "None", ":", "# then create one...", "id_", "=", "self", ".", "__next_id", "self"...
Acquire a file name and return its id and its signature.
[ "Acquire", "a", "file", "name", "and", "return", "its", "id", "and", "its", "signature", "." ]
python
train
theonion/influxer
influxer/wsgi.py
https://github.com/theonion/influxer/blob/bdc6e4770d1e37c21a785881c9c50f4c767b34cc/influxer/wsgi.py#L502-L569
def trending(params): """gets trending content values """ # get params try: series = params.get("site", [DEFAULT_SERIES])[0] offset = params.get("offset", [DEFAULT_GROUP_BY])[0] limit = params.get("limit", [20])[0] except Exception as e: LOGGER.exception(e) return json.dumps({"error": e.message}), "500 Internal Error" # check the cache cache_key = "{}:{}:{}:{}:{}".format(memcached_prefix, "trending.json", series, offset, limit) try: data = MEMCACHED_CLIENT.get(cache_key) if data: return data, "200 OK" except Exception as e: LOGGER.exception(e) # update series name series = update_trending_series(series) # parse the limit try: limit = int(limit) except ValueError: LOGGER.error("limit param must be an integer") return json.dumps({"error": "limit param must be an integer"}), "400 Bad Request" # build the query query = "SELECT content_id, sum(value) as value " \ "FROM {series} " \ "WHERE time > now() - {offset} " \ "GROUP BY content_id;" args = {"series": series, "offset": offset} # send the request try: res = INFLUXDB_CLIENT.query(query.format(**args)) # capture errors and send them back along with the query (for inspection/debugging) except Exception as e: LOGGER.exception(e) return json.dumps({"error": e.message, "query": query.format(**args)}), "500 Internal Error" # build the response object response = flatten_response(res) # limit the number of content per site for site, points in response.items(): sorted_content = sorted(points, key=lambda p: p["value"], reverse=True)[:limit] response[site] = sorted_content clean_response = {} for site, values in response.items(): clean_name = site.split("-")[0] clean_response[clean_name] = values res = json.dumps(clean_response) # cache the response try: MEMCACHED_CLIENT.set(cache_key, res, time=MEMCACHED_EXPIRATION) except Exception as e: LOGGER.exception(e) return res, "200 OK"
[ "def", "trending", "(", "params", ")", ":", "# get params", "try", ":", "series", "=", "params", ".", "get", "(", "\"site\"", ",", "[", "DEFAULT_SERIES", "]", ")", "[", "0", "]", "offset", "=", "params", ".", "get", "(", "\"offset\"", ",", "[", "DEFA...
gets trending content values
[ "gets", "trending", "content", "values" ]
python
train
pycontribs/pyrax
pyrax/clouddns.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddns.py#L228-L234
def update_record(self, record, data=None, priority=None, ttl=None, comment=None): """ Modifies an existing record for this domain. """ return self.manager.update_record(self, record, data=data, priority=priority, ttl=ttl, comment=comment)
[ "def", "update_record", "(", "self", ",", "record", ",", "data", "=", "None", ",", "priority", "=", "None", ",", "ttl", "=", "None", ",", "comment", "=", "None", ")", ":", "return", "self", ".", "manager", ".", "update_record", "(", "self", ",", "rec...
Modifies an existing record for this domain.
[ "Modifies", "an", "existing", "record", "for", "this", "domain", "." ]
python
train
saltstack/salt
salt/proxy/panos.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/panos.py#L442-L453
def grains(): ''' Get the grains from the proxied device ''' if not DETAILS.get('grains_cache', {}): DETAILS['grains_cache'] = GRAINS_CACHE try: query = {'type': 'op', 'cmd': '<show><system><info></info></system></show>'} DETAILS['grains_cache'] = call(query)['result']['system'] except Exception as err: pass return DETAILS['grains_cache']
[ "def", "grains", "(", ")", ":", "if", "not", "DETAILS", ".", "get", "(", "'grains_cache'", ",", "{", "}", ")", ":", "DETAILS", "[", "'grains_cache'", "]", "=", "GRAINS_CACHE", "try", ":", "query", "=", "{", "'type'", ":", "'op'", ",", "'cmd'", ":", ...
Get the grains from the proxied device
[ "Get", "the", "grains", "from", "the", "proxied", "device" ]
python
train
Calysto/calysto
calysto/ai/conx.py
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L4304-L4309
def loadWeightsFromFile(self, filename, mode='pickle'): """ Deprecated. Use loadWeights instead. """ Network.loadWeights(self, filename, mode) self.updateGraphics()
[ "def", "loadWeightsFromFile", "(", "self", ",", "filename", ",", "mode", "=", "'pickle'", ")", ":", "Network", ".", "loadWeights", "(", "self", ",", "filename", ",", "mode", ")", "self", ".", "updateGraphics", "(", ")" ]
Deprecated. Use loadWeights instead.
[ "Deprecated", ".", "Use", "loadWeights", "instead", "." ]
python
train
hyperledger/indy-node
indy_node/server/domain_req_handler.py
https://github.com/hyperledger/indy-node/blob/8fabd364eaf7d940a56df2911d9215b1e512a2de/indy_node/server/domain_req_handler.py#L784-L796
def _addAttr(self, txn, isCommitted=False) -> None: """ The state trie stores the hash of the whole attribute data at: the did+attribute name if the data is plaintext (RAW) the did+hash(attribute) if the data is encrypted (ENC) If the attribute is HASH, then nothing is stored in attribute store, the trie stores a blank value for the key did+hash """ assert get_type(txn) == ATTRIB attr_type, path, value, hashed_value, value_bytes = domain.prepare_attr_for_state(txn) self.state.set(path, value_bytes) if attr_type != HASH: self.attributeStore.set(hashed_value, value)
[ "def", "_addAttr", "(", "self", ",", "txn", ",", "isCommitted", "=", "False", ")", "->", "None", ":", "assert", "get_type", "(", "txn", ")", "==", "ATTRIB", "attr_type", ",", "path", ",", "value", ",", "hashed_value", ",", "value_bytes", "=", "domain", ...
The state trie stores the hash of the whole attribute data at: the did+attribute name if the data is plaintext (RAW) the did+hash(attribute) if the data is encrypted (ENC) If the attribute is HASH, then nothing is stored in attribute store, the trie stores a blank value for the key did+hash
[ "The", "state", "trie", "stores", "the", "hash", "of", "the", "whole", "attribute", "data", "at", ":", "the", "did", "+", "attribute", "name", "if", "the", "data", "is", "plaintext", "(", "RAW", ")", "the", "did", "+", "hash", "(", "attribute", ")", ...
python
train
tchellomello/python-arlo
pyarlo/base_station.py
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/base_station.py#L55-L90
def thread_function(self): """Thread function.""" self.__subscribed = True url = SUBSCRIBE_ENDPOINT + "?token=" + self._session_token data = self._session.query(url, method='GET', raw=True, stream=True) if not data or not data.ok: _LOGGER.debug("Did not receive a valid response. Aborting..") return None self.__sseclient = sseclient.SSEClient(data) try: for event in (self.__sseclient).events(): if not self.__subscribed: break data = json.loads(event.data) if data.get('status') == "connected": _LOGGER.debug("Successfully subscribed this base station") elif data.get('action'): action = data.get('action') resource = data.get('resource') if action == "logout": _LOGGER.debug("Logged out by some other entity") self.__subscribed = False break elif action == "is" and "subscriptions/" not in resource: self.__events.append(data) self.__event_handle.set() except TypeError as error: _LOGGER.debug("Got unexpected error: %s", error) return None return True
[ "def", "thread_function", "(", "self", ")", ":", "self", ".", "__subscribed", "=", "True", "url", "=", "SUBSCRIBE_ENDPOINT", "+", "\"?token=\"", "+", "self", ".", "_session_token", "data", "=", "self", ".", "_session", ".", "query", "(", "url", ",", "metho...
Thread function.
[ "Thread", "function", "." ]
python
train
pelotoncycle/cycle_detector
cycle_detector.py
https://github.com/pelotoncycle/cycle_detector/blob/a7c1a2e321e232de10f5862f6042471a3c60beb9/cycle_detector.py#L256-L311
def brent(seqs, f=None, start=None, key=lambda x: x): """Brent's Cycle Detector. See help(cycle_detector) for more context. Args: *args: Two iterators issueing the exact same sequence: -or- f, start: Function and starting state for finite state machine Yields: Values yielded by sequence_a if it terminates, undefined if a cycle is found. Raises: CycleFound if exception is found; if called with f and `start`, the parametres `first` and `period` will be defined indicating the offset of start of the cycle and the cycle's period. """ power = period = 1 tortise, hare = seqs yield hare.next() tortise_value = tortise.next() hare_value = hare.next() while key(tortise_value) != key(hare_value): yield hare_value if power == period: power *= 2 period = 0 if f: tortise = f_generator(f, hare_value) tortise_value = tortise.next() else: while tortise_value != hare_value: tortise_value = tortise.next() hare_value = hare.next() period += 1 if f is None: raise CycleDetected() first = 0 tortise_value = hare_value = start for _ in xrange(period): hare_value = f(hare_value) while key(tortise_value) != key(hare_value): tortise_value = f(tortise_value) hare_value = f(hare_value) first += 1 raise CycleDetected(period=period, first=first)
[ "def", "brent", "(", "seqs", ",", "f", "=", "None", ",", "start", "=", "None", ",", "key", "=", "lambda", "x", ":", "x", ")", ":", "power", "=", "period", "=", "1", "tortise", ",", "hare", "=", "seqs", "yield", "hare", ".", "next", "(", ")", ...
Brent's Cycle Detector. See help(cycle_detector) for more context. Args: *args: Two iterators issueing the exact same sequence: -or- f, start: Function and starting state for finite state machine Yields: Values yielded by sequence_a if it terminates, undefined if a cycle is found. Raises: CycleFound if exception is found; if called with f and `start`, the parametres `first` and `period` will be defined indicating the offset of start of the cycle and the cycle's period.
[ "Brent", "s", "Cycle", "Detector", "." ]
python
test
biolink/ontobio
ontobio/sim/annotation_scorer.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/annotation_scorer.py#L106-L116
def _get_scaled_score( simple_score: float, categorical_score: float, category_weight: Optional[float] = .5) -> float: """ Scaled score is the weighted average of the simple score and categorical score """ return np.average( [simple_score, categorical_score], weights=[1, category_weight] )
[ "def", "_get_scaled_score", "(", "simple_score", ":", "float", ",", "categorical_score", ":", "float", ",", "category_weight", ":", "Optional", "[", "float", "]", "=", ".5", ")", "->", "float", ":", "return", "np", ".", "average", "(", "[", "simple_score", ...
Scaled score is the weighted average of the simple score and categorical score
[ "Scaled", "score", "is", "the", "weighted", "average", "of", "the", "simple", "score", "and", "categorical", "score" ]
python
train
vinci1it2000/schedula
examples/processing_chain/process.py
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/examples/processing_chain/process.py#L51-L65
def save_outputs(outputs, output_fpath): """ Save model outputs in an Excel file. :param outputs: Model outputs. :type outputs: dict :param output_fpath: Output file path. :type output_fpath: str """ df = pd.DataFrame(outputs) with pd.ExcelWriter(output_fpath) as writer: df.to_excel(writer)
[ "def", "save_outputs", "(", "outputs", ",", "output_fpath", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "outputs", ")", "with", "pd", ".", "ExcelWriter", "(", "output_fpath", ")", "as", "writer", ":", "df", ".", "to_excel", "(", "writer", ")" ]
Save model outputs in an Excel file. :param outputs: Model outputs. :type outputs: dict :param output_fpath: Output file path. :type output_fpath: str
[ "Save", "model", "outputs", "in", "an", "Excel", "file", "." ]
python
train
aloetesting/aloe_webdriver
aloe_webdriver/__init__.py
https://github.com/aloetesting/aloe_webdriver/blob/65d847da4bdc63f9c015cb19d4efdee87df8ffad/aloe_webdriver/__init__.py#L706-L713
def assert_radio_selected(self, value): """ Assert the radio button with the given label (recommended), name or id is chosen. """ radio = find_field(world.browser, 'radio', value) assert radio, "Cannot find a '{}' radio button.".format(value) assert radio.is_selected(), "Radio button should be selected."
[ "def", "assert_radio_selected", "(", "self", ",", "value", ")", ":", "radio", "=", "find_field", "(", "world", ".", "browser", ",", "'radio'", ",", "value", ")", "assert", "radio", ",", "\"Cannot find a '{}' radio button.\"", ".", "format", "(", "value", ")", ...
Assert the radio button with the given label (recommended), name or id is chosen.
[ "Assert", "the", "radio", "button", "with", "the", "given", "label", "(", "recommended", ")", "name", "or", "id", "is", "chosen", "." ]
python
train
SuLab/WikidataIntegrator
wikidataintegrator/wdi_core.py
https://github.com/SuLab/WikidataIntegrator/blob/8ceb2ed1c08fec070ec9edfcf7db7b8691481b62/wikidataintegrator/wdi_core.py#L818-L829
def get_description(self, lang='en'): """ Retrieve the description in a certain language :param lang: The Wikidata language the description should be retrieved for :return: Returns the description string """ if self.fast_run: return list(self.fast_run_container.get_language_data(self.wd_item_id, lang, 'description'))[0] if 'descriptions' not in self.wd_json_representation or lang not in self.wd_json_representation['descriptions']: return '' else: return self.wd_json_representation['descriptions'][lang]['value']
[ "def", "get_description", "(", "self", ",", "lang", "=", "'en'", ")", ":", "if", "self", ".", "fast_run", ":", "return", "list", "(", "self", ".", "fast_run_container", ".", "get_language_data", "(", "self", ".", "wd_item_id", ",", "lang", ",", "'descripti...
Retrieve the description in a certain language :param lang: The Wikidata language the description should be retrieved for :return: Returns the description string
[ "Retrieve", "the", "description", "in", "a", "certain", "language", ":", "param", "lang", ":", "The", "Wikidata", "language", "the", "description", "should", "be", "retrieved", "for", ":", "return", ":", "Returns", "the", "description", "string" ]
python
train
iotile/coretools
iotilecore/iotile/core/hw/transport/adapterstream.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/adapterstream.py#L247-L265
def _try_reconnect(self): """Try to recover an interrupted connection.""" try: if self.connection_interrupted: self.connect_direct(self.connection_string, force=True) self.connection_interrupted = False self.connected = True # Reenable streaming interface if that was open before as well if self._reports is not None: self._loop.run_coroutine(self.adapter.open_interface(0, 'streaming')) # Reenable tracing interface if that was open before as well if self._traces is not None: self._loop.run_coroutine(self.adapter.open_interface(0, 'tracing')) except HardwareError as exc: self._logger.exception("Error reconnecting to device after an unexpected disconnect") raise HardwareError("Device disconnected unexpectedly and we could not reconnect", reconnect_error=exc) from exc
[ "def", "_try_reconnect", "(", "self", ")", ":", "try", ":", "if", "self", ".", "connection_interrupted", ":", "self", ".", "connect_direct", "(", "self", ".", "connection_string", ",", "force", "=", "True", ")", "self", ".", "connection_interrupted", "=", "F...
Try to recover an interrupted connection.
[ "Try", "to", "recover", "an", "interrupted", "connection", "." ]
python
train
CivicSpleen/ambry
ambry/bundle/bundle.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L2736-L2739
def is_finalized(self): """Return True if the bundle is installed.""" return self.state == self.STATES.FINALIZED or self.state == self.STATES.INSTALLED
[ "def", "is_finalized", "(", "self", ")", ":", "return", "self", ".", "state", "==", "self", ".", "STATES", ".", "FINALIZED", "or", "self", ".", "state", "==", "self", ".", "STATES", ".", "INSTALLED" ]
Return True if the bundle is installed.
[ "Return", "True", "if", "the", "bundle", "is", "installed", "." ]
python
train
gkbrk/hackchat
hackchat.py
https://github.com/gkbrk/hackchat/blob/f8c96dc1ce528ba7800130e43848127f0db3e057/hackchat.py#L38-L41
def _send_packet(self, packet): """Sends <packet> (<dict>) to https://hack.chat.""" encoded = json.dumps(packet) self.ws.send(encoded)
[ "def", "_send_packet", "(", "self", ",", "packet", ")", ":", "encoded", "=", "json", ".", "dumps", "(", "packet", ")", "self", ".", "ws", ".", "send", "(", "encoded", ")" ]
Sends <packet> (<dict>) to https://hack.chat.
[ "Sends", "<packet", ">", "(", "<dict", ">", ")", "to", "https", ":", "//", "hack", ".", "chat", "." ]
python
train
MycroftAI/mycroft-precise
precise/scripts/train_generated.py
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_generated.py#L131-L141
def chunk_audio_pieces(self, pieces, chunk_size): """Convert chunks of audio into a series of equally sized pieces""" left_over = np.array([]) for piece in pieces: if left_over.size == 0: combined = piece else: combined = np.concatenate([left_over, piece], axis=-1) for chunk in chunk_audio(combined.T, chunk_size): yield chunk.T left_over = piece[-(len(piece) % chunk_size):]
[ "def", "chunk_audio_pieces", "(", "self", ",", "pieces", ",", "chunk_size", ")", ":", "left_over", "=", "np", ".", "array", "(", "[", "]", ")", "for", "piece", "in", "pieces", ":", "if", "left_over", ".", "size", "==", "0", ":", "combined", "=", "pie...
Convert chunks of audio into a series of equally sized pieces
[ "Convert", "chunks", "of", "audio", "into", "a", "series", "of", "equally", "sized", "pieces" ]
python
train
rfverbruggen/rachiopy
rachiopy/zone.py
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/zone.py#L11-L15
def start(self, zone_id, duration): """Start a zone.""" path = 'zone/start' payload = {'id': zone_id, 'duration': duration} return self.rachio.put(path, payload)
[ "def", "start", "(", "self", ",", "zone_id", ",", "duration", ")", ":", "path", "=", "'zone/start'", "payload", "=", "{", "'id'", ":", "zone_id", ",", "'duration'", ":", "duration", "}", "return", "self", ".", "rachio", ".", "put", "(", "path", ",", ...
Start a zone.
[ "Start", "a", "zone", "." ]
python
train
mapillary/mapillary_tools
mapillary_tools/exif_read.py
https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L20-L39
def format_time(time_string): ''' Format time string with invalid time elements in hours/minutes/seconds Format for the timestring needs to be "%Y_%m_%d_%H_%M_%S" e.g. 2014_03_31_24_10_11 => 2014_04_01_00_10_11 ''' subseconds = False data = time_string.split("_") hours, minutes, seconds = int(data[3]), int(data[4]), int(data[5]) date = datetime.datetime.strptime("_".join(data[:3]), "%Y_%m_%d") subsec = 0.0 if len(data) == 7: if float(data[6]) != 0: subsec = float(data[6]) / 10**len(data[6]) subseconds = True date_time = date + \ datetime.timedelta(hours=hours, minutes=minutes, seconds=seconds + subsec) return date_time, subseconds
[ "def", "format_time", "(", "time_string", ")", ":", "subseconds", "=", "False", "data", "=", "time_string", ".", "split", "(", "\"_\"", ")", "hours", ",", "minutes", ",", "seconds", "=", "int", "(", "data", "[", "3", "]", ")", ",", "int", "(", "data"...
Format time string with invalid time elements in hours/minutes/seconds Format for the timestring needs to be "%Y_%m_%d_%H_%M_%S" e.g. 2014_03_31_24_10_11 => 2014_04_01_00_10_11
[ "Format", "time", "string", "with", "invalid", "time", "elements", "in", "hours", "/", "minutes", "/", "seconds", "Format", "for", "the", "timestring", "needs", "to", "be", "%Y_%m_%d_%H_%M_%S" ]
python
train
dmwm/DBS
Server/Python/src/dbs/business/DBSOutputConfig.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/business/DBSOutputConfig.py#L55-L107
def insertOutputConfig(self, businput): """ Method to insert the Output Config. app_name, release_version, pset_hash, global_tag and output_module_label are required. args: businput(dic): input dictionary. Updated Oct 12, 2011 """ if not ("app_name" in businput and "release_version" in businput\ and "pset_hash" in businput and "output_module_label" in businput and "global_tag" in businput): dbsExceptionHandler('dbsException-invalid-input', "business/DBSOutputConfig/insertOutputConfig require:\ app_name, release_version, pset_hash, output_module_label and global_tag") conn = self.dbi.connection() tran = conn.begin() try: # Proceed with o/p module insertion businput['scenario'] = businput.get("scenario", None) businput['pset_name'] = businput.get("pset_name", None) self.outmodin.execute(conn, businput, tran) tran.commit() tran = None except SQLAlchemyIntegrityError as ex: if str(ex).find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1: #if the validation is due to a unique constrain break in OUTPUT_MODULE_CONFIGS if str(ex).find("TUC_OMC_1") != -1: pass #otherwise, try again else: try: self.outmodin.execute(conn, businput, tran) tran.commit() tran = None except SQLAlchemyIntegrityError as ex1: if str(ex1).find("unique constraint") != -1 and str(ex1).find("TUC_OMC_1") != -1: pass except Exception as e1: if tran: tran.rollback() tran = None raise else: raise except Exception as e: if tran: tran.rollback() raise finally: if tran: tran.rollback() if conn: conn.close()
[ "def", "insertOutputConfig", "(", "self", ",", "businput", ")", ":", "if", "not", "(", "\"app_name\"", "in", "businput", "and", "\"release_version\"", "in", "businput", "and", "\"pset_hash\"", "in", "businput", "and", "\"output_module_label\"", "in", "businput", "...
Method to insert the Output Config. app_name, release_version, pset_hash, global_tag and output_module_label are required. args: businput(dic): input dictionary. Updated Oct 12, 2011
[ "Method", "to", "insert", "the", "Output", "Config", ".", "app_name", "release_version", "pset_hash", "global_tag", "and", "output_module_label", "are", "required", ".", "args", ":", "businput", "(", "dic", ")", ":", "input", "dictionary", "." ]
python
train
MisterY/gnucash-portfolio
gnucash_portfolio/splitsaggregate.py
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/splitsaggregate.py#L39-L47
def get_for_accounts(self, accounts: List[Account]): ''' Get all splits for the given accounts ''' account_ids = [acc.guid for acc in accounts] query = ( self.query .filter(Split.account_guid.in_(account_ids)) ) splits = query.all() return splits
[ "def", "get_for_accounts", "(", "self", ",", "accounts", ":", "List", "[", "Account", "]", ")", ":", "account_ids", "=", "[", "acc", ".", "guid", "for", "acc", "in", "accounts", "]", "query", "=", "(", "self", ".", "query", ".", "filter", "(", "Split...
Get all splits for the given accounts
[ "Get", "all", "splits", "for", "the", "given", "accounts" ]
python
train
consbio/parserutils
parserutils/collections.py
https://github.com/consbio/parserutils/blob/f13f80db99ed43479336b116e38512e3566e4623/parserutils/collections.py#L228-L245
def reduce_value(value, default=EMPTY_STR): """ :return: a single value from lists, tuples or sets with one item; otherwise, the value itself if not empty or the default if it is. """ if hasattr(value, '__len__'): vlen = len(value) if vlen == 0: return default elif vlen == 1: if isinstance(value, set): return value.pop() elif isinstance(value, _reduce_types): return value[0] return default if value is None else value
[ "def", "reduce_value", "(", "value", ",", "default", "=", "EMPTY_STR", ")", ":", "if", "hasattr", "(", "value", ",", "'__len__'", ")", ":", "vlen", "=", "len", "(", "value", ")", "if", "vlen", "==", "0", ":", "return", "default", "elif", "vlen", "=="...
:return: a single value from lists, tuples or sets with one item; otherwise, the value itself if not empty or the default if it is.
[ ":", "return", ":", "a", "single", "value", "from", "lists", "tuples", "or", "sets", "with", "one", "item", ";", "otherwise", "the", "value", "itself", "if", "not", "empty", "or", "the", "default", "if", "it", "is", "." ]
python
train
OrangeTux/einder
einder/client.py
https://github.com/OrangeTux/einder/blob/deb2c5f79a69b684257fe939659c3bd751556fd5/einder/client.py#L114-L118
def power_on(self): """ Power on the set-top box. """ if not self.is_powered_on(): log.debug('Powering on set-top box at %s:%s.', self.ip, self.port) self.send_key(keys.POWER)
[ "def", "power_on", "(", "self", ")", ":", "if", "not", "self", ".", "is_powered_on", "(", ")", ":", "log", ".", "debug", "(", "'Powering on set-top box at %s:%s.'", ",", "self", ".", "ip", ",", "self", ".", "port", ")", "self", ".", "send_key", "(", "k...
Power on the set-top box.
[ "Power", "on", "the", "set", "-", "top", "box", "." ]
python
train
pandas-dev/pandas
pandas/util/_exceptions.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_exceptions.py#L5-L16
def rewrite_exception(old_name, new_name): """Rewrite the message of an exception.""" try: yield except Exception as e: msg = e.args[0] msg = msg.replace(old_name, new_name) args = (msg,) if len(e.args) > 1: args = args + e.args[1:] e.args = args raise
[ "def", "rewrite_exception", "(", "old_name", ",", "new_name", ")", ":", "try", ":", "yield", "except", "Exception", "as", "e", ":", "msg", "=", "e", ".", "args", "[", "0", "]", "msg", "=", "msg", ".", "replace", "(", "old_name", ",", "new_name", ")",...
Rewrite the message of an exception.
[ "Rewrite", "the", "message", "of", "an", "exception", "." ]
python
train
inveniosoftware-attic/invenio-utils
invenio_utils/html.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/html.py#L661-L678
def remove_html_markup(text, replacechar=' ', remove_escaped_chars_p=True): """ Remove HTML markup from text. @param text: Input text. @type text: string. @param replacechar: By which character should we replace HTML markup. Usually, a single space or an empty string are nice values. @type replacechar: string @param remove_escaped_chars_p: If True, also remove escaped characters like '&amp;', '&lt;', '&gt;' and '&quot;'. @type remove_escaped_chars_p: boolean @return: Input text with HTML markup removed. @rtype: string """ if not remove_escaped_chars_p: return RE_HTML_WITHOUT_ESCAPED_CHARS.sub(replacechar, text) return RE_HTML.sub(replacechar, text)
[ "def", "remove_html_markup", "(", "text", ",", "replacechar", "=", "' '", ",", "remove_escaped_chars_p", "=", "True", ")", ":", "if", "not", "remove_escaped_chars_p", ":", "return", "RE_HTML_WITHOUT_ESCAPED_CHARS", ".", "sub", "(", "replacechar", ",", "text", ")",...
Remove HTML markup from text. @param text: Input text. @type text: string. @param replacechar: By which character should we replace HTML markup. Usually, a single space or an empty string are nice values. @type replacechar: string @param remove_escaped_chars_p: If True, also remove escaped characters like '&amp;', '&lt;', '&gt;' and '&quot;'. @type remove_escaped_chars_p: boolean @return: Input text with HTML markup removed. @rtype: string
[ "Remove", "HTML", "markup", "from", "text", "." ]
python
train
michaelaye/pyciss
pyciss/opusapi.py
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/opusapi.py#L184-L196
def create_request_with_query(self, kind, query, size="thumb", fmt="json"): """api/data.[fmt], api/images/[size].[fmt] api/files.[fmt] kind = ['data', 'images', 'files'] """ if kind == "data" or kind == "files": url = "{}/{}.{}".format(base_url, kind, fmt) elif kind == "images": url = "{}/images/{}.{}".format(base_url, size, fmt) self.url = url self.r = requests.get(url, params=unquote(urlencode(query)))
[ "def", "create_request_with_query", "(", "self", ",", "kind", ",", "query", ",", "size", "=", "\"thumb\"", ",", "fmt", "=", "\"json\"", ")", ":", "if", "kind", "==", "\"data\"", "or", "kind", "==", "\"files\"", ":", "url", "=", "\"{}/{}.{}\"", ".", "form...
api/data.[fmt], api/images/[size].[fmt] api/files.[fmt] kind = ['data', 'images', 'files']
[ "api", "/", "data", ".", "[", "fmt", "]", "api", "/", "images", "/", "[", "size", "]", ".", "[", "fmt", "]", "api", "/", "files", ".", "[", "fmt", "]" ]
python
train
ContextLab/hypertools
hypertools/_shared/params.py
https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/_shared/params.py#L18-L48
def default_params(model, update_dict=None): """ Loads and updates default model parameters Parameters ---------- model : str The name of a model update_dict : dict A dict to update default parameters Returns ---------- params : dict A dictionary of parameters """ if model in parameters: params = parameters[model].copy() else: params = None if update_dict: if params is None: params = {} params.update(update_dict) return params
[ "def", "default_params", "(", "model", ",", "update_dict", "=", "None", ")", ":", "if", "model", "in", "parameters", ":", "params", "=", "parameters", "[", "model", "]", ".", "copy", "(", ")", "else", ":", "params", "=", "None", "if", "update_dict", ":...
Loads and updates default model parameters Parameters ---------- model : str The name of a model update_dict : dict A dict to update default parameters Returns ---------- params : dict A dictionary of parameters
[ "Loads", "and", "updates", "default", "model", "parameters" ]
python
train
sbg/sevenbridges-python
sevenbridges/models/file.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/file.py#L491-L507
def list_files(self, offset=None, limit=None, api=None): """List files in a folder :param api: Api instance :param offset: Pagination offset :param limit: Pagination limit :return: List of files """ api = api or self._API if not self.is_folder(): raise SbgError('{name} is not a folder'.format(name=self.name)) url = self._URL['list_folder'].format(id=self.id) return super(File, self.__class__)._query( api=api, url=url, offset=offset, limit=limit, fields='_all' )
[ "def", "list_files", "(", "self", ",", "offset", "=", "None", ",", "limit", "=", "None", ",", "api", "=", "None", ")", ":", "api", "=", "api", "or", "self", ".", "_API", "if", "not", "self", ".", "is_folder", "(", ")", ":", "raise", "SbgError", "...
List files in a folder :param api: Api instance :param offset: Pagination offset :param limit: Pagination limit :return: List of files
[ "List", "files", "in", "a", "folder", ":", "param", "api", ":", "Api", "instance", ":", "param", "offset", ":", "Pagination", "offset", ":", "param", "limit", ":", "Pagination", "limit", ":", "return", ":", "List", "of", "files" ]
python
train
drhagen/parsita
parsita/parsers.py
https://github.com/drhagen/parsita/blob/d97414a05541f48231381f607d1d2e6b50781d39/parsita/parsers.py#L451-L463
def rep1(parser: Union[Parser, Sequence[Input]]) -> RepeatedOnceParser: """Match a parser one or more times repeatedly. This matches ``parser`` multiple times in a row. If it matches as least once, it returns a list of values from each time ``parser`` matched. If it does not match ``parser`` at all, it fails. Args: parser: Parser or literal """ if isinstance(parser, str): parser = lit(parser) return RepeatedOnceParser(parser)
[ "def", "rep1", "(", "parser", ":", "Union", "[", "Parser", ",", "Sequence", "[", "Input", "]", "]", ")", "->", "RepeatedOnceParser", ":", "if", "isinstance", "(", "parser", ",", "str", ")", ":", "parser", "=", "lit", "(", "parser", ")", "return", "Re...
Match a parser one or more times repeatedly. This matches ``parser`` multiple times in a row. If it matches as least once, it returns a list of values from each time ``parser`` matched. If it does not match ``parser`` at all, it fails. Args: parser: Parser or literal
[ "Match", "a", "parser", "one", "or", "more", "times", "repeatedly", "." ]
python
test
madprime/cgivar2gvcf
cgivar2gvcf/__init__.py
https://github.com/madprime/cgivar2gvcf/blob/13b4cd8da08669f7e4b0ceed77a7a17082f91037/cgivar2gvcf/__init__.py#L473-L522
def from_command_line(): """ Run CGI var to gVCF conversion from the command line. """ # Parse options parser = argparse.ArgumentParser( description='Convert Complete Genomics var files to gVCF format.') parser.add_argument( '-d', '--refseqdir', metavar='REFSEQDIR', required=True, dest='refseqdir', help='Directory twobit reference genomes files are stored.') parser.add_argument( '-i', '--input', metavar='INPUTVARFILE', dest='cgivarfile', help='Path to Complete Genomics var file to convert. If omitted, data ' ' also be piped in as standard input.') parser.add_argument( '-o', '--output', metavar='OUTPUTVCFFILE', dest='vcfoutfile', help='Path to where to save output VCF file.') parser.add_argument( '-D', '--download', action='store_true', dest='downloadrefseq', help='Download the 2bit file from UCSC to REFSEQDIR, if needed.') parser.add_argument( '-v', '--var-only', action='store_true', dest='varonly', help='Only report variant lines (i.e. VCF, but not gVCF)') args = parser.parse_args() # Get local twobit file from its directory. Download and store if needed. twobit_path, twobit_name = get_reference_genome_file( args.refseqdir, build='b37') # Handle input if sys.stdin.isatty(): # false if data is piped in var_input = args.cgivarfile else: var_input = sys.stdin # Handle output if args.vcfoutfile: convert_to_file(var_input, args.vcfoutfile, twobit_path, twobit_name, args.varonly) else: for line in convert( cgi_input=var_input, twobit_ref=twobit_path, twobit_name=twobit_name, var_only=args.varonly): print(line)
[ "def", "from_command_line", "(", ")", ":", "# Parse options", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Convert Complete Genomics var files to gVCF format.'", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--refseqdir'", ",", ...
Run CGI var to gVCF conversion from the command line.
[ "Run", "CGI", "var", "to", "gVCF", "conversion", "from", "the", "command", "line", "." ]
python
train
openpermissions/perch
perch/migrations/user_22e76f4ff8bd41e19aa52839fc8f13a1.py
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/migrations/user_22e76f4ff8bd41e19aa52839fc8f13a1.py#L10-L23
def migrate_user(instance): """ Move User.organisations['global']['role'] to top-level property and remove verified flag """ instance._resource.pop('verified', None) if 'role' in instance._resource: return instance global_org = instance.organisations.pop('global', {}) instance.role = global_org.get('role', perch.User.roles.default.value) return instance
[ "def", "migrate_user", "(", "instance", ")", ":", "instance", ".", "_resource", ".", "pop", "(", "'verified'", ",", "None", ")", "if", "'role'", "in", "instance", ".", "_resource", ":", "return", "instance", "global_org", "=", "instance", ".", "organisations...
Move User.organisations['global']['role'] to top-level property and remove verified flag
[ "Move", "User", ".", "organisations", "[", "global", "]", "[", "role", "]", "to", "top", "-", "level", "property", "and", "remove", "verified", "flag" ]
python
train
ioos/compliance-checker
compliance_checker/cf/cf.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cf/cf.py#L850-L871
def check_convention_globals(self, ds): ''' Check the common global attributes are strings if they exist. CF §2.6.2 title/history global attributes, must be strings. Do not need to exist. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of Results ''' attrs = ['title', 'history'] valid_globals = TestCtx(BaseCheck.MEDIUM, self.section_titles['2.6']) for attr in attrs: dataset_attr = getattr(ds, attr, None) is_string = isinstance(dataset_attr, basestring) valid_globals.assert_true(is_string and len(dataset_attr), "§2.6.2 global attribute {} should exist and be a non-empty string" # subsection message "".format(attr)) return valid_globals.to_result()
[ "def", "check_convention_globals", "(", "self", ",", "ds", ")", ":", "attrs", "=", "[", "'title'", ",", "'history'", "]", "valid_globals", "=", "TestCtx", "(", "BaseCheck", ".", "MEDIUM", ",", "self", ".", "section_titles", "[", "'2.6'", "]", ")", "for", ...
Check the common global attributes are strings if they exist. CF §2.6.2 title/history global attributes, must be strings. Do not need to exist. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of Results
[ "Check", "the", "common", "global", "attributes", "are", "strings", "if", "they", "exist", "." ]
python
train
theno/fabsetup
fabsetup/fabfile/setup/__init__.py
https://github.com/theno/fabsetup/blob/ced728abff93551ba5677e63bc1bdc0ef5ca5777/fabsetup/fabfile/setup/__init__.py#L92-L117
def solarized(): '''Set solarized colors in urxvt, tmux, and vim. More Infos: * Getting solarized colors right with urxvt, st, tmux and vim: https://bbs.archlinux.org/viewtopic.php?id=164108 * Creating ~/.Xresources: https://wiki.archlinux.org/index.php/Rxvt-unicode#Creating_.7E.2F.Xresources * Select a good font on Ubuntu: https://michaelheap.com/getting-solarized-working-on-ubuntu/ * tmux and 256 colors: http://unix.stackexchange.com/a/118903 ''' install_packages(['rxvt-unicode', 'tmux', 'vim']) install_file_legacy('~/.Xresources') if env.host_string == 'localhost': run('xrdb ~/.Xresources') # install and call term_colors run('mkdir -p ~/bin') install_file_legacy('~/bin/term_colors') run('chmod 755 ~/bin/term_colors') run('~/bin/term_colors')
[ "def", "solarized", "(", ")", ":", "install_packages", "(", "[", "'rxvt-unicode'", ",", "'tmux'", ",", "'vim'", "]", ")", "install_file_legacy", "(", "'~/.Xresources'", ")", "if", "env", ".", "host_string", "==", "'localhost'", ":", "run", "(", "'xrdb ~/.Xres...
Set solarized colors in urxvt, tmux, and vim. More Infos: * Getting solarized colors right with urxvt, st, tmux and vim: https://bbs.archlinux.org/viewtopic.php?id=164108 * Creating ~/.Xresources: https://wiki.archlinux.org/index.php/Rxvt-unicode#Creating_.7E.2F.Xresources * Select a good font on Ubuntu: https://michaelheap.com/getting-solarized-working-on-ubuntu/ * tmux and 256 colors: http://unix.stackexchange.com/a/118903
[ "Set", "solarized", "colors", "in", "urxvt", "tmux", "and", "vim", "." ]
python
train
gpennington/PyMarvel
marvel/story.py
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/story.py#L91-L100
def get_creators(self, *args, **kwargs): """ Returns a full CreatorDataWrapper object for this story. /stories/{storyId}/creators :returns: CreatorDataWrapper -- A new request to API. Contains full results set. """ from .creator import Creator, CreatorDataWrapper return self.get_related_resource(Creator, CreatorDataWrapper, args, kwargs)
[ "def", "get_creators", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", ".", "creator", "import", "Creator", ",", "CreatorDataWrapper", "return", "self", ".", "get_related_resource", "(", "Creator", ",", "CreatorDataWrapper", ",", "a...
Returns a full CreatorDataWrapper object for this story. /stories/{storyId}/creators :returns: CreatorDataWrapper -- A new request to API. Contains full results set.
[ "Returns", "a", "full", "CreatorDataWrapper", "object", "for", "this", "story", "." ]
python
train
pjuren/pyokit
src/pyokit/io/fastaIterators.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/fastaIterators.py#L148-L186
def fastaIterator(fn, useMutableString=False, verbose=False): """ A generator function which yields fastaSequence objects from a fasta-format file or stream. :param fn: a file-like stream or a string; if this is a string, it's treated as a filename, else it's treated it as a file-like object, which must have a readline() method. :param useMustableString: if True, construct sequences from lists of chars, rather than python string objects, to allow more efficient editing. Use with caution. :param verbose: if True, output additional status messages to stderr about progress """ fh = fn if type(fh).__name__ == "str": fh = open(fh) if verbose: try: pind = __build_progress_indicator(fh) except ProgressIndicatorError as e: sys.stderr.write("Warning: unable to show progress for stream. " + "Reason: " + str(e)) verbose = False prev_line = None while True: seqHeader = __read_seq_header(fh, prev_line) name = seqHeader[1:].strip() seq_data, prev_line = __read_seq_data(fh) if verbose: pind.done = fh.tell() pind.showProgress(to_strm=sys.stderr) yield Sequence(name, seq_data, useMutableString) # remember where we stopped for next call, or finish if prev_line == "": break
[ "def", "fastaIterator", "(", "fn", ",", "useMutableString", "=", "False", ",", "verbose", "=", "False", ")", ":", "fh", "=", "fn", "if", "type", "(", "fh", ")", ".", "__name__", "==", "\"str\"", ":", "fh", "=", "open", "(", "fh", ")", "if", "verbos...
A generator function which yields fastaSequence objects from a fasta-format file or stream. :param fn: a file-like stream or a string; if this is a string, it's treated as a filename, else it's treated it as a file-like object, which must have a readline() method. :param useMustableString: if True, construct sequences from lists of chars, rather than python string objects, to allow more efficient editing. Use with caution. :param verbose: if True, output additional status messages to stderr about progress
[ "A", "generator", "function", "which", "yields", "fastaSequence", "objects", "from", "a", "fasta", "-", "format", "file", "or", "stream", "." ]
python
train
boundary/pulse-api-cli
boundary/hostgroup_update.py
https://github.com/boundary/pulse-api-cli/blob/b01ca65b442eed19faac309c9d62bbc3cb2c098f/boundary/hostgroup_update.py#L38-L47
def get_arguments(self): """ Extracts the specific arguments of this CLI """ HostgroupModify.get_arguments(self) if self.args.host_group_id is not None: self.host_group_id = self.args.host_group_id self.path = "v1/hostgroup/" + str(self.host_group_id)
[ "def", "get_arguments", "(", "self", ")", ":", "HostgroupModify", ".", "get_arguments", "(", "self", ")", "if", "self", ".", "args", ".", "host_group_id", "is", "not", "None", ":", "self", ".", "host_group_id", "=", "self", ".", "args", ".", "host_group_id...
Extracts the specific arguments of this CLI
[ "Extracts", "the", "specific", "arguments", "of", "this", "CLI" ]
python
test
nschloe/matplotlib2tikz
matplotlib2tikz/save.py
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/save.py#L322-L327
def extend(self, content, zorder): """ Extends with a list and a z-order """ if zorder not in self._content: self._content[zorder] = [] self._content[zorder].extend(content)
[ "def", "extend", "(", "self", ",", "content", ",", "zorder", ")", ":", "if", "zorder", "not", "in", "self", ".", "_content", ":", "self", ".", "_content", "[", "zorder", "]", "=", "[", "]", "self", ".", "_content", "[", "zorder", "]", ".", "extend"...
Extends with a list and a z-order
[ "Extends", "with", "a", "list", "and", "a", "z", "-", "order" ]
python
train
RJT1990/pyflux
pyflux/ssm/ndynlin.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/ndynlin.py#L151-L169
def state_likelihood(self, beta, alpha): """ Returns likelihood of the states given the variance latent variables Parameters ---------- beta : np.array Contains untransformed starting values for latent variables alpha : np.array State matrix Returns ---------- State likelihood """ _, _, _, Q = self._ss_matrices(beta) state_lik = 0 for i in range(alpha.shape[0]): state_lik += np.sum(ss.norm.logpdf(alpha[i][1:]-alpha[i][:-1],loc=0,scale=np.power(Q[i][i],0.5))) return state_lik
[ "def", "state_likelihood", "(", "self", ",", "beta", ",", "alpha", ")", ":", "_", ",", "_", ",", "_", ",", "Q", "=", "self", ".", "_ss_matrices", "(", "beta", ")", "state_lik", "=", "0", "for", "i", "in", "range", "(", "alpha", ".", "shape", "[",...
Returns likelihood of the states given the variance latent variables Parameters ---------- beta : np.array Contains untransformed starting values for latent variables alpha : np.array State matrix Returns ---------- State likelihood
[ "Returns", "likelihood", "of", "the", "states", "given", "the", "variance", "latent", "variables" ]
python
train
dstufft/crust
crust/query.py
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L501-L511
def _fill_cache(self, num=None): """ Fills the result cache with 'num' more entries (or until the results iterator is exhausted). """ if self._iter: try: for i in range(num or ITER_CHUNK_SIZE): self._result_cache.append(next(self._iter)) except StopIteration: self._iter = None
[ "def", "_fill_cache", "(", "self", ",", "num", "=", "None", ")", ":", "if", "self", ".", "_iter", ":", "try", ":", "for", "i", "in", "range", "(", "num", "or", "ITER_CHUNK_SIZE", ")", ":", "self", ".", "_result_cache", ".", "append", "(", "next", "...
Fills the result cache with 'num' more entries (or until the results iterator is exhausted).
[ "Fills", "the", "result", "cache", "with", "num", "more", "entries", "(", "or", "until", "the", "results", "iterator", "is", "exhausted", ")", "." ]
python
train