repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
chrisspen/dtree
dtree.py
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L530-L541
def choose_attribute(data, attributes, class_attr, fitness, method):
    """
    Cycles through all the attributes and returns the attribute with the
    highest information gain (or lowest entropy).

    Parameters
    ----------
    data : the training records handed through to ``fitness``
    attributes : iterable of candidate attribute names
    class_attr : the class attribute; never considered as a candidate
    fitness : callable(data, attr, class_attr, method=...) -> numeric gain
    method : forwarded unchanged to ``fitness``

    Returns
    -------
    The attribute with the highest gain, or None when ``attributes``
    contains no candidate other than ``class_attr``.
    """
    best_gain = float('-inf')  # clearer than the old -1e999999 literal
    best_attr = None
    for attr in attributes:
        if attr == class_attr:
            continue
        gain = fitness(data, attr, class_attr, method=method)
        # Track gain and attribute separately; the previous tuple-based
        # max() could raise TypeError on a gain tie by comparing an
        # attribute name against the initial None sentinel.
        if gain > best_gain:
            best_gain = gain
            best_attr = attr
    return best_attr
[ "def", "choose_attribute", "(", "data", ",", "attributes", ",", "class_attr", ",", "fitness", ",", "method", ")", ":", "best", "=", "(", "-", "1e999999", ",", "None", ")", "for", "attr", "in", "attributes", ":", "if", "attr", "==", "class_attr", ":", "...
Cycles through all the attributes and returns the attribute with the highest information gain (or lowest entropy).
[ "Cycles", "through", "all", "the", "attributes", "and", "returns", "the", "attribute", "with", "the", "highest", "information", "gain", "(", "or", "lowest", "entropy", ")", "." ]
python
train
Nic30/hwt
hwt/hdl/types/bitValFunctions.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/types/bitValFunctions.py#L91-L144
def bitsCmp(self, other, op, evalFn=None):
    """
    Compare two bit-vector operands with the comparison operator ``op``.

    :attention: If other is Bool signal convert this to bool
        (not ideal, due VHDL event operator)

    :param other: right-hand operand; converted to an HDL value via toHVal
    :param op: comparison operator definition (supplies ``_evalFn``)
    :param evalFn: optional evaluation function; defaults to ``op._evalFn``
    """
    other = toHVal(other)
    t = self._dtype
    ot = other._dtype
    iamVal = isinstance(self, Value)
    otherIsVal = isinstance(other, Value)
    if evalFn is None:
        evalFn = op._evalFn
    if iamVal and otherIsVal:
        # Both operands are constants: unify their types, then evaluate
        # the comparison immediately.
        if ot == BOOL:
            self = self._auto_cast(BOOL)
        elif t == ot:
            pass
        elif isinstance(ot, Integer):
            other = other._auto_cast(t)
        else:
            raise TypeError("Values of types (%r, %r) are not comparable" % (
                self._dtype, other._dtype))
        return bitsCmp__val(self, other, op, evalFn)
    else:
        # At least one operand is a signal: unify types the same way,
        # then build an Operator node for the netlist instead of a value.
        if ot == BOOL:
            self = self._auto_cast(BOOL)
        elif t == ot:
            pass
        elif isinstance(ot, Integer):
            other = other._auto_cast(self._dtype)
        else:
            raise TypeError("Values of types (%r, %r) are not comparable" % (
                self._dtype, other._dtype))
        # try to reduce useless cmp (e.g. comparisons with a fully-valid
        # constant whose outcome is statically known)
        res = None
        if otherIsVal and other._isFullVld():
            res = bitsCmp_detect_useless_cmp(self, other, op)
        elif iamVal and self._isFullVld():
            # Operands swapped, so the operator must be mirrored too.
            res = bitsCmp_detect_useless_cmp(other, self, CMP_OP_REVERSE[op])
        if res is None:
            pass
        elif isinstance(res, Value):
            # The comparison result is a constant; no operator needed.
            return res
        else:
            # Detection may only downgrade the operator to EQ.
            assert res == AllOps.EQ, res
            op = res
        return Operator.withRes(op, [self, other], BOOL)
[ "def", "bitsCmp", "(", "self", ",", "other", ",", "op", ",", "evalFn", "=", "None", ")", ":", "other", "=", "toHVal", "(", "other", ")", "t", "=", "self", ".", "_dtype", "ot", "=", "other", ".", "_dtype", "iamVal", "=", "isinstance", "(", "self", ...
:attention: If other is Bool signal convert this to bool (not ideal, due VHDL event operator)
[ ":", "attention", ":", "If", "other", "is", "Bool", "signal", "convert", "this", "to", "bool", "(", "not", "ideal", "due", "VHDL", "event", "operator", ")" ]
python
test
tmoerman/arboreto
arboreto/core.py
https://github.com/tmoerman/arboreto/blob/3ff7b6f987b32e5774771751dea646fa6feaaa52/arboreto/core.py#L85-L102
def to_tf_matrix(expression_matrix, gene_names, tf_names):
    """
    Slice the expression matrix down to the transcription-factor columns.

    :param expression_matrix: numpy matrix. Rows are observations and columns are genes.
    :param gene_names: a list of gene names. Each entry corresponds to the expression_matrix column with same index.
    :param tf_names: a list of transcription factor names. Should be a subset of gene_names.
    :return: tuple of:
             0: A numpy matrix representing the predictor matrix for the regressions.
             1: The gene names corresponding to the columns in the predictor matrix.
    """
    tf_indices = []
    tf_matrix_names = []
    for column_index, gene in enumerate(gene_names):
        if gene in tf_names:
            tf_indices.append(column_index)
            tf_matrix_names.append(gene)
    return expression_matrix[:, tf_indices], tf_matrix_names
[ "def", "to_tf_matrix", "(", "expression_matrix", ",", "gene_names", ",", "tf_names", ")", ":", "tuples", "=", "[", "(", "index", ",", "gene", ")", "for", "index", ",", "gene", "in", "enumerate", "(", "gene_names", ")", "if", "gene", "in", "tf_names", "]"...
:param expression_matrix: numpy matrix. Rows are observations and columns are genes. :param gene_names: a list of gene names. Each entry corresponds to the expression_matrix column with same index. :param tf_names: a list of transcription factor names. Should be a subset of gene_names. :return: tuple of: 0: A numpy matrix representing the predictor matrix for the regressions. 1: The gene names corresponding to the columns in the predictor matrix.
[ ":", "param", "expression_matrix", ":", "numpy", "matrix", ".", "Rows", "are", "observations", "and", "columns", "are", "genes", ".", ":", "param", "gene_names", ":", "a", "list", "of", "gene", "names", ".", "Each", "entry", "corresponds", "to", "the", "ex...
python
train
ceph/ceph-deploy
ceph_deploy/mon.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/mon.py#L290-L304
def hostname_is_compatible(conn, logger, provided_hostname):
    """
    Make sure that the host that we are connecting to has the same value as
    the `hostname` in the remote host, otherwise mons can fail not reaching
    quorum.
    """
    logger.debug('determining if provided host has same hostname in remote')
    remote_hostname = conn.remote_module.shortname()
    if remote_hostname != provided_hostname:
        banner = '*' * 80
        for line in (
            banner,
            'provided hostname must match remote hostname',
            'provided hostname: %s' % provided_hostname,
            'remote hostname: %s' % remote_hostname,
            'monitors may not reach quorum and create-keys will not complete',
            banner,
        ):
            logger.warning(line)
[ "def", "hostname_is_compatible", "(", "conn", ",", "logger", ",", "provided_hostname", ")", ":", "logger", ".", "debug", "(", "'determining if provided host has same hostname in remote'", ")", "remote_hostname", "=", "conn", ".", "remote_module", ".", "shortname", "(", ...
Make sure that the host that we are connecting to has the same value as the `hostname` in the remote host, otherwise mons can fail not reaching quorum.
[ "Make", "sure", "that", "the", "host", "that", "we", "are", "connecting", "to", "has", "the", "same", "value", "as", "the", "hostname", "in", "the", "remote", "host", "otherwise", "mons", "can", "fail", "not", "reaching", "quorum", "." ]
python
train
calmjs/calmjs
src/calmjs/dist.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L398-L484
def build_helpers_module_registry_dependencies(registry_name='calmjs.module'):
    """
    Return a tuple of functions that will provide the functions that
    return the relevant sets of module registry records based on the
    dependencies defined for the provided packages.

    The returned tuple is (get_module_registry_dependencies,
    flatten_module_registry_dependencies,
    flatten_parents_module_registry_dependencies); each closure defaults
    to the ``registry_name`` captured here.
    """

    def get_module_registry_dependencies(
            pkg_names, registry_name=registry_name, working_set=None):
        """
        Get dependencies for the given package names from module registry
        identified by registry name.

        For the given packages 'pkg_names' and the registry identified by
        'registry_name', resolve the exported location for just the package.
        """
        working_set = working_set or default_working_set
        registry = get(registry_name)
        # Unknown or non-module registries yield no records at all.
        if not isinstance(registry, BaseModuleRegistry):
            return {}
        result = {}
        for pkg_name in pkg_names:
            result.update(registry.get_records_for_package(pkg_name))
        return result

    def _flatten_module_registry_dependencies(
            pkg_names, registry_name, find_dists, working_set):
        """
        Flatten dependencies for the given package names from module
        registry identified by registry name using the find_dists function
        on the given working_set.

        For the given packages 'pkg_names' and the registry identified by
        'registry_name', resolve and flatten all the exported locations.
        """
        result = {}
        registry = get(registry_name)
        if not isinstance(registry, BaseModuleRegistry):
            return result
        # find_dists decides which distributions are in scope (the packages
        # themselves plus requirements, or only their parents).
        dists = find_dists(pkg_names, working_set=working_set)
        for dist in dists:
            result.update(registry.get_records_for_package(dist.project_name))
        return result

    def flatten_module_registry_dependencies(
            pkg_names, registry_name=registry_name, working_set=None):
        """
        Flatten dependencies for the specified packages from the module
        registry identified by registry name.

        For the given packages 'pkg_names' and the registry identified by
        'registry_name', resolve and flatten all the exported locations.
        """
        working_set = working_set or default_working_set
        return _flatten_module_registry_dependencies(
            pkg_names, registry_name, find_packages_requirements_dists,
            working_set)

    def flatten_parents_module_registry_dependencies(
            pkg_names, registry_name=registry_name, working_set=None):
        """
        Flatten dependencies for the parents of the specified packages
        from the module registry identified by registry name.

        For the given packages 'pkg_names' and the registry identified by
        'registry_name', resolve and flatten all the exported locations.
        """
        working_set = working_set or default_working_set
        return _flatten_module_registry_dependencies(
            pkg_names, registry_name, find_packages_parents_requirements_dists,
            working_set)

    return (
        get_module_registry_dependencies,
        flatten_module_registry_dependencies,
        flatten_parents_module_registry_dependencies,
    )
[ "def", "build_helpers_module_registry_dependencies", "(", "registry_name", "=", "'calmjs.module'", ")", ":", "def", "get_module_registry_dependencies", "(", "pkg_names", ",", "registry_name", "=", "registry_name", ",", "working_set", "=", "None", ")", ":", "\"\"\"\n ...
Return a tuple of funtions that will provide the functions that return the relevant sets of module registry records based on the dependencies defined for the provided packages.
[ "Return", "a", "tuple", "of", "funtions", "that", "will", "provide", "the", "functions", "that", "return", "the", "relevant", "sets", "of", "module", "registry", "records", "based", "on", "the", "dependencies", "defined", "for", "the", "provided", "packages", ...
python
train
stephanepechard/projy
projy/cmdline.py
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/cmdline.py#L11-L29
def docopt_arguments():
    """ Creates beautiful command-line interfaces.
        See https://github.com/docopt/docopt

        Returns the dict of parsed arguments produced by docopt.
    """
    # NOTE(review): the exact whitespace layout of this usage string is
    # significant to docopt's parser — confirm against the original file.
    doc = """Projy: Create templated project.

    Usage: projy <template> <project> [<substitution>...]
           projy -i | --info <template>
           projy -l | --list
           projy -h | --help
           projy -v | --version

    Options:
        -i, --info      Print information on a specific template.
        -l, --list      Print available template list.
        -h, --help      Show this help message and exit.
        -v, --version   Show program's version number and exit.
    """
    # Local import keeps the vendored docopt out of module import time.
    from projy.docopt import docopt
    return docopt(doc, argv=sys.argv[1:], version='0.1')
[ "def", "docopt_arguments", "(", ")", ":", "doc", "=", "\"\"\"Projy: Create templated project.\n\n Usage: projy <template> <project> [<substitution>...]\n projy -i | --info <template>\n projy -l | --list\n projy -h | --help\n projy -v | --version\n\n Option...
Creates beautiful command-line interfaces. See https://github.com/docopt/docopt
[ "Creates", "beautiful", "command", "-", "line", "interfaces", ".", "See", "https", ":", "//", "github", ".", "com", "/", "docopt", "/", "docopt" ]
python
train
DataONEorg/d1_python
lib_common/src/d1_common/ext/mimeparser.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/ext/mimeparser.py#L50-L66
def parse_mime_type(mime_type):
    """Carves up a mime-type and returns a tuple of the (type, subtype,
    params) where 'params' is a dictionary of all the parameters for the
    media range.

    For example, the media range 'application/xhtml;q=0.5' would get
    parsed into:

    ('application', 'xhtml', {'q': '0.5'})
    """
    parts = mime_type.split(";")
    # Every segment after the first ';' is a 'key=value' parameter pair.
    params = dict([tuple([s.strip() for s in param.split("=")])
                   for param in parts[1:]])
    full_type = parts[0].strip()
    # Java URLConnection class sends an Accept header that includes a single "*"
    # Turn it into a legal wildcard.
    if full_type == '*':
        full_type = '*/*'
    # Renamed from 'type'/'subtype' locals to avoid shadowing the builtin.
    (main_type, sub_type) = full_type.split("/")
    return (main_type.strip(), sub_type.strip(), params)
[ "def", "parse_mime_type", "(", "mime_type", ")", ":", "parts", "=", "mime_type", ".", "split", "(", "\";\"", ")", "params", "=", "dict", "(", "[", "tuple", "(", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "param", ".", "split", "(", "\"=\""...
Carves up a mime-type and returns a tuple of the (type, subtype, params) where 'params' is a dictionary of all the parameters for the media range. For example, the media range 'application/xhtml;q=0.5' would get parsed into: ('application', 'xhtml', {'q', '0.5'})
[ "Carves", "up", "a", "mime", "-", "type", "and", "returns", "a", "tuple", "of", "the", "(", "type", "subtype", "params", ")", "where", "params", "is", "a", "dictionary", "of", "all", "the", "parameters", "for", "the", "media", "range", ".", "For", "exa...
python
train
Fantomas42/django-blog-zinnia
zinnia/xmlrpc/metaweblog.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/xmlrpc/metaweblog.py#L127-L155
def post_structure(entry, site):
    """
    A post structure with extensions.
    """
    author = entry.authors.all()[0]
    # 'link' and 'permaLink' carry the same absolute URL; build it once.
    entry_url = '%s://%s%s' % (PROTOCOL, site.domain,
                               entry.get_absolute_url())
    structure = {
        'title': entry.title,
        'description': six.text_type(entry.html_content),
        'link': entry_url,
        # Basic Extensions
        'permaLink': entry_url,
        'categories': [cat.title for cat in entry.categories.all()],
        'dateCreated': DateTime(entry.creation_date.isoformat()),
        'postid': entry.pk,
        'userid': author.get_username(),
        # Useful Movable Type Extensions
        'mt_excerpt': entry.excerpt,
        'mt_allow_comments': int(entry.comment_enabled),
        'mt_allow_pings': (int(entry.pingback_enabled) or
                           int(entry.trackback_enabled)),
        'mt_keywords': entry.tags,
        # Useful Wordpress Extensions
        'wp_author': author.get_username(),
        'wp_author_id': author.pk,
        'wp_author_display_name': author.__str__(),
        'wp_password': entry.password,
        'wp_slug': entry.slug,
        'sticky': entry.featured,
    }
    return structure
[ "def", "post_structure", "(", "entry", ",", "site", ")", ":", "author", "=", "entry", ".", "authors", ".", "all", "(", ")", "[", "0", "]", "return", "{", "'title'", ":", "entry", ".", "title", ",", "'description'", ":", "six", ".", "text_type", "(", ...
A post structure with extensions.
[ "A", "post", "structure", "with", "extensions", "." ]
python
train
alphatwirl/alphatwirl
alphatwirl/concurrently/WorkingArea.py
https://github.com/alphatwirl/alphatwirl/blob/5138eeba6cd8a334ba52d6c2c022b33c61e3ba38/alphatwirl/concurrently/WorkingArea.py#L62-L73
def open(self):
    """Open the working area

    Creates the working directory, then populates it with the
    executable, the logging-level snapshot, and the configured python
    modules.

    Returns
    -------
    None
    """
    area_path = self._prepare_dir(self.topdir)
    self.path = area_path
    self._copy_executable(area_path=area_path)
    self._save_logging_levels(area_path=area_path)
    self._put_python_modules(modules=self.python_modules, area_path=area_path)
[ "def", "open", "(", "self", ")", ":", "self", ".", "path", "=", "self", ".", "_prepare_dir", "(", "self", ".", "topdir", ")", "self", ".", "_copy_executable", "(", "area_path", "=", "self", ".", "path", ")", "self", ".", "_save_logging_levels", "(", "a...
Open the working area Returns ------- None
[ "Open", "the", "working", "area" ]
python
valid
DMSC-Instrument-Data/lewis
src/lewis/adapters/stream.py
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/adapters/stream.py#L734-L760
def _bind_device(self): """ This method implements ``_bind_device`` from :class:`~lewis.core.devices.InterfaceBase`. It binds Cmd and Var definitions to implementations in Interface and Device. """ patterns = set() self.bound_commands = [] for cmd in self.commands: bound = cmd.bind(self) or cmd.bind(self.device) or None if bound is None: raise RuntimeError( 'Unable to produce callable object for non-existing member \'{}\' ' 'of device or interface.'.format(cmd.func)) for bound_cmd in bound: pattern = bound_cmd.matcher.pattern if pattern in patterns: raise RuntimeError( 'The regular expression {} is ' 'associated with multiple commands.'.format(pattern)) patterns.add(pattern) self.bound_commands.append(bound_cmd)
[ "def", "_bind_device", "(", "self", ")", ":", "patterns", "=", "set", "(", ")", "self", ".", "bound_commands", "=", "[", "]", "for", "cmd", "in", "self", ".", "commands", ":", "bound", "=", "cmd", ".", "bind", "(", "self", ")", "or", "cmd", ".", ...
This method implements ``_bind_device`` from :class:`~lewis.core.devices.InterfaceBase`. It binds Cmd and Var definitions to implementations in Interface and Device.
[ "This", "method", "implements", "_bind_device", "from", ":", "class", ":", "~lewis", ".", "core", ".", "devices", ".", "InterfaceBase", ".", "It", "binds", "Cmd", "and", "Var", "definitions", "to", "implementations", "in", "Interface", "and", "Device", "." ]
python
train
metavee/batchproc
batchproc/util.py
https://github.com/metavee/batchproc/blob/aa084a2ac8ab7950f7a7d3adb54b0cf010c6a935/batchproc/util.py#L11-L26
def expand_folder(files):
    """Return a clone of file list files where all directories are
    recursively replaced with their contents."""
    expanded = []
    for entry in files:
        if not os.path.isdir(entry):
            expanded.append(entry)
            continue
        # Walk the directory depth-first, collecting every file found.
        for dirpath, _dirnames, filenames in os.walk(entry):
            expanded.extend(os.path.join(dirpath, name) for name in filenames)
    # Warn (but keep) entries that do not exist on disk.
    for candidate in expanded:
        if not os.path.exists(candidate):
            sys.stderr.write('%s: No such file or directory\n' % candidate)
    return expanded
[ "def", "expand_folder", "(", "files", ")", ":", "expfiles", "=", "[", "]", "for", "file", "in", "files", ":", "if", "os", ".", "path", ".", "isdir", "(", "file", ")", ":", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk"...
Return a clone of file list files where all directories are recursively replaced with their contents.
[ "Return", "a", "clone", "of", "file", "list", "files", "where", "all", "directories", "are", "recursively", "replaced", "with", "their", "contents", "." ]
python
train
inveniosoftware/invenio-previewer
invenio_previewer/extensions/zip.py
https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/zip.py#L26-L67
def make_tree(file):
    """Create tree structure from ZIP archive.

    Returns a 3-tuple ``(tree, limit_reached, error_message)``:
    ``tree`` is a nested dict of folders/items, ``limit_reached`` is True
    when the archive held more entries than PREVIEWER_ZIP_MAX_FILES, and
    ``error_message`` is a user-facing string or None on success.
    """
    max_files_count = current_app.config.get('PREVIEWER_ZIP_MAX_FILES', 1000)
    # Root node; id -1 marks the synthetic top-level folder.
    tree = {'type': 'folder', 'id': -1, 'children': {}}
    try:
        with file.open() as fp:
            zf = zipfile.ZipFile(fp)
            # Detect filenames encoding.
            sample = ' '.join(zf.namelist()[:max_files_count])
            if not isinstance(sample, binary_type):
                sample = sample.encode('utf-16be')
            encoding = chardet.detect(sample).get('encoding', 'utf-8')
            for i, info in enumerate(zf.infolist()):
                # BufferError aborts the walk but still returns the
                # partial tree (caught below with limit_reached=True).
                if i > max_files_count:
                    raise BufferError('Too many files inside the ZIP file.')
                comps = info.filename.split(os.sep)
                node = tree
                # Descend/create one tree level per path component.
                for c in comps:
                    if not isinstance(c, text_type):
                        c = c.decode(encoding)
                    if c not in node['children']:
                        # Empty component: trailing separator, so the
                        # current node is a folder, not an item.
                        if c == '':
                            node['type'] = 'folder'
                            continue
                        node['children'][c] = {
                            'name': c,
                            'type': 'item',
                            'id': 'item{0}'.format(i),
                            'children': {}
                        }
                    node = node['children'][c]
                node['size'] = info.file_size
    except BufferError:
        return tree, True, None
    except (zipfile.LargeZipFile):
        return tree, False, 'Zipfile is too large to be previewed.'
    except Exception as e:
        current_app.logger.warning(str(e), exc_info=True)
        return tree, False, 'Zipfile is not previewable.'
    return tree, False, None
[ "def", "make_tree", "(", "file", ")", ":", "max_files_count", "=", "current_app", ".", "config", ".", "get", "(", "'PREVIEWER_ZIP_MAX_FILES'", ",", "1000", ")", "tree", "=", "{", "'type'", ":", "'folder'", ",", "'id'", ":", "-", "1", ",", "'children'", "...
Create tree structure from ZIP archive.
[ "Create", "tree", "structure", "from", "ZIP", "archive", "." ]
python
train
ninuxorg/nodeshot
nodeshot/community/mailing/models/outward.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/community/mailing/models/outward.py#L67-L166
def get_recipients(self):
    """
    Determine recipients depending on selected filtering which can be either:
        * group based
        * layer based
        * user based

    Choosing "group" and "layer" filtering together has the effect of
    sending the message only to users for which the following conditions
    are both true:
        * have a node assigned to one of the selected layers
        * are part of any of the specified groups (eg: registered, community, trusted)

    The user based filtering has instead the effect of translating in an
    **OR** query. Here's a practical example:
    if selecting "group" and "user" filtering the message will be sent to
    all the users for which ANY of the following conditions is true:
        * are part of any of the specified groups (eg: registered, community, trusted)
        * selected users

    Returns a de-duplicated list of email address strings.
    """
    # user model
    User = get_user_model()
    # prepare email list
    emails = []
    # the following code is a bit ugly. Considering the titanic amount of work required to build all
    # the cools functionalities that I have in my mind, I can't be bothered to waste time on making it nicer right now.
    # if you have ideas on how to improve it to make it cleaner and less cluttered, please join in
    # this method has unit tests written for it, therefore if you try to change it be sure to check unit tests do not fail after your changes
    # python manage.py test mailing
    # send to all case
    if not self.is_filtered:
        # retrieve only email DB column of all active users
        users = User.objects.filter(is_active=True).only('email')
        # loop over users list
        for user in users:
            # add email to the recipient list if not already there
            if user.email not in emails:
                emails += [user.email]
    else:
        # selected users
        if FILTERS.get('users') in self.filters:
            # retrieve selected users
            users = self.users.all().only('email')
            # loop over selected users
            for user in users:
                # add email to the recipient list if not already there
                if user.email not in emails:
                    emails += [user.email]
        # Q is a django object for "complex" filtering queries (not that complex in this case)
        # init empty Q object that will be needed in case of group filtering
        q = Q()
        q2 = Q()
        # if group filtering is checked
        if FILTERS.get('groups') in self.filters:
            # loop over each group
            for group in self.groups:
                # if not superusers
                if group != '0':
                    # add the group to the Q object
                    # this means that the query will look for users of that specific group
                    q = q | Q(groups=int(group))
                    q2 = q2 | Q(user__groups=int(group))
                else:
                    # this must be done manually because superusers is not a group but an attribute of the User model
                    q = q | Q(is_superuser=True)
                    q2 = q2 | Q(user__is_superuser=True)
            # plus users must be active
            q = q & Q(is_active=True)
        # if layer filtering is checked
        if FILTERS.get('layers') in self.filters:
            # retrieve non-external layers
            layers = self.layers.all().only('id')
            # init empty q3
            q3 = Q()
            # loop over layers to form q3 object
            for layer in layers:
                q3 = q3 | Q(layer=layer)
            # q2: user group if present
            # q3: layers
            # retrieve nodes
            nodes = Node.objects.filter(q2 & q3)
            # loop over nodes of a layer and get their email
            for node in nodes:
                # add email to the recipient list if not already there
                if node.user.email not in emails:
                    emails += [node.user.email]
        # else if group filtering is checked but not layers
        elif FILTERS.get('groups') in self.filters and not FILTERS.get('layers') in self.filters:
            # retrieve only email DB column of all active users
            users = User.objects.filter(q).only('email')
            # loop over users list
            for user in users:
                # add email to the recipient list if not already there
                if user.email not in emails:
                    emails += [user.email]
    return emails
[ "def", "get_recipients", "(", "self", ")", ":", "# user model", "User", "=", "get_user_model", "(", ")", "# prepare email list", "emails", "=", "[", "]", "# the following code is a bit ugly. Considering the titanic amount of work required to build all", "# the cools functionaliti...
Determine recipients depending on selected filtering which can be either: * group based * layer based * user based Choosing "group" and "layer" filtering together has the effect of sending the message only to users for which the following conditions are both true: * have a node assigned to one of the selected layers * are part of any of the specified groups (eg: registered, community, trusted) The user based filtering has instead the effect of translating in an **OR** query. Here's a practical example: if selecting "group" and "user" filtering the message will be sent to all the users for which ANY of the following conditions is true: * are part of any of the specified groups (eg: registered, community, trusted) * selected users
[ "Determine", "recipients", "depending", "on", "selected", "filtering", "which", "can", "be", "either", ":", "*", "group", "based", "*", "layer", "based", "*", "user", "based" ]
python
train
gwastro/pycbc-glue
pycbc_glue/ligolw/array.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/array.py#L117-L151
def from_array(name, array, dim_names = None): """ Construct a LIGO Light Weight XML Array document subtree from a numpy array object. Example: >>> import numpy, sys >>> a = numpy.arange(12, dtype = "double") >>> a.shape = (4, 3) >>> from_array(u"test", a).write(sys.stdout) # doctest: +NORMALIZE_WHITESPACE <Array Type="real_8" Name="test:array"> <Dim>3</Dim> <Dim>4</Dim> <Stream Delimiter=" " Type="Local"> 0 3 6 9 1 4 7 10 2 5 8 11 </Stream> </Array> """ # Type must be set for .__init__(); easier to set Name afterwards # to take advantage of encoding handled by attribute proxy doc = Array(Attributes({u"Type": ligolwtypes.FromNumPyType[str(array.dtype)]})) doc.Name = name for n, dim in enumerate(reversed(array.shape)): child = ligolw.Dim() if dim_names is not None: child.Name = dim_names[n] child.pcdata = unicode(dim) doc.appendChild(child) child = ArrayStream(Attributes({u"Type": ArrayStream.Type.default, u"Delimiter": ArrayStream.Delimiter.default})) doc.appendChild(child) doc.array = array return doc
[ "def", "from_array", "(", "name", ",", "array", ",", "dim_names", "=", "None", ")", ":", "# Type must be set for .__init__(); easier to set Name afterwards", "# to take advantage of encoding handled by attribute proxy", "doc", "=", "Array", "(", "Attributes", "(", "{", "u\...
Construct a LIGO Light Weight XML Array document subtree from a numpy array object. Example: >>> import numpy, sys >>> a = numpy.arange(12, dtype = "double") >>> a.shape = (4, 3) >>> from_array(u"test", a).write(sys.stdout) # doctest: +NORMALIZE_WHITESPACE <Array Type="real_8" Name="test:array"> <Dim>3</Dim> <Dim>4</Dim> <Stream Delimiter=" " Type="Local"> 0 3 6 9 1 4 7 10 2 5 8 11 </Stream> </Array>
[ "Construct", "a", "LIGO", "Light", "Weight", "XML", "Array", "document", "subtree", "from", "a", "numpy", "array", "object", "." ]
python
train
pecan/pecan
pecan/decorators.py
https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/decorators.py#L25-L95
def expose(template=None, generic=False, route=None, **kw):
    '''
    Decorator used to flag controller methods as being "exposed" for
    access via HTTP, and to configure that access.

    :param template: The path to a template, relative to the base template
                     directory. Can also be passed a string representing a
                     special or custom renderer, such as ``'json'`` for
                     :ref:`expose_json`.
    :param content_type: The content-type to use for this template.
    :param generic: A boolean which flags this as a "generic" controller,
                    which uses generic functions based upon
                    ``functools.singledispatch`` generic functions.  Allows
                    you to split a single controller into multiple paths
                    based upon HTTP method.
    :param route: The name of the path segment to match (excluding
                  separator characters, like `/`).  Defaults to the name of
                  the function itself, but this can be used to resolve paths
                  which are not valid Python function names, e.g., if you
                  wanted to route a function to `some-special-path'.
    '''
    content_type = kw.get('content_type', 'text/html')

    # The 'json' pseudo-template implies a JSON content type.
    if template == 'json':
        content_type = 'application/json'

    def decorate(f):
        # flag the method as exposed
        f.exposed = True

        cfg = _cfg(f)
        # Remember whether the caller set content_type explicitly.
        cfg['explicit_content_type'] = 'content_type' in kw

        if route:
            # This import is here to avoid a circular import issue
            from pecan import routing
            if cfg.get('generic_handler'):
                raise ValueError(
                    'Path segments cannot be overridden for generic '
                    'controllers.'
                )
            routing.route(route, f)

        # set a "pecan" attribute, where we will store details
        cfg['content_type'] = content_type
        cfg.setdefault('template', []).append(template)
        cfg.setdefault('content_types', {})[content_type] = template

        # handle generic controllers
        if generic:
            # Pecan's special lookup methods cannot double as generics.
            if f.__name__ in ('_default', '_lookup', '_route'):
                raise ValueError(
                    'The special method %s cannot be used as a generic '
                    'controller' % f.__name__
                )
            cfg['generic'] = True
            cfg['generic_handlers'] = dict(DEFAULT=f)
            cfg['allowed_methods'] = []
            # .when() registers per-HTTP-method handlers on this generic.
            f.when = when_for(f)

        # store the arguments for this controller method
        cfg['argspec'] = getargspec(f)

        return f
    return decorate
[ "def", "expose", "(", "template", "=", "None", ",", "generic", "=", "False", ",", "route", "=", "None", ",", "*", "*", "kw", ")", ":", "content_type", "=", "kw", ".", "get", "(", "'content_type'", ",", "'text/html'", ")", "if", "template", "==", "'js...
Decorator used to flag controller methods as being "exposed" for access via HTTP, and to configure that access. :param template: The path to a template, relative to the base template directory. Can also be passed a string representing a special or custom renderer, such as ``'json'`` for :ref:`expose_json`. :param content_type: The content-type to use for this template. :param generic: A boolean which flags this as a "generic" controller, which uses generic functions based upon ``functools.singledispatch`` generic functions. Allows you to split a single controller into multiple paths based upon HTTP method. :param route: The name of the path segment to match (excluding separator characters, like `/`). Defaults to the name of the function itself, but this can be used to resolve paths which are not valid Python function names, e.g., if you wanted to route a function to `some-special-path'.
[ "Decorator", "used", "to", "flag", "controller", "methods", "as", "being", "exposed", "for", "access", "via", "HTTP", "and", "to", "configure", "that", "access", "." ]
python
train
adaptive-learning/proso-apps
proso_user/views.py
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_user/views.py#L341-L370
def initmobile_view(request):
    """
    Create lazy user with a password. Used from the Android app.
    Also returns csrf token.

    GET parameters:
        username:
            user's name
        password:
            user's password
    """
    if 'username' in request.GET and 'password' in request.GET:
        # Credentials supplied: authenticate and, when valid and active,
        # attach the session.
        user = auth.authenticate(
            username=request.GET['username'],
            password=request.GET['password'],
        )
        if user is not None and user.is_active:
            login(request, user)
    else:
        # No credentials: fall back to the user already on the request.
        user = request.user
    response = {
        'username': user.username,
        'csrftoken': get_token(request),
    }
    # Lazy users get a generated password, returned to the app once.
    if not user.has_usable_password():
        password = User.objects.make_random_password()
        user.set_password(password)
        user.save()
        response['password'] = password
    return HttpResponse(json.dumps(response))
[ "def", "initmobile_view", "(", "request", ")", ":", "if", "'username'", "in", "request", ".", "GET", "and", "'password'", "in", "request", ".", "GET", ":", "username", "=", "request", ".", "GET", "[", "'username'", "]", "password", "=", "request", ".", "...
Create lazy user with a password. Used from the Android app. Also returns csrf token. GET parameters: username: user's name password: user's password
[ "Create", "lazy", "user", "with", "a", "password", ".", "Used", "from", "the", "Android", "app", ".", "Also", "returns", "csrf", "token", "." ]
python
train
softlayer/softlayer-python
SoftLayer/auth.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/auth.py#L50-L57
def get_request(self, request): """Sets token-based auth headers.""" request.headers['authenticate'] = { 'complexType': 'PortalLoginToken', 'userId': self.user_id, 'authToken': self.auth_token, } return request
[ "def", "get_request", "(", "self", ",", "request", ")", ":", "request", ".", "headers", "[", "'authenticate'", "]", "=", "{", "'complexType'", ":", "'PortalLoginToken'", ",", "'userId'", ":", "self", ".", "user_id", ",", "'authToken'", ":", "self", ".", "a...
Sets token-based auth headers.
[ "Sets", "token", "-", "based", "auth", "headers", "." ]
python
train
loli/medpy
medpy/core/logger.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/core/logger.py#L103-L119
def setHandler(self, hdlr): r"""Replace the current handler with a new one. Parameters ---------- hdlr : logging.Handler A subclass of Handler that should used to handle the logging output. Notes ----- If none should be replaces, but just one added, use the parent classes addHandler() method. """ if None != self._handler: self.removeHandler(self._handler) self._handler = hdlr self.addHandler(self._handler)
[ "def", "setHandler", "(", "self", ",", "hdlr", ")", ":", "if", "None", "!=", "self", ".", "_handler", ":", "self", ".", "removeHandler", "(", "self", ".", "_handler", ")", "self", ".", "_handler", "=", "hdlr", "self", ".", "addHandler", "(", "self", ...
r"""Replace the current handler with a new one. Parameters ---------- hdlr : logging.Handler A subclass of Handler that should used to handle the logging output. Notes ----- If none should be replaces, but just one added, use the parent classes addHandler() method.
[ "r", "Replace", "the", "current", "handler", "with", "a", "new", "one", ".", "Parameters", "----------", "hdlr", ":", "logging", ".", "Handler", "A", "subclass", "of", "Handler", "that", "should", "used", "to", "handle", "the", "logging", "output", ".", "N...
python
train
SergeySatskiy/cdm-pythonparser
legacy/src/cdmbriefparser.py
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/legacy/src/cdmbriefparser.py#L540-L546
def _onDecorator( self, name, line, pos, absPosition ): " Memorizes a function or a class decorator " # A class or a function must be on the top of the stack self.objectsStack[ -1 ].decorators.append( Decorator( name, line, pos, absPosition ) ) return
[ "def", "_onDecorator", "(", "self", ",", "name", ",", "line", ",", "pos", ",", "absPosition", ")", ":", "# A class or a function must be on the top of the stack", "self", ".", "objectsStack", "[", "-", "1", "]", ".", "decorators", ".", "append", "(", "Decorator"...
Memorizes a function or a class decorator
[ "Memorizes", "a", "function", "or", "a", "class", "decorator" ]
python
train
Qiskit/qiskit-terra
qiskit/converters/ast_to_dag.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/converters/ast_to_dag.py#L214-L304
def _process_node(self, node): """Carry out the action associated with a node.""" if node.type == "program": self._process_children(node) elif node.type == "qreg": qreg = QuantumRegister(node.index, node.name) self.dag.add_qreg(qreg) elif node.type == "creg": creg = ClassicalRegister(node.index, node.name) self.dag.add_creg(creg) elif node.type == "id": raise QiskitError("internal error: _process_node on id") elif node.type == "int": raise QiskitError("internal error: _process_node on int") elif node.type == "real": raise QiskitError("internal error: _process_node on real") elif node.type == "indexed_id": raise QiskitError("internal error: _process_node on indexed_id") elif node.type == "id_list": # We process id_list nodes when they are leaves of barriers. return [self._process_bit_id(node_children) for node_children in node.children] elif node.type == "primary_list": # We should only be called for a barrier. return [self._process_bit_id(m) for m in node.children] elif node.type == "gate": self._process_gate(node) elif node.type == "custom_unitary": self._process_custom_unitary(node) elif node.type == "universal_unitary": args = self._process_node(node.children[0]) qid = self._process_bit_id(node.children[1]) for element in qid: self.dag.apply_operation_back(UBase(*args, element), self.condition) elif node.type == "cnot": self._process_cnot(node) elif node.type == "expression_list": return node.children elif node.type == "binop": raise QiskitError("internal error: _process_node on binop") elif node.type == "prefix": raise QiskitError("internal error: _process_node on prefix") elif node.type == "measure": self._process_measure(node) elif node.type == "format": self.version = node.version() elif node.type == "barrier": ids = self._process_node(node.children[0]) qubits = [] for qubit in ids: for j, _ in enumerate(qubit): qubits.append(qubit[j]) self.dag.apply_operation_back(Barrier(len(qubits)), qubits, []) elif node.type == "reset": id0 = 
self._process_bit_id(node.children[0]) for i, _ in enumerate(id0): self.dag.apply_operation_back(Reset(), [id0[i]], [], self.condition) elif node.type == "if": self._process_if(node) elif node.type == "opaque": self._process_gate(node, opaque=True) elif node.type == "external": raise QiskitError("internal error: _process_node on external") else: raise QiskitError("internal error: undefined node type", node.type, "line=%s" % node.line, "file=%s" % node.file) return None
[ "def", "_process_node", "(", "self", ",", "node", ")", ":", "if", "node", ".", "type", "==", "\"program\"", ":", "self", ".", "_process_children", "(", "node", ")", "elif", "node", ".", "type", "==", "\"qreg\"", ":", "qreg", "=", "QuantumRegister", "(", ...
Carry out the action associated with a node.
[ "Carry", "out", "the", "action", "associated", "with", "a", "node", "." ]
python
test
saltstack/salt
salt/modules/win_iis.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_iis.py#L197-L264
def list_sites(): ''' List all the currently deployed websites. Returns: dict: A dictionary of the IIS sites and their properties. CLI Example: .. code-block:: bash salt '*' win_iis.list_sites ''' ret = dict() ps_cmd = ['Get-ChildItem', '-Path', r"'IIS:\Sites'", '|', 'Select-Object applicationPool, applicationDefaults, Bindings, ID, Name, PhysicalPath, State'] keep_keys = ('certificateHash', 'certificateStoreName', 'protocol', 'sslFlags') cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True) try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) except ValueError: raise CommandExecutionError('Unable to parse return data as Json.') for item in items: bindings = dict() for binding in item['bindings']['Collection']: # Ignore bindings which do not have host names if binding['protocol'] not in ['http', 'https']: continue filtered_binding = dict() for key in binding: if key in keep_keys: filtered_binding.update({key.lower(): binding[key]}) binding_info = binding['bindingInformation'].split(':', 2) ipaddress, port, hostheader = [element.strip() for element in binding_info] filtered_binding.update({'hostheader': hostheader, 'ipaddress': ipaddress, 'port': port}) bindings[binding['bindingInformation']] = filtered_binding # ApplicationDefaults application_defaults = dict() for attribute in item['applicationDefaults']['Attributes']: application_defaults.update({attribute['Name']: attribute['Value']}) # ApplicationDefaults ret[item['name']] = {'apppool': item['applicationPool'], 'bindings': bindings, 'applicationDefaults': application_defaults, 'id': item['id'], 'state': item['state'], 'sourcepath': item['physicalPath']} if not ret: log.warning('No sites found in output: %s', cmd_ret['stdout']) return ret
[ "def", "list_sites", "(", ")", ":", "ret", "=", "dict", "(", ")", "ps_cmd", "=", "[", "'Get-ChildItem'", ",", "'-Path'", ",", "r\"'IIS:\\Sites'\"", ",", "'|'", ",", "'Select-Object applicationPool, applicationDefaults, Bindings, ID, Name, PhysicalPath, State'", "]", "ke...
List all the currently deployed websites. Returns: dict: A dictionary of the IIS sites and their properties. CLI Example: .. code-block:: bash salt '*' win_iis.list_sites
[ "List", "all", "the", "currently", "deployed", "websites", "." ]
python
train
Bearle/django-private-chat
django_private_chat/handlers.py
https://github.com/Bearle/django-private-chat/blob/5b51e65875795c5c0ce21bb631c53bd3aac4c26b/django_private_chat/handlers.py#L14-L24
def target_message(conn, payload): """ Distibuted payload (message) to one connection :param conn: connection :param payload: payload(json dumpable) :return: """ try: yield from conn.send(json.dumps(payload)) except Exception as e: logger.debug('could not send', e)
[ "def", "target_message", "(", "conn", ",", "payload", ")", ":", "try", ":", "yield", "from", "conn", ".", "send", "(", "json", ".", "dumps", "(", "payload", ")", ")", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "'could not send'...
Distibuted payload (message) to one connection :param conn: connection :param payload: payload(json dumpable) :return:
[ "Distibuted", "payload", "(", "message", ")", "to", "one", "connection", ":", "param", "conn", ":", "connection", ":", "param", "payload", ":", "payload", "(", "json", "dumpable", ")", ":", "return", ":" ]
python
train
projecthamster/hamster
src/hamster/lib/stuff.py
https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/stuff.py#L85-L121
def format_duration(minutes, human = True): """formats duration in a human readable format. accepts either minutes or timedelta""" if isinstance(minutes, dt.timedelta): minutes = duration_minutes(minutes) if not minutes: if human: return "" else: return "00:00" if minutes < 0: # format_duration did not work for negative values anyway # return a warning return "NEGATIVE" hours = minutes / 60 minutes = minutes % 60 formatted_duration = "" if human: if minutes % 60 == 0: # duration in round hours formatted_duration += ("%dh") % (hours) elif hours == 0: # duration less than hour formatted_duration += ("%dmin") % (minutes % 60.0) else: # x hours, y minutes formatted_duration += ("%dh %dmin") % (hours, minutes % 60) else: formatted_duration += "%02d:%02d" % (hours, minutes) return formatted_duration
[ "def", "format_duration", "(", "minutes", ",", "human", "=", "True", ")", ":", "if", "isinstance", "(", "minutes", ",", "dt", ".", "timedelta", ")", ":", "minutes", "=", "duration_minutes", "(", "minutes", ")", "if", "not", "minutes", ":", "if", "human",...
formats duration in a human readable format. accepts either minutes or timedelta
[ "formats", "duration", "in", "a", "human", "readable", "format", ".", "accepts", "either", "minutes", "or", "timedelta" ]
python
train
chrisspen/burlap
burlap/deploy.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/deploy.py#L21-L30
def iter_dict_differences(a, b): """ Returns a generator yielding all the keys that have values that differ between each dictionary. """ common_keys = set(a).union(b) for k in common_keys: a_value = a.get(k) b_value = b.get(k) if a_value != b_value: yield k, (a_value, b_value)
[ "def", "iter_dict_differences", "(", "a", ",", "b", ")", ":", "common_keys", "=", "set", "(", "a", ")", ".", "union", "(", "b", ")", "for", "k", "in", "common_keys", ":", "a_value", "=", "a", ".", "get", "(", "k", ")", "b_value", "=", "b", ".", ...
Returns a generator yielding all the keys that have values that differ between each dictionary.
[ "Returns", "a", "generator", "yielding", "all", "the", "keys", "that", "have", "values", "that", "differ", "between", "each", "dictionary", "." ]
python
valid
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/storage_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/storage_v1_api.py#L35-L58
def create_storage_class(self, body, **kwargs): # noqa: E501 """create_storage_class # noqa: E501 create a StorageClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_storage_class(body, async_req=True) >>> result = thread.get() :param async_req bool :param V1StorageClass body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1StorageClass If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_storage_class_with_http_info(body, **kwargs) # noqa: E501 else: (data) = self.create_storage_class_with_http_info(body, **kwargs) # noqa: E501 return data
[ "def", "create_storage_class", "(", "self", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "creat...
create_storage_class # noqa: E501 create a StorageClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_storage_class(body, async_req=True) >>> result = thread.get() :param async_req bool :param V1StorageClass body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1StorageClass If the method is called asynchronously, returns the request thread.
[ "create_storage_class", "#", "noqa", ":", "E501" ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L6809-L6832
def hx2dp(string): """ Convert a string representing a double precision number in a base 16 scientific notation into its equivalent double precision number. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/hx2dp_c.html :param string: Hex form string to convert to double precision. :type string: str :return: Double precision value to be returned, Or Error Message. :rtype: float or str """ string = stypes.stringToCharP(string) lenout = ctypes.c_int(80) errmsg = stypes.stringToCharP(lenout) number = ctypes.c_double() error = ctypes.c_int() libspice.hx2dp_c(string, lenout, ctypes.byref(number), ctypes.byref(error), errmsg) if not error.value: return number.value else: return stypes.toPythonString(errmsg)
[ "def", "hx2dp", "(", "string", ")", ":", "string", "=", "stypes", ".", "stringToCharP", "(", "string", ")", "lenout", "=", "ctypes", ".", "c_int", "(", "80", ")", "errmsg", "=", "stypes", ".", "stringToCharP", "(", "lenout", ")", "number", "=", "ctypes...
Convert a string representing a double precision number in a base 16 scientific notation into its equivalent double precision number. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/hx2dp_c.html :param string: Hex form string to convert to double precision. :type string: str :return: Double precision value to be returned, Or Error Message. :rtype: float or str
[ "Convert", "a", "string", "representing", "a", "double", "precision", "number", "in", "a", "base", "16", "scientific", "notation", "into", "its", "equivalent", "double", "precision", "number", "." ]
python
train
mikusjelly/apkutils
apkutils/apkfile.py
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1394-L1415
def _writecheck(self, zinfo): """Check for errors before writing a file to the archive.""" if zinfo.filename in self.NameToInfo: import warnings warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3) if self.mode not in ('w', 'x', 'a'): raise RuntimeError("write() requires mode 'w', 'x', or 'a'") if not self.fp: raise RuntimeError( "Attempt to write ZIP archive that was already closed") _check_compression(zinfo.compress_type) if not self._allowZip64: requires_zip64 = None if len(self.filelist) >= ZIP_FILECOUNT_LIMIT: requires_zip64 = "Files count" elif zinfo.file_size > ZIP64_LIMIT: requires_zip64 = "Filesize" elif zinfo.header_offset > ZIP64_LIMIT: requires_zip64 = "Zipfile size" if requires_zip64: raise LargeZipFile(requires_zip64 + " would require ZIP64 extensions")
[ "def", "_writecheck", "(", "self", ",", "zinfo", ")", ":", "if", "zinfo", ".", "filename", "in", "self", ".", "NameToInfo", ":", "import", "warnings", "warnings", ".", "warn", "(", "'Duplicate name: %r'", "%", "zinfo", ".", "filename", ",", "stacklevel", "...
Check for errors before writing a file to the archive.
[ "Check", "for", "errors", "before", "writing", "a", "file", "to", "the", "archive", "." ]
python
train
androguard/androguard
generators/axplorer_to_androguard.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/generators/axplorer_to_androguard.py#L18-L53
def name_to_androguard(n): """ Convert a object or primitive name into androguard syntax For example: byte --> B foo.bar.bla --> Lfoo/bar/bla; [int --> [I There is also a special case, where some arrays are specified differently: B[] --> [B foo.bar.bla[] --> [Lfoo/bar/bla; :param n: :return: """ if n == "": return "" is_array = "" # FIXME what about n-dimensional arrays? if n.startswith("["): is_array = "[" n = n[1:] elif n.endswith("[]"): # Another special array type... # Probably a bug? See if n[:-2] in TYPE_DESCRIPTOR: return "[{}".format(n[0]) else: n = n[:-2] is_array = "[" if n in R_TYPE_DESCRIPTOR: return "{}{}".format(is_array, R_TYPE_DESCRIPTOR[n]) else: # assume class return "{}L{};".format(is_array, n.replace(".", "/"))
[ "def", "name_to_androguard", "(", "n", ")", ":", "if", "n", "==", "\"\"", ":", "return", "\"\"", "is_array", "=", "\"\"", "# FIXME what about n-dimensional arrays?", "if", "n", ".", "startswith", "(", "\"[\"", ")", ":", "is_array", "=", "\"[\"", "n", "=", ...
Convert a object or primitive name into androguard syntax For example: byte --> B foo.bar.bla --> Lfoo/bar/bla; [int --> [I There is also a special case, where some arrays are specified differently: B[] --> [B foo.bar.bla[] --> [Lfoo/bar/bla; :param n: :return:
[ "Convert", "a", "object", "or", "primitive", "name", "into", "androguard", "syntax" ]
python
train
OLC-Bioinformatics/sipprverse
MLSTsippr/mlst.py
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/MLSTsippr/mlst.py#L519-L556
def report_parse(self): """ If the pipeline has previously been run on these data, instead of reading through the results, parse the report instead """ # Initialise lists report_strains = list() genus_list = list() if self.analysistype == 'mlst': for sample in self.runmetadata.samples: try: genus_list.append(sample.general.referencegenus) except AttributeError: sample.general.referencegenus = 'ND' genus_list.append(sample.general.referencegenus) # Read in the report if self.analysistype == 'mlst': for genus in genus_list: try: report_name = os.path.join(self.reportpath, '{at}_{genus}.csv'.format(at=self.analysistype, genus=genus)) report_strains = self.report_read(report_strains=report_strains, report_name=report_name) except FileNotFoundError: report_name = self.report report_strains = self.report_read(report_strains=report_strains, report_name=report_name) else: report_name = self.report report_strains = self.report_read(report_strains=report_strains, report_name=report_name) # Populate strains not in the report with 'empty' GenObject with appropriate attributes for sample in self.runmetadata.samples: if sample.name not in report_strains: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].sequencetype = 'ND' sample[self.analysistype].matches = 0 sample[self.analysistype].results = dict()
[ "def", "report_parse", "(", "self", ")", ":", "# Initialise lists", "report_strains", "=", "list", "(", ")", "genus_list", "=", "list", "(", ")", "if", "self", ".", "analysistype", "==", "'mlst'", ":", "for", "sample", "in", "self", ".", "runmetadata", "."...
If the pipeline has previously been run on these data, instead of reading through the results, parse the report instead
[ "If", "the", "pipeline", "has", "previously", "been", "run", "on", "these", "data", "instead", "of", "reading", "through", "the", "results", "parse", "the", "report", "instead" ]
python
train
joytunes/JTLocalize
localization_flow/jtlocalize/prepare_for_translation.py
https://github.com/joytunes/JTLocalize/blob/87864dc60114e0e61c768d057c6eddfadff3f40a/localization_flow/jtlocalize/prepare_for_translation.py#L31-L60
def prepare_for_translation(localization_bundle_path): """ Prepares the localization bundle for translation. This means, after creating the strings files using genstrings.sh, this will produce '.pending' files, that contain the files that are yet to be translated. Args: localization_bundle_path (str): The path to the localization bundle. """ logging.info("Preparing for translation..") for strings_file in os.listdir(os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME)): if not strings_file.endswith(".strings"): continue strings_path = os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME, strings_file) for lang_dir in os.listdir(localization_bundle_path): if lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME or lang_dir.startswith("."): continue dest_strings_path = os.path.join(localization_bundle_path, lang_dir, strings_file) pending_path = dest_strings_path + ".pending" excluded_path = dest_strings_path + ".excluded" if not os.path.exists(dest_strings_path): open_strings_file(dest_strings_path, "a").close() logging.info("Preparing diff for %s in %s", lang_dir, pending_path) localization_diff(strings_path, dest_strings_path, excluded_path, pending_path)
[ "def", "prepare_for_translation", "(", "localization_bundle_path", ")", ":", "logging", ".", "info", "(", "\"Preparing for translation..\"", ")", "for", "strings_file", "in", "os", ".", "listdir", "(", "os", ".", "path", ".", "join", "(", "localization_bundle_path",...
Prepares the localization bundle for translation. This means, after creating the strings files using genstrings.sh, this will produce '.pending' files, that contain the files that are yet to be translated. Args: localization_bundle_path (str): The path to the localization bundle.
[ "Prepares", "the", "localization", "bundle", "for", "translation", "." ]
python
train
RI-imaging/nrefocus
examples/example_helper.py
https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/examples/example_helper.py#L7-L21
def load_cell(fname="HL60_field.zip"): "Load zip file and return complex field" here = op.dirname(op.abspath(__file__)) data = op.join(here, "data") arc = zipfile.ZipFile(op.join(data, fname)) for f in arc.filelist: with arc.open(f) as fd: if f.filename.count("imag"): imag = np.loadtxt(fd) elif f.filename.count("real"): real = np.loadtxt(fd) field = real + 1j * imag return field
[ "def", "load_cell", "(", "fname", "=", "\"HL60_field.zip\"", ")", ":", "here", "=", "op", ".", "dirname", "(", "op", ".", "abspath", "(", "__file__", ")", ")", "data", "=", "op", ".", "join", "(", "here", ",", "\"data\"", ")", "arc", "=", "zipfile", ...
Load zip file and return complex field
[ "Load", "zip", "file", "and", "return", "complex", "field" ]
python
train
saltstack/salt
salt/modules/network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/network.py#L286-L360
def _netstat_bsd(): ''' Return netstat information for BSD flavors ''' ret = [] if __grains__['kernel'] == 'NetBSD': for addr_family in ('inet', 'inet6'): cmd = 'netstat -f {0} -an | tail -n+3'.format(addr_family) out = __salt__['cmd.run'](cmd, python_shell=True) for line in out.splitlines(): comps = line.split() entry = { 'proto': comps[0], 'recv-q': comps[1], 'send-q': comps[2], 'local-address': comps[3], 'remote-address': comps[4] } if entry['proto'].startswith('tcp'): entry['state'] = comps[5] ret.append(entry) else: # Lookup TCP connections cmd = 'netstat -p tcp -an | tail -n+3' out = __salt__['cmd.run'](cmd, python_shell=True) for line in out.splitlines(): comps = line.split() ret.append({ 'proto': comps[0], 'recv-q': comps[1], 'send-q': comps[2], 'local-address': comps[3], 'remote-address': comps[4], 'state': comps[5]}) # Lookup UDP connections cmd = 'netstat -p udp -an | tail -n+3' out = __salt__['cmd.run'](cmd, python_shell=True) for line in out.splitlines(): comps = line.split() ret.append({ 'proto': comps[0], 'recv-q': comps[1], 'send-q': comps[2], 'local-address': comps[3], 'remote-address': comps[4]}) # Add in user and program info ppid = _ppid() if __grains__['kernel'] == 'OpenBSD': netinfo = _netinfo_openbsd() elif __grains__['kernel'] in ('FreeBSD', 'NetBSD'): netinfo = _netinfo_freebsd_netbsd() for idx in range(len(ret)): local = ret[idx]['local-address'] remote = ret[idx]['remote-address'] proto = ret[idx]['proto'] try: # Make a pointer to the info for this connection for easier # reference below ptr = netinfo[local][remote][proto] except KeyError: continue # Get the pid-to-ppid mappings for this connection conn_ppid = dict((x, y) for x, y in six.iteritems(ppid) if x in ptr) try: # Master pid for this connection will be the pid whose ppid isn't # in the subset dict we created above master_pid = next(iter( x for x, y in six.iteritems(conn_ppid) if y not in ptr )) except StopIteration: continue ret[idx]['user'] = ptr[master_pid]['user'] 
ret[idx]['program'] = '/'.join((master_pid, ptr[master_pid]['cmd'])) return ret
[ "def", "_netstat_bsd", "(", ")", ":", "ret", "=", "[", "]", "if", "__grains__", "[", "'kernel'", "]", "==", "'NetBSD'", ":", "for", "addr_family", "in", "(", "'inet'", ",", "'inet6'", ")", ":", "cmd", "=", "'netstat -f {0} -an | tail -n+3'", ".", "format",...
Return netstat information for BSD flavors
[ "Return", "netstat", "information", "for", "BSD", "flavors" ]
python
train
dhhagan/py-opc
opc/__init__.py
https://github.com/dhhagan/py-opc/blob/2c8f19530fb64bf5fd4ee0d694a47850161ed8a7/opc/__init__.py#L235-L258
def read_info_string(self): """Reads the information string for the OPC :rtype: string :Example: >>> alpha.read_info_string() 'OPC-N2 FirmwareVer=OPC-018.2....................BD' """ infostring = [] # Send the command byte and sleep for 9 ms self.cnxn.xfer([0x3F]) sleep(9e-3) # Read the info string by sending 60 empty bytes for i in range(60): resp = self.cnxn.xfer([0x00])[0] infostring.append(chr(resp)) sleep(0.1) return ''.join(infostring)
[ "def", "read_info_string", "(", "self", ")", ":", "infostring", "=", "[", "]", "# Send the command byte and sleep for 9 ms", "self", ".", "cnxn", ".", "xfer", "(", "[", "0x3F", "]", ")", "sleep", "(", "9e-3", ")", "# Read the info string by sending 60 empty bytes", ...
Reads the information string for the OPC :rtype: string :Example: >>> alpha.read_info_string() 'OPC-N2 FirmwareVer=OPC-018.2....................BD'
[ "Reads", "the", "information", "string", "for", "the", "OPC" ]
python
valid
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L222-L230
def next(self, type=None): """ Returns the next word in the sentence with the given type. """ i = self.index + 1 s = self.sentence while i < len(s): if type in (s[i].type, None): return s[i] i += 1
[ "def", "next", "(", "self", ",", "type", "=", "None", ")", ":", "i", "=", "self", ".", "index", "+", "1", "s", "=", "self", ".", "sentence", "while", "i", "<", "len", "(", "s", ")", ":", "if", "type", "in", "(", "s", "[", "i", "]", ".", "...
Returns the next word in the sentence with the given type.
[ "Returns", "the", "next", "word", "in", "the", "sentence", "with", "the", "given", "type", "." ]
python
train
lvieirajr/mongorest
mongorest/decorators.py
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/decorators.py#L47-L67
def serializable(wrapped): """ If a keyword argument 'serialize' with a True value is passed to the Wrapped function, the return of the wrapped function will be serialized. Nothing happens if the argument is not passed or the value is not True """ @wraps(wrapped) def wrapper(*args, **kwargs): should_serialize = kwargs.pop('serialize', False) result = wrapped(*args, **kwargs) return serialize(result) if should_serialize else result if hasattr(wrapped, 'decorators'): wrapper.decorators = wrapped.decorators wrapper.decorators.append('serializable') else: wrapper.decorators = ['serializable'] return wrapper
[ "def", "serializable", "(", "wrapped", ")", ":", "@", "wraps", "(", "wrapped", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "should_serialize", "=", "kwargs", ".", "pop", "(", "'serialize'", ",", "False", ")", "result", ...
If a keyword argument 'serialize' with a True value is passed to the Wrapped function, the return of the wrapped function will be serialized. Nothing happens if the argument is not passed or the value is not True
[ "If", "a", "keyword", "argument", "serialize", "with", "a", "True", "value", "is", "passed", "to", "the", "Wrapped", "function", "the", "return", "of", "the", "wrapped", "function", "will", "be", "serialized", ".", "Nothing", "happens", "if", "the", "argumen...
python
train
PmagPy/PmagPy
programs/deprecated/umich_magic.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/umich_magic.py#L7-L244
def main(): """ NAME umich_magic.py DESCRIPTION converts UMICH .mag format files to magic_measurements format files SYNTAX umich_magic.py [command line options] OPTIONS -h: prints the help message and quits. -usr USER: identify user, default is "" -f FILE: specify .mag format input file, required -fsa SAMPFILE : specify er_samples.txt file relating samples, site and locations names,default is none -F FILE: specify output file, default is magic_measurements.txt -spc NUM : specify number of characters to designate a specimen, default = 0 -loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic -ncn NCON: specify naming convention: default is #1 below -A: don't average replicate measurements Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name same as sample [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY NB: all others you will have to customize your self or e-mail ltauxe@ucsd.edu for help. Format of UMICH .mag files: Spec Treat CSD Intensity Declination Inclination metadata string Spec: specimen name Treat: treatment step XXX T in Centigrade XXX AF in mT Intensity assumed to be total moment in 10^3 Am^2 (emu) Declination: Declination in specimen coordinate system Inclination: Declination in specimen coordinate system metatdata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS hh in 24 hours. 
dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively xx.xxx DC field UNITS of DC field (microT, mT) INST: instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes, measured in four positions) NMEAS: number of measurements in a single position (1,3,200...) """ # initialize some stuff dir_path='.' infile_type="mag" noave=0 methcode,inst="","" phi,theta,peakfield,labfield=0,0,0,0 pTRM,MD,samp_con,Z=0,0,'1',1 missing=1 demag="N" er_location_name="" citation='This study' args=sys.argv methcode="LP-NO" samp_file,ErSamps='',[] specnum=0 # # get command line arguments # meas_file="magic_measurements.txt" user="" if '-WD' in args: ind=args.index("-WD") dir_path=args[ind+1] if "-h" in args: print(main.__doc__) sys.exit() if "-usr" in args: ind=args.index("-usr") user=args[ind+1] if '-F' in args: ind=args.index("-F") meas_file=dir_path+'/'+args[ind+1] if '-f' in args: ind=args.index("-f") magfile=dir_path+'/'+args[ind+1] try: input=open(magfile,'r') except: print("bad mag file name") sys.exit() else: print("mag_file field is required option") print(main.__doc__) sys.exit() if "-spc" in args: ind=args.index("-spc") specnum=int(args[ind+1]) if specnum!=0:specnum=-specnum if "-loc" in args: ind=args.index("-loc") er_location_name=args[ind+1] if "-fsa" in args: ind=args.index("-fsa") samp_file=dir_path+'/'+args[ind+1] Samps,file_type=pmag.magic_read(samp_file) if "-A" in args: noave=1 if "-ncn" in args: ind=args.index("-ncn") samp_con=sys.argv[ind+1] if "4" in samp_con: if "-" not in samp_con: print("option [4] must be in form 4-Z where Z is an integer") sys.exit() else: Z=samp_con.split("-")[1] samp_con="4" samp_con=sys.argv[ind+1] if "7" in samp_con: if "-" not in samp_con: print("option [7] must be in form 7-Z where Z is an integer") sys.exit() else: Z=samp_con.split("-")[1] samp_con="7" MagRecs,specs=[],[] version_num=pmag.get_version() if infile_type=="mag": for line in input.readlines(): instcode="" if len(line)>2: 
MagRec={} MagRec['er_location_name']=er_location_name MagRec['magic_software_packages']=version_num MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin MagRec["treatment_ac_field"]='0' MagRec["treatment_dc_field"]='0' MagRec["treatment_dc_field_phi"]='0' MagRec["treatment_dc_field_theta"]='0' meas_type="LT-NO" rec=line.split() labfield=0 code1=rec[6].split(';') date=code1[0].split('/') # break date into mon/day/year yy=int(date[2]) if yy <90: yyyy=str(2000+yy) else: yyyy=str(1900+yy) mm=int(date[0]) if mm<10: mm="0"+str(mm) else: mm=str(mm) dd=int(date[1]) if dd<10: dd="0"+str(dd) else: dd=str(dd) time=code1[1].split(':') hh=int(time[0]) if hh<10: hh="0"+str(hh) else: hh=str(hh) min=int(time[1]) if min<10: min= "0"+str(min) else: min=str(min) MagRec["measurement_date"]=yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00.00" MagRec["measurement_time_zone"]='' instcode='' if len(code1)>1: MagRec["measurement_positions"]=code1[6][2] else: MagRec["measurement_positions"]=code1[7] # takes care of awkward format with bubba and flo being different if user=="":user=code1[5] if code1[2][-1]=='C': demag="T" if code1[2]=='mT': demag="AF" treat=rec[1].split('.') if len(treat)==1:treat.append('0') if demag=='T' and treat!=0: meas_type="LT-T-Z" MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) 
# temp in kelvin if demag=="AF": meas_type="LT-AF-Z" MagRec["treatment_ac_field"]='%8.3e' % (float(treat[0])*1e-3) # Af field in T MagRec["treatment_dc_field"]='0' MagRec["er_specimen_name"]=rec[0] if rec[0] not in specs:specs.append(rec[0]) # get a list of specimen names experiment=rec[0]+":" MagRec["er_site_name"]="" if specnum!=0: MagRec["er_sample_name"]=rec[0][:specnum] else: MagRec["er_sample_name"]=rec[0] if "-fsa" in args: for samp in Samps: if samp["er_sample_name"] == MagRec["er_sample_name"]: MagRec["er_location_name"]=samp["er_location_name"] MagRec["er_site_name"]=samp["er_site_name"] break elif int(samp_con)!=6: site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z) MagRec["er_site_name"]=site if MagRec['er_site_name']=="": print('No site name found for: ',MagRec['er_specimen_name'],MagRec['er_sample_name']) if MagRec["er_location_name"]=="": print('no location name for: ',MagRec["er_specimen_name"]) if rec[1]==".00":rec[1]="0.00" MagRec["measurement_csd"]=rec[2] MagRec["measurement_magn_moment"]='%10.3e'% (float(rec[3])*1e-3) # moment in Am^2 (from emu) MagRec["measurement_dec"]=rec[4] MagRec["measurement_inc"]=rec[5] MagRec["magic_instrument_codes"]=instcode MagRec["er_analyst_mail_names"]=user MagRec["er_citation_names"]=citation MagRec["magic_method_codes"]=meas_type MagRec["measurement_flag"]='g' MagRec["er_specimen_name"]=rec[0] MagRec["measurement_number"]='1' MagRecs.append(MagRec) MagOuts=[] for spec in specs: # gather all demag types for this specimen SpecRecs,meths,measnum=[],[],1 for rec in MagRecs: if rec['er_specimen_name']==spec: rec['measurement_number']=str(measnum) measnum+=1 if rec['magic_method_codes'] not in meths:meths.append(rec['magic_method_codes']) SpecRecs.append(rec) expname=spec if "LT-AF-Z" in meths:expname=expname+ ':LP-DIR-AF' if "LT-T-Z" in meths:expname=expname+ ':LP-DIR-T' for rec in SpecRecs: rec['magic_experiment_name']=expname MagOuts.append(rec) pmag.magic_write(meas_file,MagOuts,'magic_measurements') 
print("results put in ",meas_file)
[ "def", "main", "(", ")", ":", "# initialize some stuff", "dir_path", "=", "'.'", "infile_type", "=", "\"mag\"", "noave", "=", "0", "methcode", ",", "inst", "=", "\"\"", ",", "\"\"", "phi", ",", "theta", ",", "peakfield", ",", "labfield", "=", "0", ",", ...
NAME umich_magic.py DESCRIPTION converts UMICH .mag format files to magic_measurements format files SYNTAX umich_magic.py [command line options] OPTIONS -h: prints the help message and quits. -usr USER: identify user, default is "" -f FILE: specify .mag format input file, required -fsa SAMPFILE : specify er_samples.txt file relating samples, site and locations names,default is none -F FILE: specify output file, default is magic_measurements.txt -spc NUM : specify number of characters to designate a specimen, default = 0 -loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic -ncn NCON: specify naming convention: default is #1 below -A: don't average replicate measurements Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name same as sample [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY NB: all others you will have to customize your self or e-mail ltauxe@ucsd.edu for help. Format of UMICH .mag files: Spec Treat CSD Intensity Declination Inclination metadata string Spec: specimen name Treat: treatment step XXX T in Centigrade XXX AF in mT Intensity assumed to be total moment in 10^3 Am^2 (emu) Declination: Declination in specimen coordinate system Inclination: Declination in specimen coordinate system metatdata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS hh in 24 hours. 
dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively xx.xxx DC field UNITS of DC field (microT, mT) INST: instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes, measured in four positions) NMEAS: number of measurements in a single position (1,3,200...)
[ "NAME", "umich_magic", ".", "py", "DESCRIPTION", "converts", "UMICH", ".", "mag", "format", "files", "to", "magic_measurements", "format", "files" ]
python
train
rigetti/quantumflow
quantumflow/paulialgebra.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/paulialgebra.py#L348-L372
def pauli_commuting_sets(element: Pauli) -> Tuple[Pauli, ...]: """Gather the terms of a Pauli polynomial into commuting sets. Uses the algorithm defined in (Raeisi, Wiebe, Sanders, arXiv:1108.4318, 2011) to find commuting sets. Except uses commutation check from arXiv:1405.5749v2 """ if len(element) < 2: return (element,) groups: List[Pauli] = [] # typing: List[Pauli] for term in element: pterm = Pauli((term,)) assigned = False for i, grp in enumerate(groups): if paulis_commute(grp, pterm): groups[i] = grp + pterm assigned = True break if not assigned: groups.append(pterm) return tuple(groups)
[ "def", "pauli_commuting_sets", "(", "element", ":", "Pauli", ")", "->", "Tuple", "[", "Pauli", ",", "...", "]", ":", "if", "len", "(", "element", ")", "<", "2", ":", "return", "(", "element", ",", ")", "groups", ":", "List", "[", "Pauli", "]", "=",...
Gather the terms of a Pauli polynomial into commuting sets. Uses the algorithm defined in (Raeisi, Wiebe, Sanders, arXiv:1108.4318, 2011) to find commuting sets. Except uses commutation check from arXiv:1405.5749v2
[ "Gather", "the", "terms", "of", "a", "Pauli", "polynomial", "into", "commuting", "sets", "." ]
python
train
pgmpy/pgmpy
pgmpy/models/DynamicBayesianNetwork.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/DynamicBayesianNetwork.py#L120-L133
def _nodes(self): """ Returns the list of nodes present in the network Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['A', 'B', 'C']) >>> sorted(dbn._nodes()) ['B', 'A', 'C'] """ return list(set([node for node, timeslice in super(DynamicBayesianNetwork, self).nodes()]))
[ "def", "_nodes", "(", "self", ")", ":", "return", "list", "(", "set", "(", "[", "node", "for", "node", ",", "timeslice", "in", "super", "(", "DynamicBayesianNetwork", ",", "self", ")", ".", "nodes", "(", ")", "]", ")", ")" ]
Returns the list of nodes present in the network Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['A', 'B', 'C']) >>> sorted(dbn._nodes()) ['B', 'A', 'C']
[ "Returns", "the", "list", "of", "nodes", "present", "in", "the", "network" ]
python
train
pandas-dev/pandas
pandas/core/internals/blocks.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2597-L2643
def convert(self, *args, **kwargs): """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we ARE an ObjectBlock!!!!! can return multiple blocks! """ if args: raise NotImplementedError by_item = kwargs.get('by_item', True) new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta'] new_style = False for kw in new_inputs: new_style |= kw in kwargs if new_style: fn = soft_convert_objects fn_inputs = new_inputs else: fn = maybe_convert_objects fn_inputs = ['convert_dates', 'convert_numeric', 'convert_timedeltas'] fn_inputs += ['copy'] fn_kwargs = {key: kwargs[key] for key in fn_inputs if key in kwargs} # operate column-by-column def f(m, v, i): shape = v.shape values = fn(v.ravel(), **fn_kwargs) try: values = values.reshape(shape) values = _block_shape(values, ndim=self.ndim) except (AttributeError, NotImplementedError): pass return values if by_item and not self._is_single_block: blocks = self.split_and_operate(None, f, False) else: values = f(None, self.values.ravel(), None) blocks = [make_block(values, ndim=self.ndim, placement=self.mgr_locs)] return blocks
[ "def", "convert", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", ":", "raise", "NotImplementedError", "by_item", "=", "kwargs", ".", "get", "(", "'by_item'", ",", "True", ")", "new_inputs", "=", "[", "'coerce'", ",", ...
attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we ARE an ObjectBlock!!!!! can return multiple blocks!
[ "attempt", "to", "coerce", "any", "object", "types", "to", "better", "types", "return", "a", "copy", "of", "the", "block", "(", "if", "copy", "=", "True", ")", "by", "definition", "we", "ARE", "an", "ObjectBlock!!!!!" ]
python
train
datascopeanalytics/scrubadub
scrubadub/detectors/__init__.py
https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/detectors/__init__.py#L12-L22
def iter_detector_clss(): """Iterate over all of the detectors that are included in this sub-package. This is a convenience method for capturing all new Detectors that are added over time and it is used both by the unit tests and in the ``Scrubber.__init__`` method. """ return iter_subclasses( os.path.dirname(os.path.abspath(__file__)), Detector, _is_abstract_detector, )
[ "def", "iter_detector_clss", "(", ")", ":", "return", "iter_subclasses", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ",", "Detector", ",", "_is_abstract_detector", ",", ")" ]
Iterate over all of the detectors that are included in this sub-package. This is a convenience method for capturing all new Detectors that are added over time and it is used both by the unit tests and in the ``Scrubber.__init__`` method.
[ "Iterate", "over", "all", "of", "the", "detectors", "that", "are", "included", "in", "this", "sub", "-", "package", ".", "This", "is", "a", "convenience", "method", "for", "capturing", "all", "new", "Detectors", "that", "are", "added", "over", "time", "and...
python
train
wmayner/pyphi
pyphi/validate.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/validate.py#L118-L128
def network(n): """Validate a |Network|. Checks the TPM and connectivity matrix. """ tpm(n.tpm) connectivity_matrix(n.cm) if n.cm.shape[0] != n.size: raise ValueError("Connectivity matrix must be NxN, where N is the " "number of nodes in the network.") return True
[ "def", "network", "(", "n", ")", ":", "tpm", "(", "n", ".", "tpm", ")", "connectivity_matrix", "(", "n", ".", "cm", ")", "if", "n", ".", "cm", ".", "shape", "[", "0", "]", "!=", "n", ".", "size", ":", "raise", "ValueError", "(", "\"Connectivity m...
Validate a |Network|. Checks the TPM and connectivity matrix.
[ "Validate", "a", "|Network|", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17r_1_01a/mpls_state/rsvp/interfaces/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/mpls_state/rsvp/interfaces/__init__.py#L871-L894
def _set_interface_flooding_up_threshold(self, v, load=False): """ Setter method for interface_flooding_up_threshold, mapped from YANG variable /mpls_state/rsvp/interfaces/interface_flooding_up_threshold (feature-config-source) If this variable is read-only (config: false) in the source YANG file, then _set_interface_flooding_up_threshold is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_interface_flooding_up_threshold() directly. YANG Description: Interface flood up threshold is taken from global, local or default settings """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'local-config': {'value': 2}, u'default-config': {'value': 3}, u'global-config': {'value': 1}},), is_leaf=True, yang_name="interface-flooding-up-threshold", rest_name="interface-flooding-up-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='feature-config-source', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """interface_flooding_up_threshold must be of a type compatible with feature-config-source""", 'defined-type': "brocade-mpls-operational:feature-config-source", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'local-config': {'value': 2}, u'default-config': {'value': 3}, u'global-config': {'value': 1}},), is_leaf=True, yang_name="interface-flooding-up-threshold", rest_name="interface-flooding-up-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='feature-config-source', 
is_config=False)""", }) self.__interface_flooding_up_threshold = t if hasattr(self, '_set'): self._set()
[ "def", "_set_interface_flooding_up_threshold", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(",...
Setter method for interface_flooding_up_threshold, mapped from YANG variable /mpls_state/rsvp/interfaces/interface_flooding_up_threshold (feature-config-source) If this variable is read-only (config: false) in the source YANG file, then _set_interface_flooding_up_threshold is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_interface_flooding_up_threshold() directly. YANG Description: Interface flood up threshold is taken from global, local or default settings
[ "Setter", "method", "for", "interface_flooding_up_threshold", "mapped", "from", "YANG", "variable", "/", "mpls_state", "/", "rsvp", "/", "interfaces", "/", "interface_flooding_up_threshold", "(", "feature", "-", "config", "-", "source", ")", "If", "this", "variable"...
python
train
peeringdb/peeringdb-py
peeringdb/config.py
https://github.com/peeringdb/peeringdb-py/blob/cf2060a1d5ef879a01cf849e54b7756909ab2661/peeringdb/config.py#L115-L120
def convert_old(data): "Convert config data with old schema to new schema" ret = default_config() ret['sync'].update(data.get('peeringdb', {})) ret['orm']['database'].update(data.get('database', {})) return ret
[ "def", "convert_old", "(", "data", ")", ":", "ret", "=", "default_config", "(", ")", "ret", "[", "'sync'", "]", ".", "update", "(", "data", ".", "get", "(", "'peeringdb'", ",", "{", "}", ")", ")", "ret", "[", "'orm'", "]", "[", "'database'", "]", ...
Convert config data with old schema to new schema
[ "Convert", "config", "data", "with", "old", "schema", "to", "new", "schema" ]
python
train
project-rig/rig
rig/machine_control/machine_controller.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2838-L2869
def unpack_routing_table_entry(packed): """Unpack a routing table entry read from a SpiNNaker machine. Parameters ---------- packet : :py:class:`bytes` Bytes containing a packed routing table. Returns ------- (:py:class:`~rig.routing_table.RoutingTableEntry`, app_id, core) or None Tuple containing the routing entry, the app_id associated with the entry and the core number associated with the entry; or None if the routing table entry is flagged as unused. """ # Unpack the routing table entry _, free, route, key, mask = struct.unpack(consts.RTE_PACK_STRING, packed) # If the top 8 bits of the route are set then this entry is not in use, so # return None. if route & 0xff000000 == 0xff000000: return None # Convert the routing table entry routes = {r for r in routing_table.Routes if (route >> r) & 0x1} rte = routing_table.RoutingTableEntry(routes, key, mask) # Convert the surrounding data app_id = free & 0xff core = (free >> 8) & 0x0f return (rte, app_id, core)
[ "def", "unpack_routing_table_entry", "(", "packed", ")", ":", "# Unpack the routing table entry", "_", ",", "free", ",", "route", ",", "key", ",", "mask", "=", "struct", ".", "unpack", "(", "consts", ".", "RTE_PACK_STRING", ",", "packed", ")", "# If the top 8 bi...
Unpack a routing table entry read from a SpiNNaker machine. Parameters ---------- packet : :py:class:`bytes` Bytes containing a packed routing table. Returns ------- (:py:class:`~rig.routing_table.RoutingTableEntry`, app_id, core) or None Tuple containing the routing entry, the app_id associated with the entry and the core number associated with the entry; or None if the routing table entry is flagged as unused.
[ "Unpack", "a", "routing", "table", "entry", "read", "from", "a", "SpiNNaker", "machine", "." ]
python
train
edx/XBlock
xblock/runtime.py
https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/runtime.py#L1264-L1268
def lex(self, text): """Iterator that tokenizes `text` and yields up tokens as they are found""" for match in self.regex.finditer(text): name = match.lastgroup yield (name, match.group(name))
[ "def", "lex", "(", "self", ",", "text", ")", ":", "for", "match", "in", "self", ".", "regex", ".", "finditer", "(", "text", ")", ":", "name", "=", "match", ".", "lastgroup", "yield", "(", "name", ",", "match", ".", "group", "(", "name", ")", ")" ...
Iterator that tokenizes `text` and yields up tokens as they are found
[ "Iterator", "that", "tokenizes", "text", "and", "yields", "up", "tokens", "as", "they", "are", "found" ]
python
train
libtcod/python-tcod
tcod/libtcodpy.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L818-L833
def color_scale_HSV(c: Color, scoef: float, vcoef: float) -> None: """Scale a color's saturation and value. Does not return a new Color. ``c`` is modified inplace. Args: c (Union[Color, List[int]]): A Color instance, or an [r, g, b] list. scoef (float): Saturation multiplier, from 0 to 1. Use 1 to keep current saturation. vcoef (float): Value multiplier, from 0 to 1. Use 1 to keep current value. """ color_p = ffi.new("TCOD_color_t*") color_p.r, color_p.g, color_p.b = c.r, c.g, c.b lib.TCOD_color_scale_HSV(color_p, scoef, vcoef) c[:] = color_p.r, color_p.g, color_p.b
[ "def", "color_scale_HSV", "(", "c", ":", "Color", ",", "scoef", ":", "float", ",", "vcoef", ":", "float", ")", "->", "None", ":", "color_p", "=", "ffi", ".", "new", "(", "\"TCOD_color_t*\"", ")", "color_p", ".", "r", ",", "color_p", ".", "g", ",", ...
Scale a color's saturation and value. Does not return a new Color. ``c`` is modified inplace. Args: c (Union[Color, List[int]]): A Color instance, or an [r, g, b] list. scoef (float): Saturation multiplier, from 0 to 1. Use 1 to keep current saturation. vcoef (float): Value multiplier, from 0 to 1. Use 1 to keep current value.
[ "Scale", "a", "color", "s", "saturation", "and", "value", "." ]
python
train
CI-WATER/mapkit
mapkit/RasterLoader.py
https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/RasterLoader.py#L181-L247
def makeSingleBandWKBRaster(cls, session, width, height, upperLeftX, upperLeftY, cellSizeX, cellSizeY, skewX, skewY, srid, dataArray, initialValue=None, noDataValue=None): """ Generate Well Known Binary via SQL. Must be used on a PostGIS database as it relies on several PostGIS database functions. :param session: SQLAlchemy session object bound to a PostGIS enabled database :param height: Height of the raster (or number of rows) :param width: Width of the raster (or number of columns) :param upperLeftX: Raster upper left corner X coordinate :param upperLeftY: Raster upper left corner Y coordinate :param cellSizeX: Raster cell size in X direction :param cellSizeY: Raster cell size in Y direction :param skewX: Skew in X direction :param skewY: Skew in Y direction :param srid: SRID of the raster :param initialValue: Initial / default value of the raster cells :param noDataValue: Value of cells to be considered as cells containing no cells :param dataArray: 2-dimensional list of values or a string representation of a 2-dimensional list that will be used to populate the raster values """ # Stringify the data array if isinstance(dataArray, str): dataArrayString = dataArray else: dataArrayString = json.dumps(dataArray) # Validate if initialValue is None: initialValue = 'NULL' if noDataValue is None: noDataValue = 'NULL' # Cell size in the Y direction must be negative if cellSizeY > 0: print('RASTER LOADER WARNING: cellSizeY should be defined as negative.') cellSizeY = -1 * cellSizeY # Create the SQL statement statement = ''' SELECT ST_SetValues( ST_AddBand( ST_MakeEmptyRaster({0}::integer, {1}::integer, {2}, {3}, {4}, {5}, {6}, {7}, {8}::integer), 1::integer, '32BF'::text, {9}::double precision, {10}::double precision ), 1, 1, 1, ARRAY{11}::double precision[][] ); '''.format(width, height, upperLeftX, upperLeftY, cellSizeX, cellSizeY, skewX, skewY, srid, initialValue, noDataValue, dataArrayString) result = session.execute(statement) # Extract result wellKnownBinary = '' 
for row in result: wellKnownBinary = row[0] return wellKnownBinary
[ "def", "makeSingleBandWKBRaster", "(", "cls", ",", "session", ",", "width", ",", "height", ",", "upperLeftX", ",", "upperLeftY", ",", "cellSizeX", ",", "cellSizeY", ",", "skewX", ",", "skewY", ",", "srid", ",", "dataArray", ",", "initialValue", "=", "None", ...
Generate Well Known Binary via SQL. Must be used on a PostGIS database as it relies on several PostGIS database functions. :param session: SQLAlchemy session object bound to a PostGIS enabled database :param height: Height of the raster (or number of rows) :param width: Width of the raster (or number of columns) :param upperLeftX: Raster upper left corner X coordinate :param upperLeftY: Raster upper left corner Y coordinate :param cellSizeX: Raster cell size in X direction :param cellSizeY: Raster cell size in Y direction :param skewX: Skew in X direction :param skewY: Skew in Y direction :param srid: SRID of the raster :param initialValue: Initial / default value of the raster cells :param noDataValue: Value of cells to be considered as cells containing no cells :param dataArray: 2-dimensional list of values or a string representation of a 2-dimensional list that will be used to populate the raster values
[ "Generate", "Well", "Known", "Binary", "via", "SQL", ".", "Must", "be", "used", "on", "a", "PostGIS", "database", "as", "it", "relies", "on", "several", "PostGIS", "database", "functions", ".", ":", "param", "session", ":", "SQLAlchemy", "session", "object",...
python
train
secynic/ipwhois
ipwhois/rdap.py
https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/rdap.py#L600-L661
def parse(self): """ The function for parsing the JSON response to the vars dictionary. """ try: self.vars['handle'] = self.json['handle'].strip() except (KeyError, ValueError, TypeError): raise InvalidEntityObject('Handle is missing for RDAP entity') for v in ['roles', 'country']: try: self.vars[v] = self.json[v] except (KeyError, ValueError): pass try: vcard = self.json['vcardArray'][1] c = _RDAPContact(vcard) c.parse() self.vars['contact'] = c.vars except (KeyError, ValueError, TypeError): pass try: self.vars['events_actor'] = self.summarize_events( self.json['asEventActor']) except (KeyError, ValueError, TypeError): pass self.vars['entities'] = [] try: for ent in self.json['entities']: if ent['handle'] not in self.vars['entities']: self.vars['entities'].append(ent['handle']) except (KeyError, ValueError, TypeError): pass if not self.vars['entities']: self.vars['entities'] = None self._parse()
[ "def", "parse", "(", "self", ")", ":", "try", ":", "self", ".", "vars", "[", "'handle'", "]", "=", "self", ".", "json", "[", "'handle'", "]", ".", "strip", "(", ")", "except", "(", "KeyError", ",", "ValueError", ",", "TypeError", ")", ":", "raise",...
The function for parsing the JSON response to the vars dictionary.
[ "The", "function", "for", "parsing", "the", "JSON", "response", "to", "the", "vars", "dictionary", "." ]
python
train
MDAnalysis/GridDataFormats
gridData/OpenDX.py
https://github.com/MDAnalysis/GridDataFormats/blob/3eeb0432f8cf856912436e4f3e7aba99d3c916be/gridData/OpenDX.py#L462-L484
def write(self, filename): """Write the complete dx object to the file. This is the simple OpenDX format which includes the data into the header via the 'object array ... data follows' statement. Only simple regular arrays are supported. The format should be compatible with VMD's dx reader plugin. """ # comments (VMD chokes on lines of len > 80, so truncate) maxcol = 80 with open(filename,'w') as outfile: for line in self.comments: comment = '# '+str(line) outfile.write(comment[:maxcol]+'\n') # each individual object for component,object in self.sorted_components(): object.write(outfile) # the field object itself DXclass.write(self,outfile,quote=True) for component,object in self.sorted_components(): outfile.write('component "%s" value %s\n' % (component,str(object.id)))
[ "def", "write", "(", "self", ",", "filename", ")", ":", "# comments (VMD chokes on lines of len > 80, so truncate)", "maxcol", "=", "80", "with", "open", "(", "filename", ",", "'w'", ")", "as", "outfile", ":", "for", "line", "in", "self", ".", "comments", ":",...
Write the complete dx object to the file. This is the simple OpenDX format which includes the data into the header via the 'object array ... data follows' statement. Only simple regular arrays are supported. The format should be compatible with VMD's dx reader plugin.
[ "Write", "the", "complete", "dx", "object", "to", "the", "file", "." ]
python
valid
ejeschke/ginga
ginga/canvas/transform.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/canvas/transform.py#L166-L181
def from_(self, pct_pts): """Reverse of :meth:`to_`.""" pct_pts = np.asarray(pct_pts, dtype=np.float) has_z = (pct_pts.shape[-1] > 2) max_pt = list(self.viewer.get_window_size()) if has_z: max_pt.append(0.0) win_pts = np.multiply(pct_pts, max_pt) # round to pixel units, if asked if self.as_int: win_pts = np.rint(win_pts).astype(np.int, copy=False) return win_pts
[ "def", "from_", "(", "self", ",", "pct_pts", ")", ":", "pct_pts", "=", "np", ".", "asarray", "(", "pct_pts", ",", "dtype", "=", "np", ".", "float", ")", "has_z", "=", "(", "pct_pts", ".", "shape", "[", "-", "1", "]", ">", "2", ")", "max_pt", "=...
Reverse of :meth:`to_`.
[ "Reverse", "of", ":", "meth", ":", "to_", "." ]
python
train
PedalPi/PluginsManager
pluginsmanager/observer/observable_list.py
https://github.com/PedalPi/PluginsManager/blob/2dcc9f6a79b48e9c9be82efffd855352fa15c5c7/pluginsmanager/observer/observable_list.py#L149-L164
def move(self, item, new_position): """ Moves a item list to new position Calls observer ``self.observer(UpdateType.DELETED, item, index)`` and observer ``self.observer(UpdateType.CREATED, item, index)`` if ``val != self[index]`` :param item: Item that will be moved to new_position :param new_position: Item's new position """ if item == self[new_position]: return self.remove(item) self.insert(new_position, item)
[ "def", "move", "(", "self", ",", "item", ",", "new_position", ")", ":", "if", "item", "==", "self", "[", "new_position", "]", ":", "return", "self", ".", "remove", "(", "item", ")", "self", ".", "insert", "(", "new_position", ",", "item", ")" ]
Moves a item list to new position Calls observer ``self.observer(UpdateType.DELETED, item, index)`` and observer ``self.observer(UpdateType.CREATED, item, index)`` if ``val != self[index]`` :param item: Item that will be moved to new_position :param new_position: Item's new position
[ "Moves", "a", "item", "list", "to", "new", "position" ]
python
train
artefactual-labs/mets-reader-writer
metsrw/fsentry.py
https://github.com/artefactual-labs/mets-reader-writer/blob/d95939cabdfdc25cb1bf67df0c84bd0d6e6a73ff/metsrw/fsentry.py#L243-L275
def _add_metadata_element(self, md, subsection, mdtype, mode="mdwrap", **kwargs): """ :param md: Value to pass to the MDWrap/MDRef :param str subsection: Metadata tag to create. See :const:`SubSection.ALLOWED_SUBSECTIONS` :param str mdtype: Value for mdWrap/mdRef @MDTYPE :param str mode: 'mdwrap' or 'mdref' :param str loctype: Required if mode is 'mdref'. LOCTYPE of a mdRef :param str label: Optional. Label of a mdRef :param str otherloctype: Optional. OTHERLOCTYPE of a mdRef. :param str othermdtype: Optional. OTHERMDTYPE of a mdWrap. """ # HELP how handle multiple amdSecs? # When adding *MD which amdSec to add to? if mode.lower() == "mdwrap": othermdtype = kwargs.get("othermdtype") mdsec = MDWrap(md, mdtype, othermdtype) elif mode.lower() == "mdref": loctype = kwargs.get("loctype") label = kwargs.get("label") otherloctype = kwargs.get("otherloctype") mdsec = MDRef(md, mdtype, loctype, label, otherloctype) subsection = SubSection(subsection, mdsec) if subsection.subsection == "dmdSec": self.dmdsecs.append(subsection) else: try: amdsec = self.amdsecs[0] except IndexError: amdsec = AMDSec() self.amdsecs.append(amdsec) amdsec.subsections.append(subsection) return subsection
[ "def", "_add_metadata_element", "(", "self", ",", "md", ",", "subsection", ",", "mdtype", ",", "mode", "=", "\"mdwrap\"", ",", "*", "*", "kwargs", ")", ":", "# HELP how handle multiple amdSecs?", "# When adding *MD which amdSec to add to?", "if", "mode", ".", "lower...
:param md: Value to pass to the MDWrap/MDRef :param str subsection: Metadata tag to create. See :const:`SubSection.ALLOWED_SUBSECTIONS` :param str mdtype: Value for mdWrap/mdRef @MDTYPE :param str mode: 'mdwrap' or 'mdref' :param str loctype: Required if mode is 'mdref'. LOCTYPE of a mdRef :param str label: Optional. Label of a mdRef :param str otherloctype: Optional. OTHERLOCTYPE of a mdRef. :param str othermdtype: Optional. OTHERMDTYPE of a mdWrap.
[ ":", "param", "md", ":", "Value", "to", "pass", "to", "the", "MDWrap", "/", "MDRef", ":", "param", "str", "subsection", ":", "Metadata", "tag", "to", "create", ".", "See", ":", "const", ":", "SubSection", ".", "ALLOWED_SUBSECTIONS", ":", "param", "str", ...
python
train
dusty-phillips/opterator
opterator.py
https://github.com/dusty-phillips/opterator/blob/84fe31f22c73dc0a3666ed82c179461b1799c257/opterator.py#L52-L78
def portable_argspec(func): ''' Given a function, return a tuple of (positional_params, keyword_params, varargs, defaults, annotations) where * positional_params is a list of parameters that don't have default values * keyword_params is a list of parameters that have default values * varargs is the string name for variable arguments * defaults is a dict of default values for the keyword parameters * annotations is a dictionary of param_name: annotation pairs it may be empty, and on python 2 will always be empty. This function is portable between Python 2 and Python 3, and does some extra processing of the output from inspect. ''' if sys.version_info < (3, 0): # PYTHON 2 MUST DIE argnames, varargs, varkw, defaults = inspect.getargspec(func) annotations = {} else: ( argnames, varargs, varkw, defaults, kwa, kwd, annotations ) = inspect.getfullargspec(func) kw_boundary = len(argnames) - len(defaults) if defaults else len(argnames) positional_params = argnames[:kw_boundary] kw_params = argnames[kw_boundary:] return positional_params, kw_params, varargs, defaults, annotations
[ "def", "portable_argspec", "(", "func", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", ":", "# PYTHON 2 MUST DIE", "argnames", ",", "varargs", ",", "varkw", ",", "defaults", "=", "inspect", ".", "getargspec", "(", "func", ")", ...
Given a function, return a tuple of (positional_params, keyword_params, varargs, defaults, annotations) where * positional_params is a list of parameters that don't have default values * keyword_params is a list of parameters that have default values * varargs is the string name for variable arguments * defaults is a dict of default values for the keyword parameters * annotations is a dictionary of param_name: annotation pairs it may be empty, and on python 2 will always be empty. This function is portable between Python 2 and Python 3, and does some extra processing of the output from inspect.
[ "Given", "a", "function", "return", "a", "tuple", "of", "(", "positional_params", "keyword_params", "varargs", "defaults", "annotations", ")", "where", "*", "positional_params", "is", "a", "list", "of", "parameters", "that", "don", "t", "have", "default", "value...
python
train
glenfant/openxmllib
openxmllib/document.py
https://github.com/glenfant/openxmllib/blob/c8208f8ecd9fc3ef1e73c1db68081a65361afb3f/openxmllib/document.py#L166-L175
def allProperties(self): """Helper that merges core, extended and custom properties :return: mapping of all properties """ rval = {} rval.update(self.coreProperties) rval.update(self.extendedProperties) rval.update(self.customProperties) return rval
[ "def", "allProperties", "(", "self", ")", ":", "rval", "=", "{", "}", "rval", ".", "update", "(", "self", ".", "coreProperties", ")", "rval", ".", "update", "(", "self", ".", "extendedProperties", ")", "rval", ".", "update", "(", "self", ".", "customPr...
Helper that merges core, extended and custom properties :return: mapping of all properties
[ "Helper", "that", "merges", "core", "extended", "and", "custom", "properties" ]
python
train
DigitalGlobe/gbdxtools
gbdxtools/images/meta.py
https://github.com/DigitalGlobe/gbdxtools/blob/def62f8f2d77b168aa2bd115290aaa0f9a08a4bb/gbdxtools/images/meta.py#L260-L296
def pxbounds(self, geom, clip=False): """ Returns the bounds of a geometry object in pixel coordinates Args: geom: Shapely geometry object or GeoJSON as Python dictionary or WKT string clip (bool): Clip the bounds to the min/max extent of the image Returns: list: bounds in pixels [min x, min y, max x, max y] clipped to image bounds """ try: if isinstance(geom, dict): if 'geometry' in geom: geom = shape(geom['geometry']) else: geom = shape(geom) elif isinstance(geom, BaseGeometry): geom = shape(geom) else: geom = wkt.loads(geom) except: raise TypeError ("Invalid geometry object") # if geometry doesn't overlap the image, return an error if geom.disjoint(shape(self)): raise ValueError("Geometry outside of image bounds") # clip to pixels within the image (xmin, ymin, xmax, ymax) = ops.transform(self.__geo_transform__.rev, geom).bounds _nbands, ysize, xsize = self.shape if clip: xmin = max(xmin, 0) ymin = max(ymin, 0) xmax = min(xmax, xsize) ymax = min(ymax, ysize) return (xmin, ymin, xmax, ymax)
[ "def", "pxbounds", "(", "self", ",", "geom", ",", "clip", "=", "False", ")", ":", "try", ":", "if", "isinstance", "(", "geom", ",", "dict", ")", ":", "if", "'geometry'", "in", "geom", ":", "geom", "=", "shape", "(", "geom", "[", "'geometry'", "]", ...
Returns the bounds of a geometry object in pixel coordinates Args: geom: Shapely geometry object or GeoJSON as Python dictionary or WKT string clip (bool): Clip the bounds to the min/max extent of the image Returns: list: bounds in pixels [min x, min y, max x, max y] clipped to image bounds
[ "Returns", "the", "bounds", "of", "a", "geometry", "object", "in", "pixel", "coordinates" ]
python
valid
python-diamond/Diamond
src/diamond/handler/riemann.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/riemann.py#L69-L81
def get_default_config(self): """ Return the default config for the handler """ config = super(RiemannHandler, self).get_default_config() config.update({ 'host': '', 'port': 123, 'transport': 'tcp', }) return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "RiemannHandler", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'host'", ":", "''", ",", "'port'", ":", "123", ",", "'transport'...
Return the default config for the handler
[ "Return", "the", "default", "config", "for", "the", "handler" ]
python
train
samirelanduk/quickplots
quickplots/series.py
https://github.com/samirelanduk/quickplots/blob/59f5e6ff367b2c1c24ba7cf1805d03552034c6d8/quickplots/series.py#L277-L290
def write_to_canvas(self, canvas, name): """Writes the series to an OmniCanvas canvas. :param Canvas canvas: The canvas to write to. :param str name: The name to give the line graphic on the canvas.""" points = self.canvas_points() args = [] for point in points: args += list(point) canvas.add_polyline( *args, line_color=self.color(), line_style=self.linestyle(), line_width=self.linewidth(), name=name )
[ "def", "write_to_canvas", "(", "self", ",", "canvas", ",", "name", ")", ":", "points", "=", "self", ".", "canvas_points", "(", ")", "args", "=", "[", "]", "for", "point", "in", "points", ":", "args", "+=", "list", "(", "point", ")", "canvas", ".", ...
Writes the series to an OmniCanvas canvas. :param Canvas canvas: The canvas to write to. :param str name: The name to give the line graphic on the canvas.
[ "Writes", "the", "series", "to", "an", "OmniCanvas", "canvas", "." ]
python
train
gwastro/pycbc
pycbc/inference/io/__init__.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/io/__init__.py#L622-L648
def injections_from_cli(opts): """Gets injection parameters from the inference file(s). Parameters ---------- opts : argparser Argparser object that has the command-line objects to parse. Returns ------- FieldArray Array of the injection parameters from all of the input files given by ``opts.input_file``. """ input_files = opts.input_file if isinstance(input_files, str): input_files = [input_files] injections = None # loop over all input files getting the injection files for input_file in input_files: fp = loadfile(input_file, 'r') these_injs = fp.read_injections() if injections is None: injections = these_injs else: injections = injections.append(these_injs) return injections
[ "def", "injections_from_cli", "(", "opts", ")", ":", "input_files", "=", "opts", ".", "input_file", "if", "isinstance", "(", "input_files", ",", "str", ")", ":", "input_files", "=", "[", "input_files", "]", "injections", "=", "None", "# loop over all input files...
Gets injection parameters from the inference file(s). Parameters ---------- opts : argparser Argparser object that has the command-line objects to parse. Returns ------- FieldArray Array of the injection parameters from all of the input files given by ``opts.input_file``.
[ "Gets", "injection", "parameters", "from", "the", "inference", "file", "(", "s", ")", "." ]
python
train
sorgerlab/indra
indra/tools/assemble_corpus.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L43-L78
def load_statements(fname, as_dict=False): """Load statements from a pickle file. Parameters ---------- fname : str The name of the pickle file to load statements from. as_dict : Optional[bool] If True and the pickle file contains a dictionary of statements, it is returned as a dictionary. If False, the statements are always returned in a list. Default: False Returns ------- stmts : list A list or dict of statements that were loaded. """ logger.info('Loading %s...' % fname) with open(fname, 'rb') as fh: # Encoding argument not available in pickle for Python 2 if sys.version_info[0] < 3: stmts = pickle.load(fh) # Encoding argument specified here to enable compatibility with # pickle files created with Python 2 else: stmts = pickle.load(fh, encoding='latin1') if isinstance(stmts, dict): if as_dict: return stmts st = [] for pmid, st_list in stmts.items(): st += st_list stmts = st logger.info('Loaded %d statements' % len(stmts)) return stmts
[ "def", "load_statements", "(", "fname", ",", "as_dict", "=", "False", ")", ":", "logger", ".", "info", "(", "'Loading %s...'", "%", "fname", ")", "with", "open", "(", "fname", ",", "'rb'", ")", "as", "fh", ":", "# Encoding argument not available in pickle for ...
Load statements from a pickle file. Parameters ---------- fname : str The name of the pickle file to load statements from. as_dict : Optional[bool] If True and the pickle file contains a dictionary of statements, it is returned as a dictionary. If False, the statements are always returned in a list. Default: False Returns ------- stmts : list A list or dict of statements that were loaded.
[ "Load", "statements", "from", "a", "pickle", "file", "." ]
python
train
googleapis/oauth2client
oauth2client/client.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/client.py#L829-L863
def _do_revoke(self, http, token): """Revokes this credential and deletes the stored copy (if it exists). Args: http: an object to be used to make HTTP requests. token: A string used as the token to be revoked. Can be either an access_token or refresh_token. Raises: TokenRevokeError: If the revoke request does not return with a 200 OK. """ logger.info('Revoking token') query_params = {'token': token} token_revoke_uri = _helpers.update_query_params( self.revoke_uri, query_params) resp, content = transport.request(http, token_revoke_uri) if resp.status == http_client.METHOD_NOT_ALLOWED: body = urllib.parse.urlencode(query_params) resp, content = transport.request(http, token_revoke_uri, method='POST', body=body) if resp.status == http_client.OK: self.invalid = True else: error_msg = 'Invalid response {0}.'.format(resp.status) try: d = json.loads(_helpers._from_bytes(content)) if 'error' in d: error_msg = d['error'] except (TypeError, ValueError): pass raise TokenRevokeError(error_msg) if self.store: self.store.delete()
[ "def", "_do_revoke", "(", "self", ",", "http", ",", "token", ")", ":", "logger", ".", "info", "(", "'Revoking token'", ")", "query_params", "=", "{", "'token'", ":", "token", "}", "token_revoke_uri", "=", "_helpers", ".", "update_query_params", "(", "self", ...
Revokes this credential and deletes the stored copy (if it exists). Args: http: an object to be used to make HTTP requests. token: A string used as the token to be revoked. Can be either an access_token or refresh_token. Raises: TokenRevokeError: If the revoke request does not return with a 200 OK.
[ "Revokes", "this", "credential", "and", "deletes", "the", "stored", "copy", "(", "if", "it", "exists", ")", "." ]
python
valid
libfuse/python-fuse
fuseparts/subbedopts.py
https://github.com/libfuse/python-fuse/blob/2c088b657ad71faca6975b456f80b7d2c2cea2a7/fuseparts/subbedopts.py#L59-L76
def filter(self, other): """ Throw away those options which are not in the other one. Returns a new instance with the rejected options. """ self.canonify() other.canonify() rej = self.__class__() rej.optlist = self.optlist.difference(other.optlist) self.optlist.difference_update(rej.optlist) for x in self.optdict.copy(): if x not in other.optdict: self.optdict.pop(x) rej.optdict[x] = None return rej
[ "def", "filter", "(", "self", ",", "other", ")", ":", "self", ".", "canonify", "(", ")", "other", ".", "canonify", "(", ")", "rej", "=", "self", ".", "__class__", "(", ")", "rej", ".", "optlist", "=", "self", ".", "optlist", ".", "difference", "(",...
Throw away those options which are not in the other one. Returns a new instance with the rejected options.
[ "Throw", "away", "those", "options", "which", "are", "not", "in", "the", "other", "one", ".", "Returns", "a", "new", "instance", "with", "the", "rejected", "options", "." ]
python
train
wtsi-hgi/gitlab-build-variables
gitlabbuildvariables/executables/gitlab_set_variables.py
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/executables/gitlab_set_variables.py#L21-L35
def _parse_args(args: List[str]) -> _SetArgumentsRunConfig: """ Parses the given CLI arguments to get a run configuration. :param args: CLI arguments :return: run configuration derived from the given CLI arguments """ parser = argparse.ArgumentParser( prog="gitlab-set-variables", description="Tool for setting a GitLab project's build variables") add_common_arguments(parser, project=True) parser.add_argument("source", nargs="+", type=str, help="File to source build variables from. Can be a ini file, JSON file or a shell script " "containing 'export' statements") arguments = parser.parse_args(args) return _SetArgumentsRunConfig(arguments.source, arguments.project, arguments.url, arguments.token, arguments.debug)
[ "def", "_parse_args", "(", "args", ":", "List", "[", "str", "]", ")", "->", "_SetArgumentsRunConfig", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "\"gitlab-set-variables\"", ",", "description", "=", "\"Tool for setting a GitLab project's...
Parses the given CLI arguments to get a run configuration. :param args: CLI arguments :return: run configuration derived from the given CLI arguments
[ "Parses", "the", "given", "CLI", "arguments", "to", "get", "a", "run", "configuration", ".", ":", "param", "args", ":", "CLI", "arguments", ":", "return", ":", "run", "configuration", "derived", "from", "the", "given", "CLI", "arguments" ]
python
train
googleapis/google-cloud-python
pubsub/google/cloud/pubsub_v1/subscriber/_protocol/leaser.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/pubsub/google/cloud/pubsub_v1/subscriber/_protocol/leaser.py#L92-L159
def maintain_leases(self): """Maintain all of the leases being managed. This method modifies the ack deadline for all of the managed ack IDs, then waits for most of that time (but with jitter), and repeats. """ while self._manager.is_active and not self._stop_event.is_set(): # Determine the appropriate duration for the lease. This is # based off of how long previous messages have taken to ack, with # a sensible default and within the ranges allowed by Pub/Sub. p99 = self._manager.ack_histogram.percentile(99) _LOGGER.debug("The current p99 value is %d seconds.", p99) # Make a copy of the leased messages. This is needed because it's # possible for another thread to modify the dictionary while # we're iterating over it. leased_messages = copy.copy(self._leased_messages) # Drop any leases that are well beyond max lease time. This # ensures that in the event of a badly behaving actor, we can # drop messages and allow Pub/Sub to resend them. cutoff = time.time() - self._manager.flow_control.max_lease_duration to_drop = [ requests.DropRequest(ack_id, item.size) for ack_id, item in six.iteritems(leased_messages) if item.added_time < cutoff ] if to_drop: _LOGGER.warning( "Dropping %s items because they were leased too long.", len(to_drop) ) self._manager.dispatcher.drop(to_drop) # Remove dropped items from our copy of the leased messages (they # have already been removed from the real one by # self._manager.drop(), which calls self.remove()). for item in to_drop: leased_messages.pop(item.ack_id) # Create a streaming pull request. # We do not actually call `modify_ack_deadline` over and over # because it is more efficient to make a single request. ack_ids = leased_messages.keys() if ack_ids: _LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids)) # NOTE: This may not work as expected if ``consumer.active`` # has changed since we checked it. 
An implementation # without any sort of race condition would require a # way for ``send_request`` to fail when the consumer # is inactive. self._manager.dispatcher.modify_ack_deadline( [requests.ModAckRequest(ack_id, p99) for ack_id in ack_ids] ) # Now wait an appropriate period of time and do this again. # # We determine the appropriate period of time based on a random # period between 0 seconds and 90% of the lease. This use of # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases # where there are many clients. snooze = random.uniform(0.0, p99 * 0.9) _LOGGER.debug("Snoozing lease management for %f seconds.", snooze) self._stop_event.wait(timeout=snooze) _LOGGER.info("%s exiting.", _LEASE_WORKER_NAME)
[ "def", "maintain_leases", "(", "self", ")", ":", "while", "self", ".", "_manager", ".", "is_active", "and", "not", "self", ".", "_stop_event", ".", "is_set", "(", ")", ":", "# Determine the appropriate duration for the lease. This is", "# based off of how long previous ...
Maintain all of the leases being managed. This method modifies the ack deadline for all of the managed ack IDs, then waits for most of that time (but with jitter), and repeats.
[ "Maintain", "all", "of", "the", "leases", "being", "managed", "." ]
python
train
zarr-developers/zarr
zarr/hierarchy.py
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/hierarchy.py#L388-L411
def groups(self): """Return an iterator over (name, value) pairs for groups only. Examples -------- >>> import zarr >>> g1 = zarr.group() >>> g2 = g1.create_group('foo') >>> g3 = g1.create_group('bar') >>> d1 = g1.create_dataset('baz', shape=100, chunks=10) >>> d2 = g1.create_dataset('quux', shape=200, chunks=20) >>> for n, v in g1.groups(): ... print(n, type(v)) bar <class 'zarr.hierarchy.Group'> foo <class 'zarr.hierarchy.Group'> """ for key in sorted(listdir(self._store, self._path)): path = self._key_prefix + key if contains_group(self._store, path): yield key, Group(self._store, path=path, read_only=self._read_only, chunk_store=self._chunk_store, cache_attrs=self.attrs.cache, synchronizer=self._synchronizer)
[ "def", "groups", "(", "self", ")", ":", "for", "key", "in", "sorted", "(", "listdir", "(", "self", ".", "_store", ",", "self", ".", "_path", ")", ")", ":", "path", "=", "self", ".", "_key_prefix", "+", "key", "if", "contains_group", "(", "self", "....
Return an iterator over (name, value) pairs for groups only. Examples -------- >>> import zarr >>> g1 = zarr.group() >>> g2 = g1.create_group('foo') >>> g3 = g1.create_group('bar') >>> d1 = g1.create_dataset('baz', shape=100, chunks=10) >>> d2 = g1.create_dataset('quux', shape=200, chunks=20) >>> for n, v in g1.groups(): ... print(n, type(v)) bar <class 'zarr.hierarchy.Group'> foo <class 'zarr.hierarchy.Group'>
[ "Return", "an", "iterator", "over", "(", "name", "value", ")", "pairs", "for", "groups", "only", "." ]
python
train
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L2964-L3001
def _is_number_match_OO(numobj1_in, numobj2_in): """Takes two phone number objects and compares them for equality.""" # We only care about the fields that uniquely define a number, so we copy these across explicitly. numobj1 = _copy_core_fields_only(numobj1_in) numobj2 = _copy_core_fields_only(numobj2_in) # Early exit if both had extensions and these are different. if (numobj1.extension is not None and numobj2.extension is not None and numobj1.extension != numobj2.extension): return MatchType.NO_MATCH country_code1 = numobj1.country_code country_code2 = numobj2.country_code # Both had country_code specified. if country_code1 != 0 and country_code2 != 0: if numobj1 == numobj2: return MatchType.EXACT_MATCH elif (country_code1 == country_code2 and _is_national_number_suffix_of_other(numobj1, numobj2)): # A SHORT_NSN_MATCH occurs if there is a difference because of the # presence or absence of an 'Italian leading zero', the presence # or absence of an extension, or one NSN being a shorter variant # of the other. return MatchType.SHORT_NSN_MATCH # This is not a match. return MatchType.NO_MATCH # Checks cases where one or both country_code fields were not # specified. To make equality checks easier, we first set the country_code # fields to be equal. numobj1.country_code = country_code2 # If all else was the same, then this is an NSN_MATCH. if numobj1 == numobj2: return MatchType.NSN_MATCH if _is_national_number_suffix_of_other(numobj1, numobj2): return MatchType.SHORT_NSN_MATCH return MatchType.NO_MATCH
[ "def", "_is_number_match_OO", "(", "numobj1_in", ",", "numobj2_in", ")", ":", "# We only care about the fields that uniquely define a number, so we copy these across explicitly.", "numobj1", "=", "_copy_core_fields_only", "(", "numobj1_in", ")", "numobj2", "=", "_copy_core_fields_o...
Takes two phone number objects and compares them for equality.
[ "Takes", "two", "phone", "number", "objects", "and", "compares", "them", "for", "equality", "." ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L28501-L28511
def on_dn_d_mode_change(self, dnd_mode): """Notification when the drag'n drop mode changes. in dnd_mode of type :class:`DnDMode` The new mode for drag'n drop. """ if not isinstance(dnd_mode, DnDMode): raise TypeError("dnd_mode can only be an instance of type DnDMode") self._call("onDnDModeChange", in_p=[dnd_mode])
[ "def", "on_dn_d_mode_change", "(", "self", ",", "dnd_mode", ")", ":", "if", "not", "isinstance", "(", "dnd_mode", ",", "DnDMode", ")", ":", "raise", "TypeError", "(", "\"dnd_mode can only be an instance of type DnDMode\"", ")", "self", ".", "_call", "(", "\"onDnDM...
Notification when the drag'n drop mode changes. in dnd_mode of type :class:`DnDMode` The new mode for drag'n drop.
[ "Notification", "when", "the", "drag", "n", "drop", "mode", "changes", "." ]
python
train
odlgroup/odl
odl/operator/pspace_ops.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/pspace_ops.py#L315-L386
def derivative(self, x): """Derivative of the product space operator. Parameters ---------- x : `domain` element The point to take the derivative in Returns ------- adjoint : linear`ProductSpaceOperator` The derivative Examples -------- >>> r3 = odl.rn(3) >>> pspace = odl.ProductSpace(r3, r3) >>> I = odl.IdentityOperator(r3) >>> x = pspace.element([[1, 2, 3], [4, 5, 6]]) Example with linear operator (derivative is itself) >>> prod_op = ProductSpaceOperator([[0, I], [0, 0]], ... domain=pspace, range=pspace) >>> prod_op(x) ProductSpace(rn(3), 2).element([ [ 4., 5., 6.], [ 0., 0., 0.] ]) >>> prod_op.derivative(x)(x) ProductSpace(rn(3), 2).element([ [ 4., 5., 6.], [ 0., 0., 0.] ]) Example with affine operator >>> residual_op = I - r3.element([1, 1, 1]) >>> op = ProductSpaceOperator([[0, residual_op], [0, 0]], ... domain=pspace, range=pspace) Calling operator gives offset by [1, 1, 1] >>> op(x) ProductSpace(rn(3), 2).element([ [ 3., 4., 5.], [ 0., 0., 0.] ]) Derivative of affine operator does not have this offset >>> op.derivative(x)(x) ProductSpace(rn(3), 2).element([ [ 4., 5., 6.], [ 0., 0., 0.] ]) """ # Lazy import to improve `import odl` time import scipy.sparse # Short circuit optimization if self.is_linear: return self deriv_ops = [op.derivative(x[col]) for op, col in zip(self.ops.data, self.ops.col)] data = np.empty(len(deriv_ops), dtype=object) data[:] = deriv_ops indices = [self.ops.row, self.ops.col] shape = self.ops.shape deriv_matrix = scipy.sparse.coo_matrix((data, indices), shape) return ProductSpaceOperator(deriv_matrix, self.domain, self.range)
[ "def", "derivative", "(", "self", ",", "x", ")", ":", "# Lazy import to improve `import odl` time", "import", "scipy", ".", "sparse", "# Short circuit optimization", "if", "self", ".", "is_linear", ":", "return", "self", "deriv_ops", "=", "[", "op", ".", "derivati...
Derivative of the product space operator. Parameters ---------- x : `domain` element The point to take the derivative in Returns ------- adjoint : linear`ProductSpaceOperator` The derivative Examples -------- >>> r3 = odl.rn(3) >>> pspace = odl.ProductSpace(r3, r3) >>> I = odl.IdentityOperator(r3) >>> x = pspace.element([[1, 2, 3], [4, 5, 6]]) Example with linear operator (derivative is itself) >>> prod_op = ProductSpaceOperator([[0, I], [0, 0]], ... domain=pspace, range=pspace) >>> prod_op(x) ProductSpace(rn(3), 2).element([ [ 4., 5., 6.], [ 0., 0., 0.] ]) >>> prod_op.derivative(x)(x) ProductSpace(rn(3), 2).element([ [ 4., 5., 6.], [ 0., 0., 0.] ]) Example with affine operator >>> residual_op = I - r3.element([1, 1, 1]) >>> op = ProductSpaceOperator([[0, residual_op], [0, 0]], ... domain=pspace, range=pspace) Calling operator gives offset by [1, 1, 1] >>> op(x) ProductSpace(rn(3), 2).element([ [ 3., 4., 5.], [ 0., 0., 0.] ]) Derivative of affine operator does not have this offset >>> op.derivative(x)(x) ProductSpace(rn(3), 2).element([ [ 4., 5., 6.], [ 0., 0., 0.] ])
[ "Derivative", "of", "the", "product", "space", "operator", "." ]
python
train
kinegratii/borax
borax/calendars/lunardate.py
https://github.com/kinegratii/borax/blob/921649f9277e3f657b6dea5a80e67de9ee5567f6/borax/calendars/lunardate.py#L288-L309
def get_term_info(year, month, day): """Parse solar term and stem-branch year/month/day from a solar date. (sy, sm, sd) => (term, next_gz_month) term for year 2101,:2101.1.5(初六) 小寒 2101.1.20(廿一) 大寒 """ if year == 2101: days = [5, 20] else: days = TermUtils.parse_term_days(year) term_index1 = 2 * (month - 1) term_index2 = 2 * (month - 1) + 1 day1 = days[term_index1] day2 = days[term_index2] if day == day1: term_name = TERMS_CN[term_index1] elif day == day2: term_name = TERMS_CN[term_index2] else: term_name = None next_gz_month = day >= day1 return term_name, next_gz_month
[ "def", "get_term_info", "(", "year", ",", "month", ",", "day", ")", ":", "if", "year", "==", "2101", ":", "days", "=", "[", "5", ",", "20", "]", "else", ":", "days", "=", "TermUtils", ".", "parse_term_days", "(", "year", ")", "term_index1", "=", "2...
Parse solar term and stem-branch year/month/day from a solar date. (sy, sm, sd) => (term, next_gz_month) term for year 2101,:2101.1.5(初六) 小寒 2101.1.20(廿一) 大寒
[ "Parse", "solar", "term", "and", "stem", "-", "branch", "year", "/", "month", "/", "day", "from", "a", "solar", "date", ".", "(", "sy", "sm", "sd", ")", "=", ">", "(", "term", "next_gz_month", ")", "term", "for", "year", "2101", ":", "2101", ".", ...
python
train
gem/oq-engine
openquake/hazardlib/gsim/edwards_fah_2013a.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/edwards_fah_2013a.py#L201-L208
def _compute_term_4(self, C, mag, R): """ (a16 + a17.*M + a18.*M.*M + a19.*M.*M.*M).*(d(r).^3) """ return ( (C['a16'] + C['a17'] * mag + C['a18'] * np.power(mag, 2) + C['a19'] * np.power(mag, 3)) * np.power(R, 3) )
[ "def", "_compute_term_4", "(", "self", ",", "C", ",", "mag", ",", "R", ")", ":", "return", "(", "(", "C", "[", "'a16'", "]", "+", "C", "[", "'a17'", "]", "*", "mag", "+", "C", "[", "'a18'", "]", "*", "np", ".", "power", "(", "mag", ",", "2"...
(a16 + a17.*M + a18.*M.*M + a19.*M.*M.*M).*(d(r).^3)
[ "(", "a16", "+", "a17", ".", "*", "M", "+", "a18", ".", "*", "M", ".", "*", "M", "+", "a19", ".", "*", "M", ".", "*", "M", ".", "*", "M", ")", ".", "*", "(", "d", "(", "r", ")", ".", "^3", ")" ]
python
train
codeinn/vcs
vcs/backends/git/repository.py
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/backends/git/repository.py#L526-L574
def get_diff(self, rev1, rev2, path=None, ignore_whitespace=False, context=3): """ Returns (git like) *diff*, as plain text. Shows changes introduced by ``rev2`` since ``rev1``. :param rev1: Entry point from which diff is shown. Can be ``self.EMPTY_CHANGESET`` - in this case, patch showing all the changes since empty state of the repository until ``rev2`` :param rev2: Until which revision changes should be shown. :param ignore_whitespace: If set to ``True``, would not show whitespace changes. Defaults to ``False``. :param context: How many lines before/after changed lines should be shown. Defaults to ``3``. """ flags = ['-U%s' % context, '--full-index', '--binary', '-p', '-M', '--abbrev=40'] if ignore_whitespace: flags.append('-w') if hasattr(rev1, 'raw_id'): rev1 = getattr(rev1, 'raw_id') if hasattr(rev2, 'raw_id'): rev2 = getattr(rev2, 'raw_id') if rev1 == self.EMPTY_CHANGESET: rev2 = self.get_changeset(rev2).raw_id cmd = ' '.join(['show'] + flags + [rev2]) else: rev1 = self.get_changeset(rev1).raw_id rev2 = self.get_changeset(rev2).raw_id cmd = ' '.join(['diff'] + flags + [rev1, rev2]) if path: cmd += ' -- "%s"' % path stdout, stderr = self.run_git_command(cmd) # If we used 'show' command, strip first few lines (until actual diff # starts) if rev1 == self.EMPTY_CHANGESET: lines = stdout.splitlines() x = 0 for line in lines: if line.startswith('diff'): break x += 1 # Append new line just like 'diff' command do stdout = '\n'.join(lines[x:]) + '\n' return stdout
[ "def", "get_diff", "(", "self", ",", "rev1", ",", "rev2", ",", "path", "=", "None", ",", "ignore_whitespace", "=", "False", ",", "context", "=", "3", ")", ":", "flags", "=", "[", "'-U%s'", "%", "context", ",", "'--full-index'", ",", "'--binary'", ",", ...
Returns (git like) *diff*, as plain text. Shows changes introduced by ``rev2`` since ``rev1``. :param rev1: Entry point from which diff is shown. Can be ``self.EMPTY_CHANGESET`` - in this case, patch showing all the changes since empty state of the repository until ``rev2`` :param rev2: Until which revision changes should be shown. :param ignore_whitespace: If set to ``True``, would not show whitespace changes. Defaults to ``False``. :param context: How many lines before/after changed lines should be shown. Defaults to ``3``.
[ "Returns", "(", "git", "like", ")", "*", "diff", "*", "as", "plain", "text", ".", "Shows", "changes", "introduced", "by", "rev2", "since", "rev1", "." ]
python
train
doconix/django-mako-plus
django_mako_plus/filters.py
https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/filters.py#L64-L124
def alternate_syntax(local, using, **kwargs): ''' A Mako filter that renders a block of text using a different template engine than Mako. The named template engine must be listed in settings.TEMPLATES. The template context variables are available in the embedded template. Specify kwargs to add additional variables created within the template. This is a kludge that should be used sparingly. The `dmp_include` template tag is often a better option. The following examples assume you have installed the django_mustache template engine in settings.py: ## Simple expression in Mustache syntax: ${ '{{ name }}' | template_syntax(local, 'django_mustache') } ## Embedded Mustache code block: <%block filter="template_syntax(local, 'django_mustache')"> {{#repo}} <b>{{name}}</b> {{/repo}} {{^repo}} No repos :( {{/repo}} </%block> Rendering Django or Jinja2 templates should be done with `django_syntax` and `jinja2_syntax` because it doesn't require the using parameter. ''' # get the request (the MakoTemplateAdapter above places this in the context) request = local.context['request'] if isinstance(local.context, RequestContext) else None # get the current Mako template object so we can attach the compiled string for later use # Mako caches and automatically recreates this if the file changes mako_template = local.template if not hasattr(mako_template, '__compiled_template_syntax'): mako_template.__compiled_template_syntax = {} # create a closure so we can still get to context and using (Mako filters take exactly one parameter: the string to filter) def wrap(template_st): # get the template object, or create and cache it try: template = mako_template.__compiled_template_syntax[template_st] except KeyError: engine = engines[using] template = engine.from_string(template_st) # using full string, even if long, as the key doesn't really affect performance of python's hash (see http://stackoverflow.com/questions/28150047/efficiency-of-long-str-keys-in-python-dictionary) 
mako_template.__compiled_template_syntax[template_st] = template # create a copy the context and add any kwargs to it dcontext = dict(local.context) dcontext.update(kwargs) # print a debug statement to the log log.debug('rendering embedded expression or block using %s template engine', using) # render the template with the context return template.render(context=dcontext, request=request) # return the embedded function return wrap
[ "def", "alternate_syntax", "(", "local", ",", "using", ",", "*", "*", "kwargs", ")", ":", "# get the request (the MakoTemplateAdapter above places this in the context)", "request", "=", "local", ".", "context", "[", "'request'", "]", "if", "isinstance", "(", "local", ...
A Mako filter that renders a block of text using a different template engine than Mako. The named template engine must be listed in settings.TEMPLATES. The template context variables are available in the embedded template. Specify kwargs to add additional variables created within the template. This is a kludge that should be used sparingly. The `dmp_include` template tag is often a better option. The following examples assume you have installed the django_mustache template engine in settings.py: ## Simple expression in Mustache syntax: ${ '{{ name }}' | template_syntax(local, 'django_mustache') } ## Embedded Mustache code block: <%block filter="template_syntax(local, 'django_mustache')"> {{#repo}} <b>{{name}}</b> {{/repo}} {{^repo}} No repos :( {{/repo}} </%block> Rendering Django or Jinja2 templates should be done with `django_syntax` and `jinja2_syntax` because it doesn't require the using parameter.
[ "A", "Mako", "filter", "that", "renders", "a", "block", "of", "text", "using", "a", "different", "template", "engine", "than", "Mako", ".", "The", "named", "template", "engine", "must", "be", "listed", "in", "settings", ".", "TEMPLATES", "." ]
python
train
mobinrg/rpi_spark_drives
JMRPiSpark/Drives/Audio/RPiAudio.py
https://github.com/mobinrg/rpi_spark_drives/blob/e1602d8268a5ef48e9e0a8b37de89e0233f946ea/JMRPiSpark/Drives/Audio/RPiAudio.py#L91-L115
def off(self): """! \~english Close Audio output. set pin mode to output @return a boolean value. if True means close audio output is OK otherwise failed to close. \~chinese 关闭音频输出。 将引脚模式设置为输出 @return 布尔值。 如果为 True 关闭音频输出成功,否则关闭不成功。 """ isOK = True try: if self.channelR!=None: sub.call(["gpio","-g","mode", "{}".format(self.channelR), self.PIN_MODE_OUTPUT ]) except: isOK = False print("Close audio right channel failed.") try: if self.channelL!=None: sub.call(["gpio","-g","mode", "{}".format(self.channelL), self.PIN_MODE_OUTPUT ]) except: isOK = False print("Close audio left channel failed.") return isOK
[ "def", "off", "(", "self", ")", ":", "isOK", "=", "True", "try", ":", "if", "self", ".", "channelR", "!=", "None", ":", "sub", ".", "call", "(", "[", "\"gpio\"", ",", "\"-g\"", ",", "\"mode\"", ",", "\"{}\"", ".", "format", "(", "self", ".", "cha...
! \~english Close Audio output. set pin mode to output @return a boolean value. if True means close audio output is OK otherwise failed to close. \~chinese 关闭音频输出。 将引脚模式设置为输出 @return 布尔值。 如果为 True 关闭音频输出成功,否则关闭不成功。
[ "!", "\\", "~english", "Close", "Audio", "output", ".", "set", "pin", "mode", "to", "output", "@return", "a", "boolean", "value", ".", "if", "True", "means", "close", "audio", "output", "is", "OK", "otherwise", "failed", "to", "close", "." ]
python
train
quintusdias/glymur
glymur/lib/openjpeg.py
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/lib/openjpeg.py#L497-L516
def encode(cinfo, cio, image): """Wrapper for openjpeg library function opj_encode. Encodes an image into a JPEG-2000 codestream. Parameters ---------- cinfo : compression handle cio : output buffer stream image : image to encode """ argtypes = [ctypes.POINTER(CompressionInfoType), ctypes.POINTER(CioType), ctypes.POINTER(ImageType)] OPENJPEG.opj_encode.argtypes = argtypes OPENJPEG.opj_encode.restype = ctypes.c_int status = OPENJPEG.opj_encode(cinfo, cio, image) return status
[ "def", "encode", "(", "cinfo", ",", "cio", ",", "image", ")", ":", "argtypes", "=", "[", "ctypes", ".", "POINTER", "(", "CompressionInfoType", ")", ",", "ctypes", ".", "POINTER", "(", "CioType", ")", ",", "ctypes", ".", "POINTER", "(", "ImageType", ")"...
Wrapper for openjpeg library function opj_encode. Encodes an image into a JPEG-2000 codestream. Parameters ---------- cinfo : compression handle cio : output buffer stream image : image to encode
[ "Wrapper", "for", "openjpeg", "library", "function", "opj_encode", "." ]
python
train
matousc89/padasip
padasip/filters/rls.py
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/rls.py#L208-L224
def adapt(self, d, x): """ Adapt weights according one desired value and its input. **Args:** * `d` : desired value (float) * `x` : input array (1-dimensional array) """ y = np.dot(self.w, x) e = d - y R1 = np.dot(np.dot(np.dot(self.R,x),x.T),self.R) R2 = self.mu + np.dot(np.dot(x,self.R),x.T) self.R = 1/self.mu * (self.R - R1/R2) dw = np.dot(self.R, x.T) * e self.w += dw
[ "def", "adapt", "(", "self", ",", "d", ",", "x", ")", ":", "y", "=", "np", ".", "dot", "(", "self", ".", "w", ",", "x", ")", "e", "=", "d", "-", "y", "R1", "=", "np", ".", "dot", "(", "np", ".", "dot", "(", "np", ".", "dot", "(", "sel...
Adapt weights according one desired value and its input. **Args:** * `d` : desired value (float) * `x` : input array (1-dimensional array)
[ "Adapt", "weights", "according", "one", "desired", "value", "and", "its", "input", "." ]
python
train
vallis/libstempo
libstempo/toasim.py
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L350-L360
def add_line(psr,f,A,offset=0.5): """ Add a line of frequency `f` [Hz] and amplitude `A` [s], with origin at a fraction `offset` through the dataset. """ t = psr.toas() t0 = offset * (N.max(t) - N.min(t)) sine = A * N.cos(2 * math.pi * f * day * (t - t0)) psr.stoas[:] += sine / day
[ "def", "add_line", "(", "psr", ",", "f", ",", "A", ",", "offset", "=", "0.5", ")", ":", "t", "=", "psr", ".", "toas", "(", ")", "t0", "=", "offset", "*", "(", "N", ".", "max", "(", "t", ")", "-", "N", ".", "min", "(", "t", ")", ")", "si...
Add a line of frequency `f` [Hz] and amplitude `A` [s], with origin at a fraction `offset` through the dataset.
[ "Add", "a", "line", "of", "frequency", "f", "[", "Hz", "]", "and", "amplitude", "A", "[", "s", "]", "with", "origin", "at", "a", "fraction", "offset", "through", "the", "dataset", "." ]
python
train
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L820-L826
def htmlParseFile(filename, encoding): """parse an HTML file and build a tree. Automatic support for ZLIB/Compress compressed document is provided by default if found at compile-time. """ ret = libxml2mod.htmlParseFile(filename, encoding) if ret is None:raise parserError('htmlParseFile() failed') return xmlDoc(_obj=ret)
[ "def", "htmlParseFile", "(", "filename", ",", "encoding", ")", ":", "ret", "=", "libxml2mod", ".", "htmlParseFile", "(", "filename", ",", "encoding", ")", "if", "ret", "is", "None", ":", "raise", "parserError", "(", "'htmlParseFile() failed'", ")", "return", ...
parse an HTML file and build a tree. Automatic support for ZLIB/Compress compressed document is provided by default if found at compile-time.
[ "parse", "an", "HTML", "file", "and", "build", "a", "tree", ".", "Automatic", "support", "for", "ZLIB", "/", "Compress", "compressed", "document", "is", "provided", "by", "default", "if", "found", "at", "compile", "-", "time", "." ]
python
train
fbcotter/py3nvml
py3nvml/py3nvml.py
https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L5333-L5369
def nvmlDeviceGetTopologyNearestGpus(device, level): r""" /** * Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level * For all products. * Supported on Linux only. * * @param device The identifier of the first device * @param level The \ref nvmlGpuTopologyLevel_t level to search for other GPUs * @param count When zero, is set to the number of matching GPUs such that \a deviceArray * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count * number of device handles. * @param deviceArray An array of device handles for GPUs found at \a level * * @return * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a level, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery */ nvmlReturn_t DECLDIR nvmlDeviceGetTopologyNearestGpus """ c_count = c_uint(0) fn = _nvmlGetFunctionPointer("nvmlDeviceGetTopologyNearestGpus") # First call will get the size ret = fn(device, level, byref(c_count), None) if ret != NVML_SUCCESS: raise NVMLError(ret) # call again with a buffer device_array = c_nvmlDevice_t * c_count.value c_devices = device_array() ret = fn(device, level, byref(c_count), c_devices) _nvmlCheckReturn(ret) return list(c_devices[0:c_count.value])
[ "def", "nvmlDeviceGetTopologyNearestGpus", "(", "device", ",", "level", ")", ":", "c_count", "=", "c_uint", "(", "0", ")", "fn", "=", "_nvmlGetFunctionPointer", "(", "\"nvmlDeviceGetTopologyNearestGpus\"", ")", "# First call will get the size", "ret", "=", "fn", "(", ...
r""" /** * Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level * For all products. * Supported on Linux only. * * @param device The identifier of the first device * @param level The \ref nvmlGpuTopologyLevel_t level to search for other GPUs * @param count When zero, is set to the number of matching GPUs such that \a deviceArray * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count * number of device handles. * @param deviceArray An array of device handles for GPUs found at \a level * * @return * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a level, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery */ nvmlReturn_t DECLDIR nvmlDeviceGetTopologyNearestGpus
[ "r", "/", "**", "*", "Retrieve", "the", "set", "of", "GPUs", "that", "are", "nearest", "to", "a", "given", "device", "at", "a", "specific", "interconnectivity", "level", "*", "For", "all", "products", ".", "*", "Supported", "on", "Linux", "only", ".", ...
python
train
emory-libraries/eulxml
eulxml/xmlmap/fields.py
https://github.com/emory-libraries/eulxml/blob/17d71c7d98c0cebda9932b7f13e72093805e1fe2/eulxml/xmlmap/fields.py#L1187-L1221
def get_field(self, schema): """Get the requested type definition from the schema and return the appropriate :class:`~eulxml.xmlmap.fields.Field`. :param schema: instance of :class:`eulxml.xmlmap.core.XsdSchema` :rtype: :class:`eulxml.xmlmap.fields.Field` """ type = schema.get_type(self.schema_type) logger.debug('Found schema type %s; base type %s, restricted values %s' % \ (self.schema_type, type.base_type(), type.restricted_values)) kwargs = {} if type.restricted_values: # field has a restriction with enumerated values - pass as choices to field # - empty value at beginning of list for unset value; for required fields, # will force user to select a value, rather than first item being default choices = [] choices.extend(type.restricted_values) # restricted values could include a blank # if it's there, remove it so we don't get two if '' in choices: choices.remove('') choices.insert(0, '') # add blank choice at the beginning of the list kwargs['choices'] = choices # TODO: possibly also useful to look for pattern restrictions basetype = type.base_type() if basetype == 'string': newfield = StringField(self.xpath, required=self.required, **kwargs) # copy original creation counter to newly created field # to preserve declaration order newfield.creation_counter = self.creation_counter return newfield else: raise Exception("basetype %s is not yet supported by SchemaField" % basetype)
[ "def", "get_field", "(", "self", ",", "schema", ")", ":", "type", "=", "schema", ".", "get_type", "(", "self", ".", "schema_type", ")", "logger", ".", "debug", "(", "'Found schema type %s; base type %s, restricted values %s'", "%", "(", "self", ".", "schema_type...
Get the requested type definition from the schema and return the appropriate :class:`~eulxml.xmlmap.fields.Field`. :param schema: instance of :class:`eulxml.xmlmap.core.XsdSchema` :rtype: :class:`eulxml.xmlmap.fields.Field`
[ "Get", "the", "requested", "type", "definition", "from", "the", "schema", "and", "return", "the", "appropriate", ":", "class", ":", "~eulxml", ".", "xmlmap", ".", "fields", ".", "Field", "." ]
python
train
radical-cybertools/radical.entk
src/radical/entk/execman/rp/task_processor.py
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/execman/rp/task_processor.py#L423-L472
def create_task_from_cu(cu, prof=None): """ Purpose: Create a Task based on the Compute Unit. Details: Currently, only the uid, parent_stage and parent_pipeline are retrieved. The exact initial Task (that was converted to a CUD) cannot be recovered as the RP API does not provide the same attributes for a CU as for a CUD. Also, this is not required for the most part. TODO: Add exit code, stdout, stderr and path attributes to a Task. These can be extracted from a CU :arguments: :cu: RP Compute Unit :return: Task """ try: logger.debug('Create Task from CU %s' % cu.name) if prof: prof.prof('task from cu - create', uid=cu.name.split(',')[0].strip()) task = Task() task.uid = cu.name.split(',')[0].strip() task.name = cu.name.split(',')[1].strip() task.parent_stage['uid'] = cu.name.split(',')[2].strip() task.parent_stage['name'] = cu.name.split(',')[3].strip() task.parent_pipeline['uid'] = cu.name.split(',')[4].strip() task.parent_pipeline['name'] = cu.name.split(',')[5].strip() task.rts_uid = cu.uid if cu.state == rp.DONE: task.exit_code = 0 else: task.exit_code = 1 task.path = ru.Url(cu.sandbox).path if prof: prof.prof('task from cu - done', uid=cu.name.split(',')[0].strip()) logger.debug('Task %s created from CU %s' % (task.uid, cu.name)) return task except Exception, ex: logger.exception('Task creation from CU failed, error: %s' % ex) raise
[ "def", "create_task_from_cu", "(", "cu", ",", "prof", "=", "None", ")", ":", "try", ":", "logger", ".", "debug", "(", "'Create Task from CU %s'", "%", "cu", ".", "name", ")", "if", "prof", ":", "prof", ".", "prof", "(", "'task from cu - create'", ",", "u...
Purpose: Create a Task based on the Compute Unit. Details: Currently, only the uid, parent_stage and parent_pipeline are retrieved. The exact initial Task (that was converted to a CUD) cannot be recovered as the RP API does not provide the same attributes for a CU as for a CUD. Also, this is not required for the most part. TODO: Add exit code, stdout, stderr and path attributes to a Task. These can be extracted from a CU :arguments: :cu: RP Compute Unit :return: Task
[ "Purpose", ":", "Create", "a", "Task", "based", "on", "the", "Compute", "Unit", "." ]
python
train
numenta/nupic
src/nupic/swarming/hypersearch/permutation_helpers.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch/permutation_helpers.py#L212-L236
def newPosition(self, globalBestPosition, rng): """See comments in base class.""" # First, update the velocity. The new velocity is given as: # v = (inertia * v) + (cogRate * r1 * (localBest-pos)) # + (socRate * r2 * (globalBest-pos)) # # where r1 and r2 are random numbers between 0 and 1.0 lb=float(Configuration.get("nupic.hypersearch.randomLowerBound")) ub=float(Configuration.get("nupic.hypersearch.randomUpperBound")) self._velocity = (self._velocity * self._inertia + rng.uniform(lb, ub) * self._cogRate * (self._bestPosition - self.getPosition())) if globalBestPosition is not None: self._velocity += rng.uniform(lb, ub) * self._socRate * ( globalBestPosition - self.getPosition()) # update position based on velocity self._position += self._velocity # Clip it self._position = max(self.min, self._position) self._position = min(self.max, self._position) # Return it return self.getPosition()
[ "def", "newPosition", "(", "self", ",", "globalBestPosition", ",", "rng", ")", ":", "# First, update the velocity. The new velocity is given as:", "# v = (inertia * v) + (cogRate * r1 * (localBest-pos))", "# + (socRate * r2 * (globalBest-pos))", "#", "# where r1 and r...
See comments in base class.
[ "See", "comments", "in", "base", "class", "." ]
python
valid
Dallinger/Dallinger
dallinger/data.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/data.py#L139-L145
def register(id, url=None): """Register a UUID key in the global S3 bucket.""" bucket = registration_s3_bucket() key = registration_key(id) obj = bucket.Object(key) obj.put(Body=url or "missing") return _generate_s3_url(bucket, key)
[ "def", "register", "(", "id", ",", "url", "=", "None", ")", ":", "bucket", "=", "registration_s3_bucket", "(", ")", "key", "=", "registration_key", "(", "id", ")", "obj", "=", "bucket", ".", "Object", "(", "key", ")", "obj", ".", "put", "(", "Body", ...
Register a UUID key in the global S3 bucket.
[ "Register", "a", "UUID", "key", "in", "the", "global", "S3", "bucket", "." ]
python
train
peterldowns/lggr
lggr/__init__.py
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L263-L268
def error(self, msg, *args, **kwargs): """ Log a message with ERROR level. Automatically includes stack and process info unless they are specifically not included. """ kwargs.setdefault('inc_stackinfo', True) kwargs.setdefault('inc_multiproc', True) self.log(ERROR, msg, args, **kwargs)
[ "def", "error", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'inc_stackinfo'", ",", "True", ")", "kwargs", ".", "setdefault", "(", "'inc_multiproc'", ",", "True", ")", "self", ".", "...
Log a message with ERROR level. Automatically includes stack and process info unless they are specifically not included.
[ "Log", "a", "message", "with", "ERROR", "level", ".", "Automatically", "includes", "stack", "and", "process", "info", "unless", "they", "are", "specifically", "not", "included", "." ]
python
train
wbond/certbuilder
certbuilder/__init__.py
https://github.com/wbond/certbuilder/blob/969dae884fa7f73988bbf1dcbec4fb51e234a3c5/certbuilder/__init__.py#L688-L699
def ocsp_no_check(self, value): """ A bool - if the certificate should have the OCSP no check extension. Only applicable to certificates created for signing OCSP responses. Such certificates should normally be issued for a very short period of time since they are effectively whitelisted by clients. """ if value is None: self._ocsp_no_check = None else: self._ocsp_no_check = bool(value)
[ "def", "ocsp_no_check", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "self", ".", "_ocsp_no_check", "=", "None", "else", ":", "self", ".", "_ocsp_no_check", "=", "bool", "(", "value", ")" ]
A bool - if the certificate should have the OCSP no check extension. Only applicable to certificates created for signing OCSP responses. Such certificates should normally be issued for a very short period of time since they are effectively whitelisted by clients.
[ "A", "bool", "-", "if", "the", "certificate", "should", "have", "the", "OCSP", "no", "check", "extension", ".", "Only", "applicable", "to", "certificates", "created", "for", "signing", "OCSP", "responses", ".", "Such", "certificates", "should", "normally", "be...
python
train
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/client.py
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L554-L589
def set_default_calibrator(self, parameter, type, data): # pylint: disable=W0622 """ Apply a calibrator while processing raw values of the specified parameter. If there is already a default calibrator associated to this parameter, that calibrator gets replaced. .. note:: Contextual calibrators take precedence over the default calibrator See :meth:`set_calibrators` for setting contextual calibrators. Two types of calibrators can be applied: * Polynomial calibrators apply a polynomial expression of the form: `y = a + bx + cx^2 + ...`. The `data` argument must be an array of floats ``[a, b, c, ...]``. * Spline calibrators interpolate the raw value between a set of points which represent a linear curve. The `data` argument must be an array of ``[x, y]`` points. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param str type: One of ``polynomial`` or ``spline``. :param data: Calibration definition for the selected type. """ req = mdb_pb2.ChangeParameterRequest() req.action = mdb_pb2.ChangeParameterRequest.SET_DEFAULT_CALIBRATOR if type: _add_calib(req.defaultCalibrator, type, data) url = '/mdb/{}/{}/parameters/{}'.format( self._instance, self._processor, parameter) response = self._client.post_proto(url, data=req.SerializeToString())
[ "def", "set_default_calibrator", "(", "self", ",", "parameter", ",", "type", ",", "data", ")", ":", "# pylint: disable=W0622", "req", "=", "mdb_pb2", ".", "ChangeParameterRequest", "(", ")", "req", ".", "action", "=", "mdb_pb2", ".", "ChangeParameterRequest", "....
Apply a calibrator while processing raw values of the specified parameter. If there is already a default calibrator associated to this parameter, that calibrator gets replaced. .. note:: Contextual calibrators take precedence over the default calibrator See :meth:`set_calibrators` for setting contextual calibrators. Two types of calibrators can be applied: * Polynomial calibrators apply a polynomial expression of the form: `y = a + bx + cx^2 + ...`. The `data` argument must be an array of floats ``[a, b, c, ...]``. * Spline calibrators interpolate the raw value between a set of points which represent a linear curve. The `data` argument must be an array of ``[x, y]`` points. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param str type: One of ``polynomial`` or ``spline``. :param data: Calibration definition for the selected type.
[ "Apply", "a", "calibrator", "while", "processing", "raw", "values", "of", "the", "specified", "parameter", ".", "If", "there", "is", "already", "a", "default", "calibrator", "associated", "to", "this", "parameter", "that", "calibrator", "gets", "replaced", "." ]
python
train
rmed/pyemtmad
pyemtmad/api/parking.py
https://github.com/rmed/pyemtmad/blob/c21c42d0c7b50035dfed29540d7e64ab67833728/pyemtmad/api/parking.py#L308-L329
def list_types_poi(self, **kwargs): """Obtain a list of families, types and categories of POI. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[ParkingPoiType]), or message string in case of error. """ # Endpoint parameters url_args = {'language': util.language_code(kwargs.get('lang'))} # Request result = self.make_request('list_poi_types', url_args) if not util.check_result(result): return False, result.get('message', 'UNKNOWN ERROR') # Parse values = util.response_list(result, 'Data') return True, [emtype.ParkingPoiType(**a) for a in values]
[ "def", "list_types_poi", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Endpoint parameters", "url_args", "=", "{", "'language'", ":", "util", ".", "language_code", "(", "kwargs", ".", "get", "(", "'lang'", ")", ")", "}", "# Request", "result", "=", ...
Obtain a list of families, types and categories of POI. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[ParkingPoiType]), or message string in case of error.
[ "Obtain", "a", "list", "of", "families", "types", "and", "categories", "of", "POI", "." ]
python
train
Asana/python-asana
asana/resources/gen/tasks.py
https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/resources/gen/tasks.py#L241-L252
def remove_followers(self, task, params={}, **options): """Removes each of the specified followers from the task if they are following. Returns the complete, updated record for the affected task. Parameters ---------- task : {Id} The task to remove followers from. [data] : {Object} Data for the request - followers : {Array} An array of followers to remove from the task. """ path = "/tasks/%s/removeFollowers" % (task) return self.client.post(path, params, **options)
[ "def", "remove_followers", "(", "self", ",", "task", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/tasks/%s/removeFollowers\"", "%", "(", "task", ")", "return", "self", ".", "client", ".", "post", "(", "path", ",", ...
Removes each of the specified followers from the task if they are following. Returns the complete, updated record for the affected task. Parameters ---------- task : {Id} The task to remove followers from. [data] : {Object} Data for the request - followers : {Array} An array of followers to remove from the task.
[ "Removes", "each", "of", "the", "specified", "followers", "from", "the", "task", "if", "they", "are", "following", ".", "Returns", "the", "complete", "updated", "record", "for", "the", "affected", "task", "." ]
python
train
af/turrentine
turrentine/views.py
https://github.com/af/turrentine/blob/bbbd5139744ccc6264595cc8960784e5c308c009/turrentine/views.py#L78-L89
def _try_url_with_appended_slash(self): """ Try our URL with an appended slash. If a CMS page is found at that URL, redirect to it. If no page is found at that URL, raise Http404. """ new_url_to_try = self.kwargs.get('path', '') + '/' if not new_url_to_try.startswith('/'): new_url_to_try = '/' + new_url_to_try if CMSPage.objects.published().filter(url=new_url_to_try).exists(): return HttpResponsePermanentRedirect(new_url_to_try) else: raise Http404
[ "def", "_try_url_with_appended_slash", "(", "self", ")", ":", "new_url_to_try", "=", "self", ".", "kwargs", ".", "get", "(", "'path'", ",", "''", ")", "+", "'/'", "if", "not", "new_url_to_try", ".", "startswith", "(", "'/'", ")", ":", "new_url_to_try", "="...
Try our URL with an appended slash. If a CMS page is found at that URL, redirect to it. If no page is found at that URL, raise Http404.
[ "Try", "our", "URL", "with", "an", "appended", "slash", ".", "If", "a", "CMS", "page", "is", "found", "at", "that", "URL", "redirect", "to", "it", ".", "If", "no", "page", "is", "found", "at", "that", "URL", "raise", "Http404", "." ]
python
train
ariebovenberg/gentools
gentools/core.py
https://github.com/ariebovenberg/gentools/blob/4a1f9f928c7f8b4752b69168858e83b4b23d6bcb/gentools/core.py#L351-L372
def imap_send(func, gen): """Apply a function to all ``send`` values of a generator Parameters ---------- func: ~typing.Callable[[T_send], T_mapped] the function to apply gen: Generable[T_yield, T_mapped, T_return] the generator iterable. Returns ------- ~typing.Generator[T_yield, T_send, T_return] the mapped generator """ gen = iter(gen) assert _is_just_started(gen) yielder = yield_from(gen) for item in yielder: with yielder: yielder.send(func((yield item))) return_(yielder.result)
[ "def", "imap_send", "(", "func", ",", "gen", ")", ":", "gen", "=", "iter", "(", "gen", ")", "assert", "_is_just_started", "(", "gen", ")", "yielder", "=", "yield_from", "(", "gen", ")", "for", "item", "in", "yielder", ":", "with", "yielder", ":", "yi...
Apply a function to all ``send`` values of a generator Parameters ---------- func: ~typing.Callable[[T_send], T_mapped] the function to apply gen: Generable[T_yield, T_mapped, T_return] the generator iterable. Returns ------- ~typing.Generator[T_yield, T_send, T_return] the mapped generator
[ "Apply", "a", "function", "to", "all", "send", "values", "of", "a", "generator" ]
python
valid
pypa/pipenv
pipenv/vendor/jinja2/nodes.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/nodes.py#L219-L226
def set_environment(self, environment): """Set the environment for all nodes.""" todo = deque([self]) while todo: node = todo.popleft() node.environment = environment todo.extend(node.iter_child_nodes()) return self
[ "def", "set_environment", "(", "self", ",", "environment", ")", ":", "todo", "=", "deque", "(", "[", "self", "]", ")", "while", "todo", ":", "node", "=", "todo", ".", "popleft", "(", ")", "node", ".", "environment", "=", "environment", "todo", ".", "...
Set the environment for all nodes.
[ "Set", "the", "environment", "for", "all", "nodes", "." ]
python
train
mikedh/trimesh
trimesh/primitives.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/primitives.py#L639-L651
def direction(self): """ Based on the extrudes transform, what is the vector along which the polygon will be extruded Returns --------- direction: (3,) float vector. If self.primitive.transform is an identity matrix this will be [0.0, 0.0, 1.0] """ direction = np.dot(self.primitive.transform[:3, :3], [0.0, 0.0, np.sign(self.primitive.height)]) return direction
[ "def", "direction", "(", "self", ")", ":", "direction", "=", "np", ".", "dot", "(", "self", ".", "primitive", ".", "transform", "[", ":", "3", ",", ":", "3", "]", ",", "[", "0.0", ",", "0.0", ",", "np", ".", "sign", "(", "self", ".", "primitive...
Based on the extrudes transform, what is the vector along which the polygon will be extruded Returns --------- direction: (3,) float vector. If self.primitive.transform is an identity matrix this will be [0.0, 0.0, 1.0]
[ "Based", "on", "the", "extrudes", "transform", "what", "is", "the", "vector", "along", "which", "the", "polygon", "will", "be", "extruded" ]
python
train
markovmodel/PyEMMA
pyemma/plots/plots2d.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L253-L377
def plot_map( x, y, z, ax=None, cmap=None, ncontours=100, vmin=None, vmax=None, levels=None, cbar=True, cax=None, cbar_label=None, cbar_orientation='vertical', norm=None, **kwargs): """Plot a two-dimensional map from data on a grid. Parameters ---------- x : ndarray(T) Binned x-coordinates. y : ndarray(T) Binned y-coordinates. z : ndarray(T) Binned z-coordinates. ax : matplotlib.Axes object, optional, default=None The ax to plot to; if ax=None, a new ax (and fig) is created. cmap : matplotlib colormap, optional, default=None The color map to use. ncontours : int, optional, default=100 Number of contour levels. vmin : float, optional, default=None Lowest z-value to be plotted. vmax : float, optional, default=None Highest z-value to be plotted. levels : iterable of float, optional, default=None Contour levels to plot. cbar : boolean, optional, default=True Plot a color bar. cax : matplotlib.Axes object, optional, default=None Plot the colorbar into a custom axes object instead of stealing space from ax. cbar_label : str, optional, default=None Colorbar label string; use None to suppress it. cbar_orientation : str, optional, default='vertical' Colorbar orientation; choose 'vertical' or 'horizontal'. norm : matplotlib norm, optional, default=None Use a norm when coloring the contour plot. Optional parameters for contourf (**kwargs) ------------------------------------------- corner_mask : boolean, optional Enable/disable corner masking, which only has an effect if z is a masked array. If False, any quad touching a masked point is masked out. If True, only the triangular corners of quads nearest those points are always masked out, other triangular corners comprising three unmasked points are contoured as usual. Defaults to rcParams['contour.corner_mask'], which defaults to True. alpha : float The alpha blending value. locator : [ None | ticker.Locator subclass ] If locator is None, the default MaxNLocator is used. 
The locator is used to determine the contour levels if they are not given explicitly via the levels argument. extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ] Unless this is ‘neither’, contour levels are automatically added to one or both ends of the range so that all data are included. These added ranges are then mapped to the special colormap values which default to the ends of the colormap range, but can be set via matplotlib.colors.Colormap.set_under() and matplotlib.colors.Colormap.set_over() methods. xunits, yunits : [ None | registered units ] Override axis units by specifying an instance of a matplotlib.units.ConversionInterface. antialiased : boolean, optional Enable antialiasing, overriding the defaults. For filled contours, the default is True. For line contours, it is taken from rcParams[‘lines.antialiased’]. nchunk : [ 0 | integer ] If 0, no subdivision of the domain. Specify a positive integer to divide the domain into subdomains of nchunk by nchunk quads. Chunking reduces the maximum length of polygons generated by the contouring algorithm which reduces the rendering workload passed on to the backend and also requires slightly less RAM. It can however introduce rendering artifacts at chunk boundaries depending on the backend, the antialiased flag and value of alpha. hatches : A list of cross hatch patterns to use on the filled areas. If None, no hatching will be added to the contour. Hatching is supported in the PostScript, PDF, SVG and Agg backends only. zorder : float Set the zorder for the artist. Artists with lower zorder values are drawn first. Returns ------- fig : matplotlib.Figure object The figure in which the used ax resides. ax : matplotlib.Axes object The ax in which the map was plotted. misc : dict Contains a matplotlib.contour.QuadContourSet 'mappable' and, if requested, a matplotlib.Colorbar object 'cbar'. 
""" import matplotlib.pyplot as _plt if ax is None: fig, ax = _plt.subplots() else: fig = ax.get_figure() mappable = ax.contourf( x, y, z, ncontours, norm=norm, vmin=vmin, vmax=vmax, cmap=cmap, levels=levels, **_prune_kwargs(kwargs)) misc = dict(mappable=mappable) if cbar_orientation not in ('horizontal', 'vertical'): raise ValueError( 'cbar_orientation must be "horizontal" or "vertical"') if cbar: if cax is None: cbar_ = fig.colorbar( mappable, ax=ax, orientation=cbar_orientation) else: cbar_ = fig.colorbar( mappable, cax=cax, orientation=cbar_orientation) if cbar_label is not None: cbar_.set_label(cbar_label) misc.update(cbar=cbar_) return fig, ax, misc
[ "def", "plot_map", "(", "x", ",", "y", ",", "z", ",", "ax", "=", "None", ",", "cmap", "=", "None", ",", "ncontours", "=", "100", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "levels", "=", "None", ",", "cbar", "=", "True", ",", "c...
Plot a two-dimensional map from data on a grid. Parameters ---------- x : ndarray(T) Binned x-coordinates. y : ndarray(T) Binned y-coordinates. z : ndarray(T) Binned z-coordinates. ax : matplotlib.Axes object, optional, default=None The ax to plot to; if ax=None, a new ax (and fig) is created. cmap : matplotlib colormap, optional, default=None The color map to use. ncontours : int, optional, default=100 Number of contour levels. vmin : float, optional, default=None Lowest z-value to be plotted. vmax : float, optional, default=None Highest z-value to be plotted. levels : iterable of float, optional, default=None Contour levels to plot. cbar : boolean, optional, default=True Plot a color bar. cax : matplotlib.Axes object, optional, default=None Plot the colorbar into a custom axes object instead of stealing space from ax. cbar_label : str, optional, default=None Colorbar label string; use None to suppress it. cbar_orientation : str, optional, default='vertical' Colorbar orientation; choose 'vertical' or 'horizontal'. norm : matplotlib norm, optional, default=None Use a norm when coloring the contour plot. Optional parameters for contourf (**kwargs) ------------------------------------------- corner_mask : boolean, optional Enable/disable corner masking, which only has an effect if z is a masked array. If False, any quad touching a masked point is masked out. If True, only the triangular corners of quads nearest those points are always masked out, other triangular corners comprising three unmasked points are contoured as usual. Defaults to rcParams['contour.corner_mask'], which defaults to True. alpha : float The alpha blending value. locator : [ None | ticker.Locator subclass ] If locator is None, the default MaxNLocator is used. The locator is used to determine the contour levels if they are not given explicitly via the levels argument. 
extend : [ ‘neither’ | ‘both’ | ‘min’ | ‘max’ ] Unless this is ‘neither’, contour levels are automatically added to one or both ends of the range so that all data are included. These added ranges are then mapped to the special colormap values which default to the ends of the colormap range, but can be set via matplotlib.colors.Colormap.set_under() and matplotlib.colors.Colormap.set_over() methods. xunits, yunits : [ None | registered units ] Override axis units by specifying an instance of a matplotlib.units.ConversionInterface. antialiased : boolean, optional Enable antialiasing, overriding the defaults. For filled contours, the default is True. For line contours, it is taken from rcParams[‘lines.antialiased’]. nchunk : [ 0 | integer ] If 0, no subdivision of the domain. Specify a positive integer to divide the domain into subdomains of nchunk by nchunk quads. Chunking reduces the maximum length of polygons generated by the contouring algorithm which reduces the rendering workload passed on to the backend and also requires slightly less RAM. It can however introduce rendering artifacts at chunk boundaries depending on the backend, the antialiased flag and value of alpha. hatches : A list of cross hatch patterns to use on the filled areas. If None, no hatching will be added to the contour. Hatching is supported in the PostScript, PDF, SVG and Agg backends only. zorder : float Set the zorder for the artist. Artists with lower zorder values are drawn first. Returns ------- fig : matplotlib.Figure object The figure in which the used ax resides. ax : matplotlib.Axes object The ax in which the map was plotted. misc : dict Contains a matplotlib.contour.QuadContourSet 'mappable' and, if requested, a matplotlib.Colorbar object 'cbar'.
[ "Plot", "a", "two", "-", "dimensional", "map", "from", "data", "on", "a", "grid", "." ]
python
train
trailofbits/manticore
manticore/core/smtlib/visitors.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/core/smtlib/visitors.py#L529-L539
def visit_BitVecShiftLeft(self, expression, *operands): """ a << 0 => a remove zero a << ct => 0 if ct > sizeof(a) remove big constant shift """ left = expression.operands[0] right = expression.operands[1] if isinstance(right, BitVecConstant): if right.value == 0: return left elif right.value >= right.size: return left
[ "def", "visit_BitVecShiftLeft", "(", "self", ",", "expression", ",", "*", "operands", ")", ":", "left", "=", "expression", ".", "operands", "[", "0", "]", "right", "=", "expression", ".", "operands", "[", "1", "]", "if", "isinstance", "(", "right", ",", ...
a << 0 => a remove zero a << ct => 0 if ct > sizeof(a) remove big constant shift
[ "a", "<<", "0", "=", ">", "a", "remove", "zero", "a", "<<", "ct", "=", ">", "0", "if", "ct", ">", "sizeof", "(", "a", ")", "remove", "big", "constant", "shift" ]
python
valid
waqasbhatti/astrobase
astrobase/services/dust.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/services/dust.py#L88-L257
def extinction_query(lon, lat, coordtype='equatorial', sizedeg=5.0, forcefetch=False, cachedir='~/.astrobase/dust-cache', verbose=True, timeout=10.0, jitter=5.0): '''This queries the 2MASS DUST service to find the extinction parameters for the given `lon`, `lat`. Parameters ---------- lon,lat: float These are decimal right ascension and declination if `coordtype = 'equatorial'`. These are are decimal Galactic longitude and latitude if `coordtype = 'galactic'`. coordtype : {'equatorial','galactic'} Sets the type of coordinates passed in as `lon`, `lat`. sizedeg : float This is the width of the image returned by the DUST service. This can usually be left as-is if you're interested in the extinction only. forcefetch : bool If this is True, the query will be retried even if cached results for it exist. cachedir : str This points to the directory where results will be downloaded. verbose : bool If True, will indicate progress and warn of any issues. timeout : float This sets the amount of time in seconds to wait for the service to respond to our request. jitter : float This is used to control the scale of the random wait in seconds before starting the query. Useful in parallelized situations. 
Returns ------- dict A dict of the following form is returned:: {'Amag':{dict of extinction A_v values for several mag systems}, 'table': array containing the full extinction table, 'tablefile': the path to the full extinction table file on disk, 'provenance': 'cached' or 'new download', 'request': string repr of the request made to 2MASS DUST} ''' dustparams = DUST_PARAMS.copy() # convert the lon, lat to the required format # and generate the param dict if coordtype == 'equatorial': locstr = '%.3f %.3f Equ J2000' % (lon, lat) elif coordtype == 'galactic': locstr = '%.3f %.3f gal' % (lon, lat) else: LOGERROR('unknown coordinate type: %s' % coordtype) return None dustparams['locstr'] = locstr dustparams['regSize'] = '%.3f' % sizedeg # see if the cachedir exists if '~' in cachedir: cachedir = os.path.expanduser(cachedir) if not os.path.exists(cachedir): os.makedirs(cachedir) # generate the cachekey and cache filename cachekey = '%s - %.1f' % (locstr, sizedeg) cachekey = hashlib.sha256(cachekey.encode()).hexdigest() cachefname = os.path.join(cachedir, '%s.txt' % cachekey) provenance = 'cache' # if this does not exist in cache or if we're forcefetching, do the query if forcefetch or (not os.path.exists(cachefname)): time.sleep(random.randint(1,jitter)) provenance = 'new download' try: if verbose: LOGINFO('submitting 2MASS DUST request for ' 'lon = %.3f, lat = %.3f, type = %s, size = %.1f' % (lon, lat, coordtype, sizedeg)) req = requests.get(DUST_URL, dustparams, timeout=timeout) req.raise_for_status() resp = req.text # see if we got an extinction table URL in the response tableurl = DUST_REGEX.search(resp) # if we did, download it to the cache directory if tableurl: tableurl = tableurl.group(0) req2 = requests.get(tableurl, timeout=timeout) # write the table to the cache directory with open(cachefname,'wb') as outfd: outfd.write(req2.content) tablefname = cachefname else: LOGERROR('could not get extinction parameters for ' '%s (%.3f, %.3f) with size = %.1f' % 
(coordtype, lon,lat,sizedeg)) LOGERROR('error from DUST service follows:\n%s' % resp) return None except requests.exceptions.Timeout as e: LOGERROR('DUST request timed out for ' '%s (%.3f, %.3f) with size = %.1f' % (coordtype, lon,lat,sizedeg)) return None except Exception as e: LOGEXCEPTION('DUST request failed for ' '%s (%.3f, %.3f) with size = %.1f' % (coordtype, lon,lat,sizedeg)) return None # if this result is available in the cache, get it from there else: if verbose: LOGINFO('getting cached 2MASS DUST result for ' 'lon = %.3f, lat = %.3f, coordtype = %s, size = %.1f' % (lon, lat, coordtype, sizedeg)) tablefname = cachefname # # now we should have the extinction table in some form # # read and parse the extinction table using astropy.Table extinction_table = Table.read(tablefname, format='ascii.ipac') # get the columns we need filters = np.array(extinction_table['Filter_name']) a_sf11_byfilter = np.array(extinction_table['A_SandF']) a_sfd98_byfilter = np.array(extinction_table['A_SFD']) # generate the output dict extdict = {'Amag':{x:{'sf11':y, 'sfd98':z} for x,y,z in zip(filters,a_sf11_byfilter,a_sfd98_byfilter)}, 'table':np.array(extinction_table), 'tablefile':os.path.abspath(cachefname), 'provenance':provenance, 'request':'%s (%.3f, %.3f) with size = %.1f' % (coordtype, lon,lat, sizedeg)} return extdict
[ "def", "extinction_query", "(", "lon", ",", "lat", ",", "coordtype", "=", "'equatorial'", ",", "sizedeg", "=", "5.0", ",", "forcefetch", "=", "False", ",", "cachedir", "=", "'~/.astrobase/dust-cache'", ",", "verbose", "=", "True", ",", "timeout", "=", "10.0"...
This queries the 2MASS DUST service to find the extinction parameters for the given `lon`, `lat`. Parameters ---------- lon,lat: float These are decimal right ascension and declination if `coordtype = 'equatorial'`. These are are decimal Galactic longitude and latitude if `coordtype = 'galactic'`. coordtype : {'equatorial','galactic'} Sets the type of coordinates passed in as `lon`, `lat`. sizedeg : float This is the width of the image returned by the DUST service. This can usually be left as-is if you're interested in the extinction only. forcefetch : bool If this is True, the query will be retried even if cached results for it exist. cachedir : str This points to the directory where results will be downloaded. verbose : bool If True, will indicate progress and warn of any issues. timeout : float This sets the amount of time in seconds to wait for the service to respond to our request. jitter : float This is used to control the scale of the random wait in seconds before starting the query. Useful in parallelized situations. Returns ------- dict A dict of the following form is returned:: {'Amag':{dict of extinction A_v values for several mag systems}, 'table': array containing the full extinction table, 'tablefile': the path to the full extinction table file on disk, 'provenance': 'cached' or 'new download', 'request': string repr of the request made to 2MASS DUST}
[ "This", "queries", "the", "2MASS", "DUST", "service", "to", "find", "the", "extinction", "parameters", "for", "the", "given", "lon", "lat", "." ]
python
valid
saltstack/salt
salt/modules/twilio_notify.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/twilio_notify.py#L57-L67
def _get_twilio(profile): ''' Return the twilio connection ''' creds = __salt__['config.option'](profile) client = TwilioRestClient( creds.get('twilio.account_sid'), creds.get('twilio.auth_token'), ) return client
[ "def", "_get_twilio", "(", "profile", ")", ":", "creds", "=", "__salt__", "[", "'config.option'", "]", "(", "profile", ")", "client", "=", "TwilioRestClient", "(", "creds", ".", "get", "(", "'twilio.account_sid'", ")", ",", "creds", ".", "get", "(", "'twil...
Return the twilio connection
[ "Return", "the", "twilio", "connection" ]
python
train
gawel/aiocron
aiocron/__init__.py
https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L71-L73
def get_next(self): """Return next iteration time related to loop time""" return self.loop_time + (self.croniter.get_next(float) - self.time)
[ "def", "get_next", "(", "self", ")", ":", "return", "self", ".", "loop_time", "+", "(", "self", ".", "croniter", ".", "get_next", "(", "float", ")", "-", "self", ".", "time", ")" ]
Return next iteration time related to loop time
[ "Return", "next", "iteration", "time", "related", "to", "loop", "time" ]
python
train
dw/mitogen
mitogen/parent.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/parent.py#L807-L819
def wstatus_to_str(status): """ Parse and format a :func:`os.waitpid` exit status. """ if os.WIFEXITED(status): return 'exited with return code %d' % (os.WEXITSTATUS(status),) if os.WIFSIGNALED(status): n = os.WTERMSIG(status) return 'exited due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n)) if os.WIFSTOPPED(status): n = os.WSTOPSIG(status) return 'stopped due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n)) return 'unknown wait status (%d)' % (status,)
[ "def", "wstatus_to_str", "(", "status", ")", ":", "if", "os", ".", "WIFEXITED", "(", "status", ")", ":", "return", "'exited with return code %d'", "%", "(", "os", ".", "WEXITSTATUS", "(", "status", ")", ",", ")", "if", "os", ".", "WIFSIGNALED", "(", "sta...
Parse and format a :func:`os.waitpid` exit status.
[ "Parse", "and", "format", "a", ":", "func", ":", "os", ".", "waitpid", "exit", "status", "." ]
python
train
brutasse/graphite-api
graphite_api/render/glyph.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L401-L446
def applySettings(self, axisMin=None, axisMax=None, axisLimit=None): """Apply the specified settings to this axis. Set self.minValue, self.minValueSource, self.maxValue, self.maxValueSource, and self.axisLimit reasonably based on the parameters provided. Arguments: axisMin -- a finite number, or None to choose a round minimum limit that includes all of the data. axisMax -- a finite number, 'max' to use the maximum value contained in the data, or None to choose a round maximum limit that includes all of the data. axisLimit -- a finite number to use as an upper limit on maxValue, or None to impose no upper limit. """ if axisMin is not None and not math.isnan(axisMin): self.minValueSource = 'min' self.minValue = self.checkFinite(axisMin, 'axis min') if axisMax == 'max': self.maxValueSource = 'extremum' elif axisMax is not None and not math.isnan(axisMax): self.maxValueSource = 'max' self.maxValue = self.checkFinite(axisMax, 'axis max') if axisLimit is None or math.isnan(axisLimit): self.axisLimit = None elif axisLimit < self.maxValue: self.maxValue = self.checkFinite(axisLimit, 'axis limit') self.maxValueSource = 'limit' # The limit has already been imposed, so there is no need to # remember it: self.axisLimit = None elif math.isinf(axisLimit): # It must be positive infinity, which is the same as no limit: self.axisLimit = None else: # We still need to remember axisLimit to avoid rounding top to # a value larger than axisLimit: self.axisLimit = axisLimit self.reconcileLimits()
[ "def", "applySettings", "(", "self", ",", "axisMin", "=", "None", ",", "axisMax", "=", "None", ",", "axisLimit", "=", "None", ")", ":", "if", "axisMin", "is", "not", "None", "and", "not", "math", ".", "isnan", "(", "axisMin", ")", ":", "self", ".", ...
Apply the specified settings to this axis. Set self.minValue, self.minValueSource, self.maxValue, self.maxValueSource, and self.axisLimit reasonably based on the parameters provided. Arguments: axisMin -- a finite number, or None to choose a round minimum limit that includes all of the data. axisMax -- a finite number, 'max' to use the maximum value contained in the data, or None to choose a round maximum limit that includes all of the data. axisLimit -- a finite number to use as an upper limit on maxValue, or None to impose no upper limit.
[ "Apply", "the", "specified", "settings", "to", "this", "axis", "." ]
python
train
ereOn/azmq
azmq/common.py
https://github.com/ereOn/azmq/blob/9f40d6d721eea7f7659ec6cc668811976db59854/azmq/common.py#L428-L441
def write_nowait(self, item): """ Write in the box in a non-blocking manner. If the box is full, an exception is thrown. You should always check for fullness with `full` or `wait_not_full` before calling this method. :param item: An item. """ self._queue.put_nowait(item) self._can_read.set() if self._queue.full(): self._can_write.clear()
[ "def", "write_nowait", "(", "self", ",", "item", ")", ":", "self", ".", "_queue", ".", "put_nowait", "(", "item", ")", "self", ".", "_can_read", ".", "set", "(", ")", "if", "self", ".", "_queue", ".", "full", "(", ")", ":", "self", ".", "_can_write...
Write in the box in a non-blocking manner. If the box is full, an exception is thrown. You should always check for fullness with `full` or `wait_not_full` before calling this method. :param item: An item.
[ "Write", "in", "the", "box", "in", "a", "non", "-", "blocking", "manner", "." ]
python
train
cuihantao/andes
andes/models/base.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/models/base.py#L1462-L1483
def _check_Vn(self): """Check data consistency of Vn and Vdcn if connected to Bus or Node :return None """ if hasattr(self, 'bus') and hasattr(self, 'Vn'): bus_Vn = self.read_data_ext('Bus', field='Vn', idx=self.bus) for name, bus, Vn, Vn0 in zip(self.name, self.bus, self.Vn, bus_Vn): if Vn != Vn0: self.log( '<{}> has Vn={} different from bus <{}> Vn={}.'.format( name, Vn, bus, Vn0), WARNING) if hasattr(self, 'node') and hasattr(self, 'Vdcn'): node_Vdcn = self.read_data_ext('Node', field='Vdcn', idx=self.node) for name, node, Vdcn, Vdcn0 in zip(self.name, self.node, self.Vdcn, node_Vdcn): if Vdcn != Vdcn0: self.log( '<{}> has Vdcn={} different from node <{}> Vdcn={}.' .format(name, Vdcn, node, Vdcn0), WARNING)
[ "def", "_check_Vn", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'bus'", ")", "and", "hasattr", "(", "self", ",", "'Vn'", ")", ":", "bus_Vn", "=", "self", ".", "read_data_ext", "(", "'Bus'", ",", "field", "=", "'Vn'", ",", "idx", "=",...
Check data consistency of Vn and Vdcn if connected to Bus or Node :return None
[ "Check", "data", "consistency", "of", "Vn", "and", "Vdcn", "if", "connected", "to", "Bus", "or", "Node" ]
python
train
inspirehep/harvesting-kit
harvestingkit/bibrecord.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/bibrecord.py#L1579-L1597
def _compare_fields(field1, field2, strict=True): """ Compare 2 fields. If strict is True, then the order of the subfield will be taken care of, if not then the order of the subfields doesn't matter. :return: True if the field are equivalent, False otherwise. """ if strict: # Return a simple equal test on the field minus the position. return field1[:4] == field2[:4] else: if field1[1:4] != field2[1:4]: # Different indicators or controlfield value. return False else: # Compare subfields in a loose way. return set(field1[0]) == set(field2[0])
[ "def", "_compare_fields", "(", "field1", ",", "field2", ",", "strict", "=", "True", ")", ":", "if", "strict", ":", "# Return a simple equal test on the field minus the position.", "return", "field1", "[", ":", "4", "]", "==", "field2", "[", ":", "4", "]", "els...
Compare 2 fields. If strict is True, then the order of the subfield will be taken care of, if not then the order of the subfields doesn't matter. :return: True if the field are equivalent, False otherwise.
[ "Compare", "2", "fields", "." ]
python
valid