repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
aws/aws-dynamodb-encryption-python
src/dynamodb_encryption_sdk/internal/utils.py
https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/utils.py#L147-L158
def crypto_config_from_cache(materials_provider, attribute_actions, table_info_cache, table_name):
    """Build a crypto config from the provided values, loading the table info
    from the provided cache.

    :returns: crypto config and updated kwargs
    :rtype: tuple(CryptoConfig, dict)
    """
    info = table_info_cache.table_info(table_name)
    # Work on a copy so the caller's attribute actions are never mutated.
    actions = attribute_actions.copy()
    actions.set_index_keys(*info.protected_index_keys())
    return crypto_config_from_table_info(materials_provider, actions, info)
[ "def", "crypto_config_from_cache", "(", "materials_provider", ",", "attribute_actions", ",", "table_info_cache", ",", "table_name", ")", ":", "table_info", "=", "table_info_cache", ".", "table_info", "(", "table_name", ")", "attribute_actions", "=", "attribute_actions", ...
Build a crypto config from the provided values, loading the table info from the provided cache. :returns: crypto config and updated kwargs :rtype: tuple(CryptoConfig, dict)
[ "Build", "a", "crypto", "config", "from", "the", "provided", "values", "loading", "the", "table", "info", "from", "the", "provided", "cache", "." ]
python
train
rosenbrockc/fortpy
fortpy/elements.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L1315-L1327
def first(self):
    """Return the first module procedure embedded in the interface that has a
    valid instance of a CodeElement.

    The result is cached on ``self._first``; ``False`` is cached when no
    valid target exists.
    """
    if self._first is None:
        # next() with a default replaces the original for/else scan.
        self._first = next((t for t in self.targets if t is not None), False)
    return self._first
[ "def", "first", "(", "self", ")", ":", "if", "self", ".", "_first", "is", "None", ":", "for", "target", "in", "self", ".", "targets", ":", "if", "target", "is", "not", "None", ":", "self", ".", "_first", "=", "target", "break", "else", ":", "self",...
Returns the first module procedure embedded in the interface that has a valid instance of a CodeElement.
[ "Returns", "the", "first", "module", "procedure", "embedded", "in", "the", "interface", "that", "has", "a", "valid", "instance", "of", "a", "CodeElement", "." ]
python
train
miku/gluish
gluish/task.py
https://github.com/miku/gluish/blob/56d3ac4f41a944e31ecac0aa3b6d1dc2ce705e29/gluish/task.py#L46-L51
def is_closest_date_parameter(task, param_name):
    """Return True if the parameter named ``param_name`` on ``task`` is a
    "closest date" parameter, i.e. its parameter object exposes a
    ``use_closest_date`` attribute.

    Returns False when the task has no parameter with that name.

    (The previous docstring claimed to return "the parameter class", but the
    function has always returned a boolean.)
    """
    for name, obj in task.get_params():
        if name == param_name:
            return hasattr(obj, 'use_closest_date')
    return False
[ "def", "is_closest_date_parameter", "(", "task", ",", "param_name", ")", ":", "for", "name", ",", "obj", "in", "task", ".", "get_params", "(", ")", ":", "if", "name", "==", "param_name", ":", "return", "hasattr", "(", "obj", ",", "'use_closest_date'", ")",...
Return the parameter class of param_name on task.
[ "Return", "the", "parameter", "class", "of", "param_name", "on", "task", "." ]
python
train
i3visio/osrframework
osrframework/utils/browser.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/browser.py#L141-L182
def recoverURL(self, url):
    """ Public method to recover a resource.

        Args:
        -----
            url: The URL to be collected.

        Returns:
        --------
            The body of the resource (as returned by ``read()`` on the
            opened resource), or None when the resource could not be
            opened (e.g., the request was forbidden).
    """
    # Configuring user agents...
    self.setUserAgent()

    # Configuring proxies
    if "https://" in url:
        self.setProxy(protocol="https")
    else:
        self.setProxy(protocol="http")

    # Giving special treatment for .onion platforms.
    # TODO: configure the Tor bundle manually instead of relying on the
    # .onion.cab gateway; eventually try the tor2web approach:
    #   url = url.replace(".onion", ".tor2web.org")
    # (The original empty try/except placeholder had no runtime effect.)
    if ".onion" in url:
        url = url.replace(".onion", ".onion.cab")

    # Opening the resource
    try:
        recurso = self.br.open(url)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed. Maybe the request was forbidden?
        return None

    html = recurso.read()
    return html
[ "def", "recoverURL", "(", "self", ",", "url", ")", ":", "# Configuring user agents...", "self", ".", "setUserAgent", "(", ")", "# Configuring proxies", "if", "\"https://\"", "in", "url", ":", "self", ".", "setProxy", "(", "protocol", "=", "\"https\"", ")", "el...
Public method to recover a resource. Args: ----- url: The URL to be collected. Returns: -------- Returns a resource that has to be read, for instance, with html = self.br.read()
[ "Public", "method", "to", "recover", "a", "resource", "." ]
python
train
maartenbreddels/ipyvolume
ipyvolume/pylab.py
https://github.com/maartenbreddels/ipyvolume/blob/e68b72852b61276f8e6793bc8811f5b2432a155f/ipyvolume/pylab.py#L290-L302
def squarelim():
    """Set all axes with equal aspect ratio, such that the space is 'square'."""
    fig = gcf()
    extents = [fig.xlim, fig.ylim, fig.zlim]
    # The largest span across the three axes becomes the shared width.
    width = max(abs(hi - lo) for lo, hi in extents)
    half = width / 2
    centers = [(lo + hi) / 2 for lo, hi in extents]
    xlim(centers[0] - half, centers[0] + half)
    ylim(centers[1] - half, centers[1] + half)
    zlim(centers[2] - half, centers[2] + half)
[ "def", "squarelim", "(", ")", ":", "fig", "=", "gcf", "(", ")", "xmin", ",", "xmax", "=", "fig", ".", "xlim", "ymin", ",", "ymax", "=", "fig", ".", "ylim", "zmin", ",", "zmax", "=", "fig", ".", "zlim", "width", "=", "max", "(", "[", "abs", "(...
Set all axes with equal aspect ratio, such that the space is 'square'.
[ "Set", "all", "axes", "with", "equal", "aspect", "ratio", "such", "that", "the", "space", "is", "square", "." ]
python
train
etingof/pysnmp
pysnmp/smi/rfc1902.py
https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/rfc1902.py#L1135-L1165
def addAsn1MibSource(self, *asn1Sources, **kwargs):
    """Adds path to a repository to search ASN.1 MIB files.

    Parameters
    ----------
    *asn1Sources :
        one or more URL in form of :py:obj:`str` identifying local or
        remote ASN.1 MIB repositories. Path must include the *@mib@*
        component which will be replaced with MIB module name at the
        time of search.

    Returns
    -------
    : :py:class:`~pysnmp.smi.rfc1902.NotificationType`
        reference to itself

    Notes
    -----
    Please refer to :py:class:`~pysmi.reader.localfile.FileReader`,
    :py:class:`~pysmi.reader.httpclient.HttpReader` and
    :py:class:`~pysmi.reader.ftpclient.FtpReader` classes for
    in-depth information on ASN.1 MIB lookup.

    Examples
    --------
    >>> NotificationType(ObjectIdentity('IF-MIB', 'linkDown'), (), {}).addAsn1Source('http://mibs.snmplabs.com/asn1/@mib@')
    NotificationType(ObjectIdentity('IF-MIB', 'linkDown'), (), {})
    >>>
    """
    # Delegate the lookup-path registration to the wrapped object identity,
    # then return self so calls can be chained fluently.
    identity = self._objectIdentity
    identity.addAsn1MibSource(*asn1Sources, **kwargs)
    return self
[ "def", "addAsn1MibSource", "(", "self", ",", "*", "asn1Sources", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_objectIdentity", ".", "addAsn1MibSource", "(", "*", "asn1Sources", ",", "*", "*", "kwargs", ")", "return", "self" ]
Adds path to a repository to search ASN.1 MIB files. Parameters ---------- *asn1Sources : one or more URL in form of :py:obj:`str` identifying local or remote ASN.1 MIB repositories. Path must include the *@mib@* component which will be replaced with MIB module name at the time of search. Returns ------- : :py:class:`~pysnmp.smi.rfc1902.NotificationType` reference to itself Notes ----- Please refer to :py:class:`~pysmi.reader.localfile.FileReader`, :py:class:`~pysmi.reader.httpclient.HttpReader` and :py:class:`~pysmi.reader.ftpclient.FtpReader` classes for in-depth information on ASN.1 MIB lookup. Examples -------- >>> NotificationType(ObjectIdentity('IF-MIB', 'linkDown'), (), {}).addAsn1Source('http://mibs.snmplabs.com/asn1/@mib@') NotificationType(ObjectIdentity('IF-MIB', 'linkDown'), (), {}) >>>
[ "Adds", "path", "to", "a", "repository", "to", "search", "ASN", ".", "1", "MIB", "files", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/ansi_code_processor.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/ansi_code_processor.py#L76-L84
def reset_sgr(self):
    """ Reset graphics attributes to their default values.
    """
    # Table-driven reset keeps the defaults in one place.
    defaults = {
        'intensity': 0,
        'italic': False,
        'bold': False,
        'underline': False,
        'foreground_color': None,
        'background_color': None,
    }
    for name, value in defaults.items():
        setattr(self, name, value)
[ "def", "reset_sgr", "(", "self", ")", ":", "self", ".", "intensity", "=", "0", "self", ".", "italic", "=", "False", "self", ".", "bold", "=", "False", "self", ".", "underline", "=", "False", "self", ".", "foreground_color", "=", "None", "self", ".", ...
Reset graphics attributs to their default values.
[ "Reset", "graphics", "attributs", "to", "their", "default", "values", "." ]
python
test
RetailMeNotSandbox/acky
acky/ec2.py
https://github.com/RetailMeNotSandbox/acky/blob/fcd4d092c42892ede7c924cafc41e9cf4be3fb9f/acky/ec2.py#L515-L519
def attach(self, volume_id, instance_id, device_path):
    """Attach a volume to an instance, exposing it with a device name."""
    params = {
        'VolumeId': volume_id,
        'InstanceId': instance_id,
        'Device': device_path,
    }
    return self.call("AttachVolume", **params)
[ "def", "attach", "(", "self", ",", "volume_id", ",", "instance_id", ",", "device_path", ")", ":", "return", "self", ".", "call", "(", "\"AttachVolume\"", ",", "VolumeId", "=", "volume_id", ",", "InstanceId", "=", "instance_id", ",", "Device", "=", "device_pa...
Attach a volume to an instance, exposing it with a device name.
[ "Attach", "a", "volume", "to", "an", "instance", "exposing", "it", "with", "a", "device", "name", "." ]
python
train
wonambi-python/wonambi
wonambi/widgets/notes.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1838-L1847
def update_event_types(self):
    """Update event types in event type box."""
    box = self.idx_evt_type
    box.clear()
    box.setSelectionMode(QAbstractItemView.ExtendedSelection)

    # Case-insensitive alphabetical order for display.
    for name in sorted(self.parent.notes.annot.event_types, key=str.lower):
        box.addItem(QListWidgetItem(name))
[ "def", "update_event_types", "(", "self", ")", ":", "self", ".", "idx_evt_type", ".", "clear", "(", ")", "self", ".", "idx_evt_type", ".", "setSelectionMode", "(", "QAbstractItemView", ".", "ExtendedSelection", ")", "event_types", "=", "sorted", "(", "self", "...
Update event types in event type box.
[ "Update", "event", "types", "in", "event", "type", "box", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py#L12-L23
def show_raslog_input_rbridge_id(self, **kwargs):
    """Build a ``show_raslog`` request element with an ``input/rbridge-id``
    child and dispatch it through the callback (auto-generated method).

    Expects ``rbridge_id`` in kwargs; ``callback`` defaults to
    ``self._callback``.
    """
    show_raslog = ET.Element("show_raslog")
    input_el = ET.SubElement(show_raslog, "input")
    rbridge_el = ET.SubElement(input_el, "rbridge-id")
    rbridge_el.text = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_raslog)
[ "def", "show_raslog_input_rbridge_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_raslog", "=", "ET", ".", "Element", "(", "\"show_raslog\"", ")", "config", "=", "show_raslog", "input", ...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
opencobra/memote
memote/support/helpers.py
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L184-L217
def is_transport_reaction_formulae(rxn):
    """
    Return boolean if a reaction is a transport reaction (from formulae).

    Parameters
    ----------
    rxn: cobra.Reaction
        The metabolic reaction under investigation.

    Returns
    -------
    bool
        True when the reaction transports elements between compartments;
        False otherwise. (Previously the negative paths fell through and
        returned None, despite the documented boolean contract; None and
        False are equivalent in boolean context, so callers are unaffected.)
    """
    # Collecting criteria to classify transporters by.
    rxn_reactants = set([met.formula for met in rxn.reactants])
    rxn_products = set([met.formula for met in rxn.products])
    # Looking for formulas that stay the same on both side of the reaction.
    transported_mets = \
        [formula for formula in rxn_reactants if formula in rxn_products]
    # Collect information on the elemental differences between
    # compartments in the reaction.
    delta_dicts = find_transported_elements(rxn)
    non_zero_array = [v for (k, v) in iteritems(delta_dicts) if v != 0]
    # Excluding reactions such as oxidoreductases where no net
    # transport of Hydrogen is occurring, but rather just an exchange of
    # electrons or charges effecting a change in protonation.
    if set(transported_mets) != set('H') and list(
            delta_dicts.keys()
    ) == ['H']:
        pass
    # All other reactions for which the amount of transported elements is
    # not zero, which are not part of the model's exchange nor
    # biomass reactions, are defined as transport reactions.
    # This includes reactions where the transported metabolite reacts with
    # a carrier molecule.
    elif sum(non_zero_array):
        return True
    return False
[ "def", "is_transport_reaction_formulae", "(", "rxn", ")", ":", "# Collecting criteria to classify transporters by.", "rxn_reactants", "=", "set", "(", "[", "met", ".", "formula", "for", "met", "in", "rxn", ".", "reactants", "]", ")", "rxn_products", "=", "set", "(...
Return boolean if a reaction is a transport reaction (from formulae). Parameters ---------- rxn: cobra.Reaction The metabolic reaction under investigation.
[ "Return", "boolean", "if", "a", "reaction", "is", "a", "transport", "reaction", "(", "from", "formulae", ")", "." ]
python
train
lsst-sqre/documenteer
documenteer/stackdocs/build.py
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/build.py#L249-L395
def find_package_docs(package_dir, skippedNames=None):
    """Find documentation directories in a package using ``manifest.yaml``.

    Parameters
    ----------
    package_dir : `str`
        Directory of an EUPS package.
    skippedNames : `list` of `str`, optional
        List of package or module names to skip when creating links.

    Returns
    -------
    doc_dirs : namedtuple
        Attributes of the namedtuple are:

        - ``package_dirs`` (`dict`). Keys are package names (for example,
          ``'afw'``). Values are absolute directory paths to the package's
          documentation directory inside the package's ``doc`` directory.
          If there is no package-level documentation the dictionary is empty.

        - ``module_dirs`` (`dict`). Keys are module names (for example,
          ``'lsst.afw.table'``). Values are absolute directory paths to the
          module's directory inside the package's ``doc`` directory. If a
          package has no modules the returned dictionary is empty.

        - ``static_dirs`` (`dict`). Keys are directory names relative to the
          ``_static`` directory. Values are absolute directory paths to the
          static documentation directory in the package. If there isn't a
          declared ``_static`` directory, this dictionary is empty.

    Raises
    ------
    NoPackageDocs
        Raised when the ``manifest.yaml`` file cannot be found in a package.

    Notes
    -----
    Stack packages have documentation in subdirectories of their ``doc``
    directory. The ``manifest.yaml`` file declares what these directories
    are so that they can be symlinked into the root project.

    There are three types of documentation directories:

    1. Package doc directories contain documentation for the EUPS package
       aspect. This is optional.
    2. Module doc directories contain documentation for a Python package
       aspect. These are optional.
    3. Static doc directories are root directories inside the package's
       ``doc/_static/`` directory. These are optional.

    These are declared in a package's ``doc/manifest.yaml`` file.
    For example:

    .. code-block:: yaml

       package: "afw"
       modules:
         - "lsst.afw.image"
         - "lsst.afw.geom"
       statics:
         - "_static/afw"

    This YAML declares *module* documentation directories:

    - ``afw/doc/lsst.afw.image/``
    - ``afw/doc/lsst.afw.geom/``

    It also declares a *package* documentation directory:

    - ``afw/doc/afw``

    And a static documentation directory:

    - ``afw/doc/_static/afw``
    """
    logger = logging.getLogger(__name__)

    if skippedNames is None:
        skippedNames = []

    doc_dir = os.path.join(package_dir, 'doc')
    modules_yaml_path = os.path.join(doc_dir, 'manifest.yaml')

    # A package without a doc manifest cannot be linked at all.
    if not os.path.exists(modules_yaml_path):
        raise NoPackageDocs(
            'Manifest YAML not found: {0}'.format(modules_yaml_path))

    with open(modules_yaml_path) as f:
        manifest_data = yaml.safe_load(f)

    module_dirs = {}
    package_dirs = {}
    static_dirs = {}

    if 'modules' in manifest_data:
        for module_name in manifest_data['modules']:
            if module_name in skippedNames:
                logger.debug('Skipping module {0}'.format(module_name))
                continue
            module_dir = os.path.join(doc_dir, module_name)

            # validate that the module's documentation directory does exist
            if not os.path.isdir(module_dir):
                message = 'module doc dir not found: {0}'.format(module_dir)
                logger.warning(message)
                continue

            module_dirs[module_name] = module_dir
            logger.debug('Found module doc dir {0}'.format(module_dir))

    if 'package' in manifest_data:
        package_name = manifest_data['package']
        full_package_dir = os.path.join(doc_dir, package_name)

        # validate the directory exists
        if os.path.isdir(full_package_dir) \
                and package_name not in skippedNames:
            package_dirs[package_name] = full_package_dir
            logger.debug('Found package doc dir {0}'.format(full_package_dir))
        else:
            logger.warning('package doc dir excluded or not found: {0}'.format(
                full_package_dir))

    if 'statics' in manifest_data:
        for static_dirname in manifest_data['statics']:
            full_static_dir = os.path.join(doc_dir, static_dirname)

            # validate the directory exists
            if not os.path.isdir(full_static_dir):
                message = '_static doc dir not found: {0}'.format(
                    full_static_dir)
                logger.warning(message)
                continue

            # Make a relative path to `_static` that's used as the
            # link source in the root docproject's _static/ directory
            relative_static_dir = os.path.relpath(
                full_static_dir,
                os.path.join(doc_dir, '_static'))

            static_dirs[relative_static_dir] = full_static_dir
            logger.debug('Found _static doc dir: {0}'.format(full_static_dir))

    Dirs = namedtuple('Dirs', ['module_dirs', 'package_dirs', 'static_dirs'])
    return Dirs(module_dirs=module_dirs,
                package_dirs=package_dirs,
                static_dirs=static_dirs)
[ "def", "find_package_docs", "(", "package_dir", ",", "skippedNames", "=", "None", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "if", "skippedNames", "is", "None", ":", "skippedNames", "=", "[", "]", "doc_dir", "=", "os", ".",...
Find documentation directories in a package using ``manifest.yaml``. Parameters ---------- package_dir : `str` Directory of an EUPS package. skippedNames : `list` of `str`, optional List of package or module names to skip when creating links. Returns ------- doc_dirs : namedtuple Attributes of the namedtuple are: - ``package_dirs`` (`dict`). Keys are package names (for example, ``'afw'``). Values are absolute directory paths to the package's documentation directory inside the package's ``doc`` directory. If there is no package-level documentation the dictionary will be empty. - ``modules_dirs`` (`dict`). Keys are module names (for example, ``'lsst.afw.table'``). Values are absolute directory paths to the module's directory inside the package's ``doc`` directory. If a package has no modules the returned dictionary will be empty. - ``static_doc_dirs`` (`dict`). Keys are directory names relative to the ``_static`` directory. Values are absolute directory paths to the static documentation directory in the package. If there isn't a declared ``_static`` directory, this dictionary is empty. Raises ------ NoPackageDocs Raised when the ``manifest.yaml`` file cannot be found in a package. Notes ----- Stack packages have documentation in subdirectories of their `doc` directory. The ``manifest.yaml`` file declares what these directories are so that they can be symlinked into the root project. There are three types of documentation directories: 1. Package doc directories contain documentation for the EUPS package aspect. This is optional. 2. Module doc directories contain documentation for a Python package aspect. These are optional. 3. Static doc directories are root directories inside the package's ``doc/_static/`` directory. These are optional. These are declared in a package's ``doc/manifest.yaml`` file. For example: .. 
code-block:: yaml package: "afw" modules: - "lsst.afw.image" - "lsst.afw.geom" statics: - "_static/afw" This YAML declares *module* documentation directories: - ``afw/doc/lsst.afw.image/`` - ``afw/doc/lsst.afw.geom/`` It also declares a *package* documentation directory: - ``afw/doc/afw`` And a static documentaton directory: - ``afw/doc/_static/afw``
[ "Find", "documentation", "directories", "in", "a", "package", "using", "manifest", ".", "yaml", "." ]
python
train
101Loop/drf-addons
drfaddons/auth.py
https://github.com/101Loop/drf-addons/blob/62392c72e8bce237f4140a2b7171e89984cb15c5/drfaddons/auth.py#L27-L55
def get_authorization(self, request):
    """
    Extract the authorization JWT string from a request.

    The value of ``self.key`` is looked up in the request body first,
    falling back to ``self.header_key`` in the request headers.

    Parameters
    ----------
    request: HttpRequest
        This is the raw request that user has sent.

    Returns
    -------
    auth: str
        Return request's 'JWT_AUTH_KEY:' content from body or Header, as a
        bytestring.

        Hide some test client ickyness where the header can be unicode.
    """
    from django.utils.six import text_type
    from rest_framework import HTTP_HEADER_ENCODING

    token = request.data.get(self.key, b'')
    if not token:
        token = request.META.get(self.header_key, b'')

    # The django test client may hand back unicode; normalize to bytes.
    if isinstance(token, text_type):
        token = token.encode(HTTP_HEADER_ENCODING)

    return token
[ "def", "get_authorization", "(", "self", ",", "request", ")", ":", "from", "django", ".", "utils", ".", "six", "import", "text_type", "from", "rest_framework", "import", "HTTP_HEADER_ENCODING", "auth", "=", "request", ".", "data", ".", "get", "(", "self", "....
This function extracts the authorization JWT string. It first looks for specified key in header and then looks for the same in body part. Parameters ---------- request: HttpRequest This is the raw request that user has sent. Returns ------- auth: str Return request's 'JWT_AUTH_KEY:' content from body or Header, as a bytestring. Hide some test client ickyness where the header can be unicode.
[ "This", "function", "extracts", "the", "authorization", "JWT", "string", ".", "It", "first", "looks", "for", "specified", "key", "in", "header", "and", "then", "looks", "for", "the", "same", "in", "body", "part", "." ]
python
train
vertexproject/synapse
synapse/lib/cli.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/cli.py#L339-L386
async def runCmdLoop(self):
    '''
    Run commands from a user in an interactive fashion until fini() or EOFError is raised.
    '''
    while not self.isfini:

        # FIXME completion

        # Reset so the finally block only cleans up a task from THIS iteration.
        self.cmdtask = None

        try:

            line = await self.prompt()

            if not line:
                continue

            line = line.strip()

            if not line:
                continue

            # Run the command as a scheduled coroutine so a ctrl-c can
            # cancel it via the finally block below.
            coro = self.runCmdLine(line)
            self.cmdtask = self.schedCoro(coro)
            await self.cmdtask

        except KeyboardInterrupt:

            if self.isfini:
                return

            self.printf('<ctrl-c>')

        except (s_exc.CliFini, EOFError):
            # Either the CLI was torn down or input was exhausted; shut down.
            await self.fini()

        except Exception:
            # Any other command failure is shown to the user; the loop continues.
            s = traceback.format_exc()
            self.printf(s)

        finally:
            if self.cmdtask is not None:
                self.cmdtask.cancel()
                try:
                    self.cmdtask.result()
                except asyncio.CancelledError:
                    # Wait a beat to let any remaining nodes to print out before we print the prompt
                    await asyncio.sleep(1)
                except Exception:
                    # NOTE(review): result() on a just-cancelled, unfinished task can
                    # raise InvalidStateError, which lands here and is ignored by design.
                    pass
[ "async", "def", "runCmdLoop", "(", "self", ")", ":", "while", "not", "self", ".", "isfini", ":", "# FIXME completion", "self", ".", "cmdtask", "=", "None", "try", ":", "line", "=", "await", "self", ".", "prompt", "(", ")", "if", "not", "line", ":", "...
Run commands from a user in an interactive fashion until fini() or EOFError is raised.
[ "Run", "commands", "from", "a", "user", "in", "an", "interactive", "fashion", "until", "fini", "()", "or", "EOFError", "is", "raised", "." ]
python
train
tanghaibao/jcvi
jcvi/compara/catalog.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/catalog.py#L492-L559
def omgprepare(args):
    """
    %prog omgprepare ploidy anchorsfile blastfile

    Prepare to run Sankoff's OMG algorithm to get orthologs.
    """
    from jcvi.formats.blast import cscore
    from jcvi.formats.base import DictFile

    p = OptionParser(omgprepare.__doc__)
    p.add_option("--norbh", action="store_true",
                 help="Disable RBH hits [default: %default]")
    p.add_option("--pctid", default=0, type="int",
                 help="Percent id cutoff for RBH hits [default: %default]")
    p.add_option("--cscore", default=90, type="int",
                 help="C-score cutoff for RBH hits [default: %default]")
    p.set_stripnames()
    p.set_beds()

    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    ploidy, anchorfile, blastfile = args
    norbh = opts.norbh
    pctid = opts.pctid
    cs = opts.cscore
    qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)

    # Map genome name (first column of the ploidy file) to its line index.
    fp = open(ploidy)
    genomeidx = dict((x.split()[0], i) for i, x in enumerate(fp))
    fp.close()

    # Rebind: `ploidy` now maps genome name -> ploidy value from the same file.
    ploidy = DictFile(ploidy)

    geneinfo(qbed, qorder, genomeidx, ploidy)
    geneinfo(sbed, sorder, genomeidx, ploidy)

    # Compute C-scores for all BLAST hits into <prefix>.cscore.
    pf = blastfile.rsplit(".", 1)[0]
    cscorefile = pf + ".cscore"
    cscore([blastfile, "-o", cscorefile, "--cutoff=0", "--pct"])
    ac = AnchorFile(anchorfile)
    pairs = set((a, b) for a, b, i in ac.iter_pairs())
    logging.debug("Imported {0} pairs from `{1}`.".format(len(pairs), anchorfile))

    # Write the OMG weights file: synteny pairs pass through at full weight;
    # RBH-only pairs are filtered by the cutoffs and down-weighted.
    weightsfile = pf + ".weights"
    fp = open(cscorefile)
    fw = open(weightsfile, "w")
    npairs = 0
    for row in fp:
        a, b, c, pct = row.split()
        c, pct = float(c), float(pct)
        c = int(c * 100)
        if (a, b) not in pairs:
            if norbh:
                continue
            if c < cs:
                continue
            if pct < pctid:
                continue
            c /= 10  # This severely penalizes RBH against synteny

        print("\t".join((a, b, str(c))), file=fw)
        npairs += 1
    fw.close()

    logging.debug("Write {0} pairs to `{1}`.".format(npairs, weightsfile))
[ "def", "omgprepare", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "blast", "import", "cscore", "from", "jcvi", ".", "formats", ".", "base", "import", "DictFile", "p", "=", "OptionParser", "(", "omgprepare", ".", "__doc__", ")", "p", ".", ...
%prog omgprepare ploidy anchorsfile blastfile Prepare to run Sankoff's OMG algorithm to get orthologs.
[ "%prog", "omgprepare", "ploidy", "anchorsfile", "blastfile" ]
python
train
pypa/pipenv
pipenv/vendor/pexpect/pty_spawn.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/pty_spawn.py#L563-L574
def sendcontrol(self, char):
    '''Helper method that wraps send() with mnemonic access for sending control
    character to the child (such as Ctrl-C or Ctrl-D). For example, to send
    Ctrl-G (ASCII 7, bell, '\a')::

        child.sendcontrol('g')

    See also, sendintr() and sendeof().
    '''
    # Delegate to the pty process, log the raw byte sent, then report
    # how many bytes were written.
    count, raw = self.ptyproc.sendcontrol(char)
    self._log_control(raw)
    return count
[ "def", "sendcontrol", "(", "self", ",", "char", ")", ":", "n", ",", "byte", "=", "self", ".", "ptyproc", ".", "sendcontrol", "(", "char", ")", "self", ".", "_log_control", "(", "byte", ")", "return", "n" ]
Helper method that wraps send() with mnemonic access for sending control character to the child (such as Ctrl-C or Ctrl-D). For example, to send Ctrl-G (ASCII 7, bell, '\a'):: child.sendcontrol('g') See also, sendintr() and sendeof().
[ "Helper", "method", "that", "wraps", "send", "()", "with", "mnemonic", "access", "for", "sending", "control", "character", "to", "the", "child", "(", "such", "as", "Ctrl", "-", "C", "or", "Ctrl", "-", "D", ")", ".", "For", "example", "to", "send", "Ctr...
python
train
biosignalsnotebooks/biosignalsnotebooks
biosignalsnotebooks/build/lib/biosignalsnotebooks/extract.py
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/extract.py#L440-L559
def fatigue_eval_med_freq(data, sample_rate, time_units=True, raw_to_mv=True, device="biosignalsplux", resolution=16, show_plot=False): """ ----- Brief ----- Returns the evolution time series of EMG median frequency along the acquisition, based on a sliding window mechanism. ----------- Description ----------- The median frequency of activation events in EMG signal is particularly important in fatigue evaluation methods. This function calculates the median frequency of each activation period and allows to plot those values in order to see the temporal evolution of this particular feature. ---------- Parameters ---------- data : list EMG signal. sample_rate : int Sampling frequency. time_units : boolean If True this function will return the x axis samples in seconds. raw_to_mv : boolean If True then it is assumed that the input samples are in a raw format and the output results will be in mV. When True "device" and "resolution" inputs became mandatory. device : str Plux device label: - "bioplux" - "bioplux_exp" - "biosignalsplux" - "rachimeter" - "channeller" - "swifter" - "ddme_openbanplux" resolution : int Resolution selected during acquisition. show_plot : boolean If True, then a figure with the median frequency evolution will be shown. Returns ------- out : pandas.DataFrame DataFrame with the time and the sequence of median frequency evolution. """ # Conversion of data samples to mV if requested by raw_to_mv input. if raw_to_mv is True: data = raw_to_phy("EMG", device, data, resolution, option="mV") # Definition of the time axis. if time_units is True: time = numpy.linspace(0, len(data) / sample_rate, len(data)) else: time = numpy.linspace(0, len(data) - 1, len(data)) # Detection of muscular activation periods. burst_begin, burst_end = detect_emg_activations(data, sample_rate, smooth_level=20, threshold_level=10, time_units=False, volts=True, resolution=resolution, device=device, plot_result=False)[:2] # Iteration along bursts. 
median_freq_data = [] median_freq_time = [] for burst in range(0, len(burst_begin)): processing_window = data[burst_begin[burst]:burst_end[burst]] central_point = (burst_begin[burst] + burst_end[burst]) / 2 median_freq_time.append(central_point / sample_rate) # Generation of the processing window power spectrum. freqs, power = scisignal.welch(processing_window, fs=sample_rate, window='hanning', noverlap=0, nfft=int(256.)) # Determination of median power frequency. area_freq = integr.cumtrapz(power, freqs, initial=0) total_power = area_freq[-1] median_freq_data.append(freqs[numpy.where(area_freq >= total_power / 2)[0][0]]) # Graphical Representation step. if show_plot is True: list_figures_1 = plot([list(time), list(median_freq_time)], [list(data), list(median_freq_data)], title=["EMG Acquisition highlighting bursts", "Median Frequency Evolution"], gridPlot=True, gridLines=2, gridColumns=1, openSignalsStyle=True, x_axis_label="Time (s)", yAxisLabel=["Raw Data", "Median Frequency (Hz)"], x_range=[0, 125], show_plot=False) # Highlighting processing window. for burst in range(0, len(burst_begin)): color = opensignals_color_pallet() box_annotation = BoxAnnotation(left=burst_begin[burst] / sample_rate, right=burst_end[burst] / sample_rate, fill_color=color, fill_alpha=0.1) box_annotation_copy = BoxAnnotation(left=burst_begin[burst] / sample_rate, right=burst_end[burst] / sample_rate, fill_color=color, fill_alpha=0.1) list_figures_1[0].add_layout(box_annotation) list_figures_1[1].add_layout(box_annotation_copy) gridplot_1 = gridplot([[list_figures_1[0]], [list_figures_1[1]]], **opensignals_kwargs("gridplot")) show(gridplot_1) # pandas.DataFrame(a, columns=a.keys()) # pandas.DataFrame([a], columns=a.keys()) return pandas.DataFrame({"Time (s)": median_freq_time, "Median Frequency (Hz)": median_freq_data}, columns=["Time (s)", "Median Frequency (Hz)"])
[ "def", "fatigue_eval_med_freq", "(", "data", ",", "sample_rate", ",", "time_units", "=", "True", ",", "raw_to_mv", "=", "True", ",", "device", "=", "\"biosignalsplux\"", ",", "resolution", "=", "16", ",", "show_plot", "=", "False", ")", ":", "# Conversion of d...
----- Brief ----- Returns the evolution time series of EMG median frequency along the acquisition, based on a sliding window mechanism. ----------- Description ----------- The median frequency of activation events in EMG signal is particularly important in fatigue evaluation methods. This function calculates the median frequency of each activation period and allows to plot those values in order to see the temporal evolution of this particular feature. ---------- Parameters ---------- data : list EMG signal. sample_rate : int Sampling frequency. time_units : boolean If True this function will return the x axis samples in seconds. raw_to_mv : boolean If True then it is assumed that the input samples are in a raw format and the output results will be in mV. When True "device" and "resolution" inputs became mandatory. device : str Plux device label: - "bioplux" - "bioplux_exp" - "biosignalsplux" - "rachimeter" - "channeller" - "swifter" - "ddme_openbanplux" resolution : int Resolution selected during acquisition. show_plot : boolean If True, then a figure with the median frequency evolution will be shown. Returns ------- out : pandas.DataFrame DataFrame with the time and the sequence of median frequency evolution.
[ "-----", "Brief", "-----", "Returns", "the", "evolution", "time", "series", "of", "EMG", "median", "frequency", "along", "the", "acquisition", "based", "on", "a", "sliding", "window", "mechanism", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/optimizer/linesearch/hager_zhang.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/hager_zhang.py#L420-L542
def _line_search_after_bracketing( value_and_gradients_function, search_interval, val_0, f_lim, max_iterations, sufficient_decrease_param, curvature_param, shrinkage_param): """The main loop of line search after the minimum has been bracketed. Args: value_and_gradients_function: A Python callable that accepts a real scalar tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that correspond to scalar tensors of real dtype containing the point at which the function was evaluated, the value of the function, and its derivative at that point. The other namedtuple fields, if present, should be tensors or sequences (possibly nested) of tensors. In usual optimization application, this function would be generated by projecting the multivariate objective function along some specific direction. The direction is determined by some other procedure but should be a descent direction (i.e. the derivative of the projected univariate function must be negative at 0.). Alternatively, the function may represent the batching of `n` such line functions (e.g. projecting a single multivariate objective function along `n` distinct directions at once) accepting n points as input, i.e. a tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned namedtuple should each be a tensor of shape [n], with the corresponding input points, function values, and derivatives at those input points. search_interval: Instance of `HagerZhangLineSearchResults` containing the current line search interval. val_0: A namedtuple as returned by value_and_gradients_function evaluated at `0.`. The gradient must be negative (i.e. must be a descent direction). f_lim: Scalar `Tensor` of float dtype. max_iterations: Positive scalar `Tensor` of integral dtype. The maximum number of iterations to perform in the line search. The number of iterations used to bracket the minimum are also counted against this parameter. sufficient_decrease_param: Positive scalar `Tensor` of real dtype. 
Bounded above by the curvature param. Corresponds to `delta` in the terminology of [Hager and Zhang (2006)][2]. curvature_param: Positive scalar `Tensor` of real dtype. Bounded above by `1.`. Corresponds to 'sigma' in the terminology of [Hager and Zhang (2006)][2]. shrinkage_param: Scalar positive Tensor of real dtype. Must be less than `1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2]. Returns: A namedtuple containing the following fields. converged: Boolean `Tensor` of shape [n]. Whether a point satisfying Wolfe/Approx wolfe was found. failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g. if either the objective function or the gradient are not finite at an evaluation point. iterations: Scalar int32 `Tensor`. Number of line search iterations made. func_evals: Scalar int32 `Tensor`. Number of function evaluations made. left: A namedtuple, as returned by value_and_gradients_function, of the left end point of the updated bracketing interval. right: A namedtuple, as returned by value_and_gradients_function, of the right end point of the updated bracketing interval. 
""" def _loop_cond(curr_interval): """Loop condition.""" active = ~(curr_interval.converged | curr_interval.failed) return (curr_interval.iterations < max_iterations) & tf.reduce_any(input_tensor=active) def _loop_body(curr_interval): """The loop body.""" secant2_raw_result = hzl.secant2( value_and_gradients_function, val_0, curr_interval, f_lim, sufficient_decrease_param, curvature_param) secant2_result = HagerZhangLineSearchResult( converged=secant2_raw_result.converged, failed=secant2_raw_result.failed, iterations=curr_interval.iterations + 1, func_evals=secant2_raw_result.num_evals, left=secant2_raw_result.left, right=secant2_raw_result.right) should_check_shrinkage = ~(secant2_result.converged | secant2_result.failed) def _do_check_shrinkage(): """Check if interval has shrinked enough.""" old_width = curr_interval.right.x - curr_interval.left.x new_width = secant2_result.right.x - secant2_result.left.x sufficient_shrinkage = new_width < old_width * shrinkage_param func_is_flat = ( _very_close(curr_interval.left.f, curr_interval.right.f) & _very_close(secant2_result.left.f, secant2_result.right.f)) new_converged = ( should_check_shrinkage & sufficient_shrinkage & func_is_flat) needs_inner_bisect = should_check_shrinkage & ~sufficient_shrinkage inner_bisect_args = secant2_result._replace( converged=secant2_result.converged | new_converged) def _apply_inner_bisect(): return _line_search_inner_bisection( value_and_gradients_function, inner_bisect_args, needs_inner_bisect, f_lim) return prefer_static.cond( tf.reduce_any(input_tensor=needs_inner_bisect), _apply_inner_bisect, lambda: inner_bisect_args) next_args = prefer_static.cond( tf.reduce_any(input_tensor=should_check_shrinkage), _do_check_shrinkage, lambda: secant2_result) interval_shrunk = ( ~next_args.failed & _very_close(next_args.left.x, next_args.right.x)) return [next_args._replace(converged=next_args.converged | interval_shrunk)] return tf.while_loop( cond=_loop_cond, body=_loop_body, 
loop_vars=[search_interval], parallel_iterations=1)[0]
[ "def", "_line_search_after_bracketing", "(", "value_and_gradients_function", ",", "search_interval", ",", "val_0", ",", "f_lim", ",", "max_iterations", ",", "sufficient_decrease_param", ",", "curvature_param", ",", "shrinkage_param", ")", ":", "def", "_loop_cond", "(", ...
The main loop of line search after the minimum has been bracketed. Args: value_and_gradients_function: A Python callable that accepts a real scalar tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that correspond to scalar tensors of real dtype containing the point at which the function was evaluated, the value of the function, and its derivative at that point. The other namedtuple fields, if present, should be tensors or sequences (possibly nested) of tensors. In usual optimization application, this function would be generated by projecting the multivariate objective function along some specific direction. The direction is determined by some other procedure but should be a descent direction (i.e. the derivative of the projected univariate function must be negative at 0.). Alternatively, the function may represent the batching of `n` such line functions (e.g. projecting a single multivariate objective function along `n` distinct directions at once) accepting n points as input, i.e. a tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned namedtuple should each be a tensor of shape [n], with the corresponding input points, function values, and derivatives at those input points. search_interval: Instance of `HagerZhangLineSearchResults` containing the current line search interval. val_0: A namedtuple as returned by value_and_gradients_function evaluated at `0.`. The gradient must be negative (i.e. must be a descent direction). f_lim: Scalar `Tensor` of float dtype. max_iterations: Positive scalar `Tensor` of integral dtype. The maximum number of iterations to perform in the line search. The number of iterations used to bracket the minimum are also counted against this parameter. sufficient_decrease_param: Positive scalar `Tensor` of real dtype. Bounded above by the curvature param. Corresponds to `delta` in the terminology of [Hager and Zhang (2006)][2]. curvature_param: Positive scalar `Tensor` of real dtype. Bounded above by `1.`. 
Corresponds to 'sigma' in the terminology of [Hager and Zhang (2006)][2]. shrinkage_param: Scalar positive Tensor of real dtype. Must be less than `1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2]. Returns: A namedtuple containing the following fields. converged: Boolean `Tensor` of shape [n]. Whether a point satisfying Wolfe/Approx wolfe was found. failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g. if either the objective function or the gradient are not finite at an evaluation point. iterations: Scalar int32 `Tensor`. Number of line search iterations made. func_evals: Scalar int32 `Tensor`. Number of function evaluations made. left: A namedtuple, as returned by value_and_gradients_function, of the left end point of the updated bracketing interval. right: A namedtuple, as returned by value_and_gradients_function, of the right end point of the updated bracketing interval.
[ "The", "main", "loop", "of", "line", "search", "after", "the", "minimum", "has", "been", "bracketed", "." ]
python
test
isogeo/isogeo-api-py-minsdk
isogeo_pysdk/isogeo_sdk.py
https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L887-L901
def add_tags_shares(self, tags: dict = dict()): """Add shares list to the tags attributes in search results. :param dict tags: tags dictionary from a search request """ # check if shares_id have already been retrieved or not if not hasattr(self, "shares_id"): shares = self.shares() self.shares_id = { "share:{}".format(i.get("_id")): i.get("name") for i in shares } else: pass # update query tags tags.update(self.shares_id)
[ "def", "add_tags_shares", "(", "self", ",", "tags", ":", "dict", "=", "dict", "(", ")", ")", ":", "# check if shares_id have already been retrieved or not", "if", "not", "hasattr", "(", "self", ",", "\"shares_id\"", ")", ":", "shares", "=", "self", ".", "share...
Add shares list to the tags attributes in search results. :param dict tags: tags dictionary from a search request
[ "Add", "shares", "list", "to", "the", "tags", "attributes", "in", "search", "results", "." ]
python
train
rytilahti/python-songpal
songpal/group.py
https://github.com/rytilahti/python-songpal/blob/0443de6b3d960b9067a851d82261ca00e46b4618/songpal/group.py#L125-L133
async def call(self, action, **kwargs): """Make an action call with given kwargs.""" act = self.service.action(action) _LOGGER.info("Calling %s with %s", action, kwargs) res = await act.async_call(**kwargs) _LOGGER.info(" Result: %s" % res) return res
[ "async", "def", "call", "(", "self", ",", "action", ",", "*", "*", "kwargs", ")", ":", "act", "=", "self", ".", "service", ".", "action", "(", "action", ")", "_LOGGER", ".", "info", "(", "\"Calling %s with %s\"", ",", "action", ",", "kwargs", ")", "r...
Make an action call with given kwargs.
[ "Make", "an", "action", "call", "with", "given", "kwargs", "." ]
python
train
20c/munge
munge/codec/__init__.py
https://github.com/20c/munge/blob/e20fef8c24e48d4b0a5c387820fbb2b7bebb0af0/munge/codec/__init__.py#L66-L80
def load_datafile(name, search_path=('.'), codecs=get_codecs(), **kwargs): """ find datafile and load them from codec TODO only does the first one kwargs: default = if passed will return that on failure instead of throwing """ mod = find_datafile(name, search_path, codecs) if not mod: if 'default' in kwargs: return kwargs['default'] raise IOError("file %s not found in search path %s" %(name, str(search_path))) (codec, datafile) = mod[0] return codec().load(open(datafile))
[ "def", "load_datafile", "(", "name", ",", "search_path", "=", "(", "'.'", ")", ",", "codecs", "=", "get_codecs", "(", ")", ",", "*", "*", "kwargs", ")", ":", "mod", "=", "find_datafile", "(", "name", ",", "search_path", ",", "codecs", ")", "if", "not...
find datafile and load them from codec TODO only does the first one kwargs: default = if passed will return that on failure instead of throwing
[ "find", "datafile", "and", "load", "them", "from", "codec", "TODO", "only", "does", "the", "first", "one", "kwargs", ":", "default", "=", "if", "passed", "will", "return", "that", "on", "failure", "instead", "of", "throwing" ]
python
train
ray-project/ray
python/ray/experimental/serve/mixin.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/serve/mixin.py#L50-L63
def _dispatch(self, input_batch: List[SingleQuery]): """Helper method to dispatch a batch of input to self.serve_method.""" method = getattr(self, self.serve_method) if hasattr(method, "ray_serve_batched_input"): batch = [inp.data for inp in input_batch] result = _execute_and_seal_error(method, batch, self.serve_method) for res, inp in zip(result, input_batch): ray.worker.global_worker.put_object(inp.result_object_id, res) else: for inp in input_batch: result = _execute_and_seal_error(method, inp.data, self.serve_method) ray.worker.global_worker.put_object(inp.result_object_id, result)
[ "def", "_dispatch", "(", "self", ",", "input_batch", ":", "List", "[", "SingleQuery", "]", ")", ":", "method", "=", "getattr", "(", "self", ",", "self", ".", "serve_method", ")", "if", "hasattr", "(", "method", ",", "\"ray_serve_batched_input\"", ")", ":",...
Helper method to dispatch a batch of input to self.serve_method.
[ "Helper", "method", "to", "dispatch", "a", "batch", "of", "input", "to", "self", ".", "serve_method", "." ]
python
train
PyconUK/ConferenceScheduler
src/conference_scheduler/scheduler.py
https://github.com/PyconUK/ConferenceScheduler/blob/fb139f0ef2eab5ac8f4919aa4994d94d4e040030/src/conference_scheduler/scheduler.py#L290-L349
def slot_schedule_difference(old_schedule, new_schedule): """Compute the difference between two schedules from a slot perspective Parameters ---------- old_schedule : list or tuple of :py:class:`resources.ScheduledItem` objects new_schedule : list or tuple of :py:class:`resources.ScheduledItem` objects Returns ------- list A list of :py:class:`resources.ChangedSlotScheduledItem` objects Example ------- >>> from conference_scheduler.resources import Event, Slot, ScheduledItem >>> from conference_scheduler.scheduler import slot_schedule_difference >>> events = [Event(f'event_{i}', 30, 0) for i in range(5)] >>> slots = [Slot(f'venue_{i}', '', 30, 100, None) for i in range(5)] >>> old_schedule = ( ... ScheduledItem(events[0], slots[0]), ... ScheduledItem(events[1], slots[1]), ... ScheduledItem(events[2], slots[2])) >>> new_schedule = ( ... ScheduledItem(events[0], slots[0]), ... ScheduledItem(events[1], slots[2]), ... ScheduledItem(events[2], slots[3]), ... ScheduledItem(events[3], slots[4])) >>> diff = slot_schedule_difference(old_schedule, new_schedule) >>> print([item.slot.venue for item in diff]) ['venue_1', 'venue_2', 'venue_3', 'venue_4'] """ old = {item.slot: item for item in old_schedule} new = {item.slot: item for item in new_schedule} common_slots = set(old.keys()).intersection(new.keys()) added_slots = new.keys() - old.keys() removed_slots = old.keys() - new.keys() changed = [ ChangedSlotScheduledItem( old[slot].slot, old[slot].event, new[slot].event) for slot in common_slots if old[slot].event != new[slot].event ] added = [ ChangedSlotScheduledItem(new[slot].slot, None, new[slot].event) for slot in added_slots ] removed = [ ChangedSlotScheduledItem(old[slot].slot, old[slot].event, None) for slot in removed_slots ] return sorted( changed + added + removed, key=lambda item: (item.slot.venue, item.slot.starts_at) )
[ "def", "slot_schedule_difference", "(", "old_schedule", ",", "new_schedule", ")", ":", "old", "=", "{", "item", ".", "slot", ":", "item", "for", "item", "in", "old_schedule", "}", "new", "=", "{", "item", ".", "slot", ":", "item", "for", "item", "in", ...
Compute the difference between two schedules from a slot perspective Parameters ---------- old_schedule : list or tuple of :py:class:`resources.ScheduledItem` objects new_schedule : list or tuple of :py:class:`resources.ScheduledItem` objects Returns ------- list A list of :py:class:`resources.ChangedSlotScheduledItem` objects Example ------- >>> from conference_scheduler.resources import Event, Slot, ScheduledItem >>> from conference_scheduler.scheduler import slot_schedule_difference >>> events = [Event(f'event_{i}', 30, 0) for i in range(5)] >>> slots = [Slot(f'venue_{i}', '', 30, 100, None) for i in range(5)] >>> old_schedule = ( ... ScheduledItem(events[0], slots[0]), ... ScheduledItem(events[1], slots[1]), ... ScheduledItem(events[2], slots[2])) >>> new_schedule = ( ... ScheduledItem(events[0], slots[0]), ... ScheduledItem(events[1], slots[2]), ... ScheduledItem(events[2], slots[3]), ... ScheduledItem(events[3], slots[4])) >>> diff = slot_schedule_difference(old_schedule, new_schedule) >>> print([item.slot.venue for item in diff]) ['venue_1', 'venue_2', 'venue_3', 'venue_4']
[ "Compute", "the", "difference", "between", "two", "schedules", "from", "a", "slot", "perspective" ]
python
train
ahmontero/dop
dop/client.py
https://github.com/ahmontero/dop/blob/40354ac6feefe92a7555fe2d1834138c9a03e518/dop/client.py#L169-L182
def reboot_droplet(self, droplet_id): """ This method allows you to reboot a droplet. This is the preferred method to use if a server is not responding. """ if not droplet_id: raise DOPException('droplet_id is required to reboot a droplet!') json = self.request('/droplets/%s/reboot' % droplet_id, method='GET') status = json.get('status') if status == 'OK': return json.get('event_id') else: message = json.get('message') raise DOPException('[%s]: %s' % (status, message))
[ "def", "reboot_droplet", "(", "self", ",", "droplet_id", ")", ":", "if", "not", "droplet_id", ":", "raise", "DOPException", "(", "'droplet_id is required to reboot a droplet!'", ")", "json", "=", "self", ".", "request", "(", "'/droplets/%s/reboot'", "%", "droplet_id...
This method allows you to reboot a droplet. This is the preferred method to use if a server is not responding.
[ "This", "method", "allows", "you", "to", "reboot", "a", "droplet", ".", "This", "is", "the", "preferred", "method", "to", "use", "if", "a", "server", "is", "not", "responding", "." ]
python
train
twisted/mantissa
xmantissa/interstore.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/interstore.py#L662-L671
def _messageFromSender(self, sender, messageID): """ Locate a previously queued message by a given sender and messageID. """ return self.store.findUnique( _QueuedMessage, AND(_QueuedMessage.senderUsername == sender.localpart, _QueuedMessage.senderDomain == sender.domain, _QueuedMessage.messageID == messageID), default=None)
[ "def", "_messageFromSender", "(", "self", ",", "sender", ",", "messageID", ")", ":", "return", "self", ".", "store", ".", "findUnique", "(", "_QueuedMessage", ",", "AND", "(", "_QueuedMessage", ".", "senderUsername", "==", "sender", ".", "localpart", ",", "_...
Locate a previously queued message by a given sender and messageID.
[ "Locate", "a", "previously", "queued", "message", "by", "a", "given", "sender", "and", "messageID", "." ]
python
train
nfcpy/nfcpy
src/nfc/snep/client.py
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/snep/client.py#L156-L176
def get_records(self, records=None, timeout=1.0): """Get NDEF message records from a SNEP Server. .. versionadded:: 0.13 The :class:`ndef.Record` list given by *records* is encoded as the request message octets input to :meth:`get_octets`. The return value is an :class:`ndef.Record` list decoded from the response message octets returned by :meth:`get_octets`. Same as:: import ndef send_octets = ndef.message_encoder(records) rcvd_octets = snep_client.get_octets(send_octets, timeout) records = list(ndef.message_decoder(rcvd_octets)) """ octets = b''.join(ndef.message_encoder(records)) if records else None octets = self.get_octets(octets, timeout) if octets and len(octets) >= 3: return list(ndef.message_decoder(octets))
[ "def", "get_records", "(", "self", ",", "records", "=", "None", ",", "timeout", "=", "1.0", ")", ":", "octets", "=", "b''", ".", "join", "(", "ndef", ".", "message_encoder", "(", "records", ")", ")", "if", "records", "else", "None", "octets", "=", "s...
Get NDEF message records from a SNEP Server. .. versionadded:: 0.13 The :class:`ndef.Record` list given by *records* is encoded as the request message octets input to :meth:`get_octets`. The return value is an :class:`ndef.Record` list decoded from the response message octets returned by :meth:`get_octets`. Same as:: import ndef send_octets = ndef.message_encoder(records) rcvd_octets = snep_client.get_octets(send_octets, timeout) records = list(ndef.message_decoder(rcvd_octets))
[ "Get", "NDEF", "message", "records", "from", "a", "SNEP", "Server", "." ]
python
train
pandas-dev/pandas
pandas/core/arrays/datetimelike.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1441-L1465
def validate_periods(periods): """ If a `periods` argument is passed to the Datetime/Timedelta Array/Index constructor, cast it to an integer. Parameters ---------- periods : None, float, int Returns ------- periods : None or int Raises ------ TypeError if periods is None, float, or int """ if periods is not None: if lib.is_float(periods): periods = int(periods) elif not lib.is_integer(periods): raise TypeError('periods must be a number, got {periods}' .format(periods=periods)) return periods
[ "def", "validate_periods", "(", "periods", ")", ":", "if", "periods", "is", "not", "None", ":", "if", "lib", ".", "is_float", "(", "periods", ")", ":", "periods", "=", "int", "(", "periods", ")", "elif", "not", "lib", ".", "is_integer", "(", "periods",...
If a `periods` argument is passed to the Datetime/Timedelta Array/Index constructor, cast it to an integer. Parameters ---------- periods : None, float, int Returns ------- periods : None or int Raises ------ TypeError if periods is None, float, or int
[ "If", "a", "periods", "argument", "is", "passed", "to", "the", "Datetime", "/", "Timedelta", "Array", "/", "Index", "constructor", "cast", "it", "to", "an", "integer", "." ]
python
train
tensorforce/tensorforce
tensorforce/agents/learning_agent.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/agents/learning_agent.py#L144-L194
def import_experience(self, experiences): """ Imports experiences. Args: experiences: """ if isinstance(experiences, dict): if self.unique_state: experiences['states'] = dict(state=experiences['states']) if self.unique_action: experiences['actions'] = dict(action=experiences['actions']) self.model.import_experience(**experiences) else: if self.unique_state: states = dict(state=list()) else: states = {name: list() for name in experiences[0]['states']} internals = [list() for _ in experiences[0]['internals']] if self.unique_action: actions = dict(action=list()) else: actions = {name: list() for name in experiences[0]['actions']} terminal = list() reward = list() for experience in experiences: if self.unique_state: states['state'].append(experience['states']) else: for name in sorted(states): states[name].append(experience['states'][name]) for n, internal in enumerate(internals): internal.append(experience['internals'][n]) if self.unique_action: actions['action'].append(experience['actions']) else: for name in sorted(actions): actions[name].append(experience['actions'][name]) terminal.append(experience['terminal']) reward.append(experience['reward']) self.model.import_experience( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward )
[ "def", "import_experience", "(", "self", ",", "experiences", ")", ":", "if", "isinstance", "(", "experiences", ",", "dict", ")", ":", "if", "self", ".", "unique_state", ":", "experiences", "[", "'states'", "]", "=", "dict", "(", "state", "=", "experiences"...
Imports experiences. Args: experiences:
[ "Imports", "experiences", "." ]
python
valid
python-diamond/Diamond
src/collectors/elb/elb.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/elb/elb.py#L109-L116
def get_zones(region, auth_kwargs): """ :param auth_kwargs: :param region: region to get the availability zones for :return: list of availability zones """ ec2_conn = boto.ec2.connect_to_region(region, **auth_kwargs) return [zone.name for zone in ec2_conn.get_all_zones()]
[ "def", "get_zones", "(", "region", ",", "auth_kwargs", ")", ":", "ec2_conn", "=", "boto", ".", "ec2", ".", "connect_to_region", "(", "region", ",", "*", "*", "auth_kwargs", ")", "return", "[", "zone", ".", "name", "for", "zone", "in", "ec2_conn", ".", ...
:param auth_kwargs: :param region: region to get the availability zones for :return: list of availability zones
[ ":", "param", "auth_kwargs", ":", ":", "param", "region", ":", "region", "to", "get", "the", "availability", "zones", "for", ":", "return", ":", "list", "of", "availability", "zones" ]
python
train
keon/algorithms
algorithms/tree/is_balanced.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/tree/is_balanced.py#L12-L22
def __get_depth(root): """ return 0 if unbalanced else depth + 1 """ if root is None: return 0 left = __get_depth(root.left) right = __get_depth(root.right) if abs(left-right) > 1 or -1 in [left, right]: return -1 return 1 + max(left, right)
[ "def", "__get_depth", "(", "root", ")", ":", "if", "root", "is", "None", ":", "return", "0", "left", "=", "__get_depth", "(", "root", ".", "left", ")", "right", "=", "__get_depth", "(", "root", ".", "right", ")", "if", "abs", "(", "left", "-", "rig...
return 0 if unbalanced else depth + 1
[ "return", "0", "if", "unbalanced", "else", "depth", "+", "1" ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/bindings/__init__.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L32-L52
def verify_string_dxid(dxid, expected_classes): ''' :param dxid: Value to verify as a DNAnexus ID of class *expected_class* :param expected_classes: Single string or list of strings of allowed classes of the ID, e.g. "file" or ["project", "container"] :type expected_classes: string or list of strings :raises: :exc:`~dxpy.exceptions.DXError` if *dxid* is not a string or is not a valid DNAnexus ID of the expected class ''' if isinstance(expected_classes, basestring): expected_classes = [expected_classes] if not isinstance(expected_classes, list) or len(expected_classes) == 0: raise DXError('verify_string_dxid: expected_classes should be a string or list of strings') if not (isinstance(dxid, basestring) and re.match('^(' + '|'.join(expected_classes) + ')-[0-9a-zA-Z]{24}$', dxid)): if len(expected_classes) == 1: str_expected_classes = expected_classes[0] elif len(expected_classes) == 2: str_expected_classes = ' or '.join(expected_classes) else: str_expected_classes = ', '.join(expected_classes[:-1]) + ', or ' + expected_classes[-1] raise DXError('Invalid ID of class %s: %r' % (str_expected_classes, dxid))
[ "def", "verify_string_dxid", "(", "dxid", ",", "expected_classes", ")", ":", "if", "isinstance", "(", "expected_classes", ",", "basestring", ")", ":", "expected_classes", "=", "[", "expected_classes", "]", "if", "not", "isinstance", "(", "expected_classes", ",", ...
:param dxid: Value to verify as a DNAnexus ID of class *expected_class* :param expected_classes: Single string or list of strings of allowed classes of the ID, e.g. "file" or ["project", "container"] :type expected_classes: string or list of strings :raises: :exc:`~dxpy.exceptions.DXError` if *dxid* is not a string or is not a valid DNAnexus ID of the expected class
[ ":", "param", "dxid", ":", "Value", "to", "verify", "as", "a", "DNAnexus", "ID", "of", "class", "*", "expected_class", "*", ":", "param", "expected_classes", ":", "Single", "string", "or", "list", "of", "strings", "of", "allowed", "classes", "of", "the", ...
python
train
inveniosoftware-contrib/invenio-classifier
invenio_classifier/reader.py
https://github.com/inveniosoftware-contrib/invenio-classifier/blob/3c758cf34dca6bf0548e7da5de34e5f72e3b255e/invenio_classifier/reader.py#L168-L212
def _get_remote_ontology(onto_url, time_difference=None): """Check if the online ontology is more recent than the local ontology. If yes, try to download and store it in Invenio's cache directory. Return a boolean describing the success of the operation. :return: path to the downloaded ontology. """ if onto_url is None: return False dl_dir = os.path.join( current_app.config["CLASSIFIER_WORKDIR"] or tempfile.gettempdir(), "classifier" ) if not os.path.exists(dl_dir): os.makedirs(dl_dir) local_file = dl_dir + os.path.basename(onto_url) remote_modif_time = _get_last_modification_date(onto_url) try: local_modif_seconds = os.path.getmtime(local_file) except OSError: # The local file does not exist. Download the ontology. download = True current_app.logger.info("The local ontology could not be found.") else: local_modif_time = datetime(*time.gmtime(local_modif_seconds)[0:6]) # Let's set a time delta of 1 hour and 10 minutes. time_difference = time_difference or timedelta(hours=1, minutes=10) download = remote_modif_time > local_modif_time + time_difference if download: current_app.logger.info( "The remote ontology '{0}' is more recent " "than the local ontology.".format(onto_url) ) if download: if not _download_ontology(onto_url, local_file): current_app.logger.warning( "Error downloading the ontology from: {0}".format(onto_url) ) return local_file
[ "def", "_get_remote_ontology", "(", "onto_url", ",", "time_difference", "=", "None", ")", ":", "if", "onto_url", "is", "None", ":", "return", "False", "dl_dir", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "config", "[", "\"CLASSIFIER_WORKDI...
Check if the online ontology is more recent than the local ontology. If yes, try to download and store it in Invenio's cache directory. Return a boolean describing the success of the operation. :return: path to the downloaded ontology.
[ "Check", "if", "the", "online", "ontology", "is", "more", "recent", "than", "the", "local", "ontology", "." ]
python
train
koordinates/python-client
koordinates/layers.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L96-L109
def create_draft(self, layer_id): """ Creates a new draft version. If anything in the data object has changed then an import will begin immediately. Otherwise to force a re-import from the previous sources call :py:meth:`koordinates.layers.LayerManager.start_import`. :rtype: Layer :return: the new version :raises Conflict: if there is already a draft version for this layer. """ target_url = self.client.get_url('VERSION', 'POST', 'create', {'layer_id': layer_id}) r = self.client.request('POST', target_url, json={}) return self.create_from_result(r.json())
[ "def", "create_draft", "(", "self", ",", "layer_id", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'VERSION'", ",", "'POST'", ",", "'create'", ",", "{", "'layer_id'", ":", "layer_id", "}", ")", "r", "=", "self", ".", "client...
Creates a new draft version. If anything in the data object has changed then an import will begin immediately. Otherwise to force a re-import from the previous sources call :py:meth:`koordinates.layers.LayerManager.start_import`. :rtype: Layer :return: the new version :raises Conflict: if there is already a draft version for this layer.
[ "Creates", "a", "new", "draft", "version", "." ]
python
train
gtaylor/python-colormath
colormath/color_conversions.py
https://github.com/gtaylor/python-colormath/blob/1d168613718d2d7d31ec4230524e987ef66823c7/colormath/color_conversions.py#L853-L863
def CMYK_to_CMY(cobj, *args, **kwargs): """ Converts CMYK to CMY. NOTE: CMYK and CMY values range from 0.0 to 1.0 """ cmy_c = cobj.cmyk_c * (1.0 - cobj.cmyk_k) + cobj.cmyk_k cmy_m = cobj.cmyk_m * (1.0 - cobj.cmyk_k) + cobj.cmyk_k cmy_y = cobj.cmyk_y * (1.0 - cobj.cmyk_k) + cobj.cmyk_k return CMYColor(cmy_c, cmy_m, cmy_y)
[ "def", "CMYK_to_CMY", "(", "cobj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cmy_c", "=", "cobj", ".", "cmyk_c", "*", "(", "1.0", "-", "cobj", ".", "cmyk_k", ")", "+", "cobj", ".", "cmyk_k", "cmy_m", "=", "cobj", ".", "cmyk_m", "*", ...
Converts CMYK to CMY. NOTE: CMYK and CMY values range from 0.0 to 1.0
[ "Converts", "CMYK", "to", "CMY", "." ]
python
train
rbw/pysnow
pysnow/legacy_request.py
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/legacy_request.py#L62-L73
def count(self): """ Returns the number of records the query would yield""" self.request_params.update({'sysparm_count': True}) response = self.session.get(self._get_stats_url(), params=self._get_formatted_query(fields=list(), limit=None, order_by=list(), offset=None)) content = self._get_content(response) return int(content['stats']['count'])
[ "def", "count", "(", "self", ")", ":", "self", ".", "request_params", ".", "update", "(", "{", "'sysparm_count'", ":", "True", "}", ")", "response", "=", "self", ".", "session", ".", "get", "(", "self", ".", "_get_stats_url", "(", ")", ",", "params", ...
Returns the number of records the query would yield
[ "Returns", "the", "number", "of", "records", "the", "query", "would", "yield" ]
python
train
cslarsen/crianza
crianza/interpreter.py
https://github.com/cslarsen/crianza/blob/fa044f9d491f37cc06892bad14b2c80b8ac5a7cd/crianza/interpreter.py#L50-L52
def isbinary(*args): """Checks if value can be part of binary/bitwise operations.""" return all(map(lambda c: isnumber(c) or isbool(c), args))
[ "def", "isbinary", "(", "*", "args", ")", ":", "return", "all", "(", "map", "(", "lambda", "c", ":", "isnumber", "(", "c", ")", "or", "isbool", "(", "c", ")", ",", "args", ")", ")" ]
Checks if value can be part of binary/bitwise operations.
[ "Checks", "if", "value", "can", "be", "part", "of", "binary", "/", "bitwise", "operations", "." ]
python
train
stevearc/dynamo3
dynamo3/connection.py
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L90-L118
def connect_to_region(cls, region, session=None, access_key=None, secret_key=None, **kwargs): """ Connect to an AWS region. This method has been deprecated in favor of :meth:`~.connect` Parameters ---------- region : str Name of an AWS region session : :class:`~botocore.session.Session`, optional The Session object to use for the connection access_key : str, optional If session is None, set this access key when creating the session secret_key : str, optional If session is None, set this secret key when creating the session **kwargs : dict Keyword arguments to pass to the constructor """ warnings.warn("connect_to_region is deprecated and will be removed. " "Use connect instead.") if session is None: session = botocore.session.get_session() if access_key is not None: session.set_credentials(access_key, secret_key) client = session.create_client('dynamodb', region) return cls(client, **kwargs)
[ "def", "connect_to_region", "(", "cls", ",", "region", ",", "session", "=", "None", ",", "access_key", "=", "None", ",", "secret_key", "=", "None", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"connect_to_region is deprecated and will be ...
Connect to an AWS region. This method has been deprecated in favor of :meth:`~.connect` Parameters ---------- region : str Name of an AWS region session : :class:`~botocore.session.Session`, optional The Session object to use for the connection access_key : str, optional If session is None, set this access key when creating the session secret_key : str, optional If session is None, set this secret key when creating the session **kwargs : dict Keyword arguments to pass to the constructor
[ "Connect", "to", "an", "AWS", "region", "." ]
python
train
scarface-4711/denonavr
denonavr/denonavr.py
https://github.com/scarface-4711/denonavr/blob/59a136e27b43cb1d1e140cf67705087b3aa377cd/denonavr/denonavr.py#L1473-L1493
def set_sound_mode_dict(self, sound_mode_dict): """Set the matching dictionary used to match the raw sound mode.""" error_msg = ("Syntax of sound mode dictionary not valid, " "use: OrderedDict([('COMMAND', ['VALUE1','VALUE2'])])") if isinstance(sound_mode_dict, dict): mode_list = list(sound_mode_dict.values()) for sublist in mode_list: if isinstance(sublist, list): for element in sublist: if not isinstance(element, str): _LOGGER.error(error_msg) return False else: _LOGGER.error(error_msg) return False else: _LOGGER.error(error_msg) return False self._sound_mode_dict = sound_mode_dict self._sm_match_dict = self.construct_sm_match_dict() return True
[ "def", "set_sound_mode_dict", "(", "self", ",", "sound_mode_dict", ")", ":", "error_msg", "=", "(", "\"Syntax of sound mode dictionary not valid, \"", "\"use: OrderedDict([('COMMAND', ['VALUE1','VALUE2'])])\"", ")", "if", "isinstance", "(", "sound_mode_dict", ",", "dict", ")"...
Set the matching dictionary used to match the raw sound mode.
[ "Set", "the", "matching", "dictionary", "used", "to", "match", "the", "raw", "sound", "mode", "." ]
python
train
DataDog/integrations-core
mongo/datadog_checks/mongo/mongo.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/mongo/datadog_checks/mongo/mongo.py#L653-L1045
def check(self, instance): """ Returns a dictionary that looks a lot like what's sent back by db.serverStatus() """ def total_seconds(td): """ Returns total seconds of a timedelta in a way that's safe for Python < 2.7 """ if hasattr(td, 'total_seconds'): return td.total_seconds() else: return (lag.microseconds + (lag.seconds + lag.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 if 'server' not in instance: raise Exception("Missing 'server' in mongo config") # x.509 authentication ssl_params = { 'ssl': instance.get('ssl', None), 'ssl_keyfile': instance.get('ssl_keyfile', None), 'ssl_certfile': instance.get('ssl_certfile', None), 'ssl_cert_reqs': instance.get('ssl_cert_reqs', None), 'ssl_ca_certs': instance.get('ssl_ca_certs', None), } for key, param in list(iteritems(ssl_params)): if param is None: del ssl_params[key] server = instance['server'] username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri( server, sanitize_username=bool(ssl_params) ) additional_metrics = instance.get('additional_metrics', []) # Get the list of metrics to collect collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics metrics_to_collect = self._get_metrics_to_collect(server, additional_metrics) # Tagging tags = instance.get('tags', []) # ...de-dupe tags to avoid a memory leak tags = list(set(tags)) if not db_name: self.log.info('No MongoDB database found in URI. 
Defaulting to admin.') db_name = 'admin' service_check_tags = ["db:%s" % db_name] service_check_tags.extend(tags) # ...add the `server` tag to the metrics' tags only # (it's added in the backend for service checks) tags.append('server:%s' % clean_server_name) if nodelist: host = nodelist[0][0] port = nodelist[0][1] service_check_tags = service_check_tags + ["host:%s" % host, "port:%s" % port] timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000 try: cli = pymongo.mongo_client.MongoClient( server, socketTimeoutMS=timeout, connectTimeoutMS=timeout, serverSelectionTimeoutMS=timeout, read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED, **ssl_params ) # some commands can only go against the admin DB admindb = cli['admin'] db = cli[db_name] except Exception: self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags) raise # Authenticate do_auth = True use_x509 = ssl_params and not password if not username: self.log.debug(u"A username is required to authenticate to `%s`", server) do_auth = False if do_auth: if auth_source: msg = "authSource was specified in the the server URL: using '%s' as the authentication database" self.log.info(msg, auth_source) self._authenticate( cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags ) else: self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags) try: status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics) except Exception: self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags) raise else: self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags) if status['ok'] == 0: raise Exception(status['errmsg'].__str__()) ops = db.current_op() status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0 status['stats'] = db.command('dbstats') dbstats = {db_name: {'stats': status['stats']}} # Handle replica data, if any # See # 
http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa if is_affirmative(instance.get('replica_check', True)): try: data = {} replSet = admindb.command('replSetGetStatus') if replSet: primary = None current = None # need a new connection to deal with replica sets setname = replSet.get('set') cli_rs = pymongo.mongo_client.MongoClient( server, socketTimeoutMS=timeout, connectTimeoutMS=timeout, serverSelectionTimeoutMS=timeout, replicaset=setname, read_preference=pymongo.ReadPreference.NEAREST, **ssl_params ) if do_auth: if auth_source: self._authenticate( cli_rs[auth_source], username, password, use_x509, server, service_check_tags ) else: self._authenticate( cli_rs[db_name], username, password, use_x509, server, service_check_tags ) # Replication set information replset_name = replSet['set'] replset_state = self.get_state_name(replSet['myState']).lower() tags.extend([u"replset_name:{0}".format(replset_name), u"replset_state:{0}".format(replset_state)]) # Find nodes: master and current node (ourself) for member in replSet.get('members'): if member.get('self'): current = member if int(member.get('state')) == 1: primary = member # Compute a lag time if current is not None and primary is not None: if 'optimeDate' in primary and 'optimeDate' in current: lag = primary['optimeDate'] - current['optimeDate'] data['replicationLag'] = total_seconds(lag) if current is not None: data['health'] = current['health'] data['state'] = replSet['myState'] if current is not None: total = 0.0 cfg = cli_rs['local']['system.replset'].find_one() for member in cfg.get('members'): total += member.get('votes', 1) if member['_id'] == current['_id']: data['votes'] = member.get('votes', 1) data['voteFraction'] = data['votes'] / total status['replSet'] = data # Submit events self._report_replica_set_state(data['state'], clean_server_name, replset_name) except Exception as e: if "OperationFailure" in repr(e) and ( "not running with --replSet" in str(e) or 
"replSetGetStatus" in str(e) ): pass else: raise e # If these keys exist, remove them for now as they cannot be serialized try: status['backgroundFlushing'].pop('last_finished') except KeyError: pass try: status.pop('localTime') except KeyError: pass dbnames = cli.database_names() self.gauge('mongodb.dbs', len(dbnames), tags=tags) for db_n in dbnames: db_aux = cli[db_n] dbstats[db_n] = {'stats': db_aux.command('dbstats')} # Go through the metrics and save the values for metric_name in metrics_to_collect: # each metric is of the form: x.y.z with z optional # and can be found at status[x][y][z] value = status if metric_name.startswith('stats'): continue else: try: for c in metric_name.split("."): value = value[c] except KeyError: continue # value is now status[x][y][z] if not isinstance(value, (int, long, float)): raise TypeError( u"{0} value is a {1}, it should be an int, a float or a long instead.".format( metric_name, type(value) ) ) # Submit the metric submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect) submit_method(self, metric_name_alias, value, tags=tags) for st, value in iteritems(dbstats): for metric_name in metrics_to_collect: if not metric_name.startswith('stats.'): continue try: val = value['stats'][metric_name.split('.')[1]] except KeyError: continue # value is now status[x][y][z] if not isinstance(val, (int, long, float)): raise TypeError( u"{0} value is a {1}, it should be an int, a float or a long instead.".format( metric_name, type(val) ) ) # Submit the metric metrics_tags = tags + [ u"cluster:db:{0}".format(st), # FIXME 6.0 - keep for backward compatibility u"db:{0}".format(st), ] submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect) submit_method(self, metric_name_alias, val, tags=metrics_tags) if is_affirmative(instance.get('collections_indexes_stats')): mongo_version = cli.server_info().get('version', '0.0') if LooseVersion(mongo_version) >= LooseVersion("3.2"): 
self._collect_indexes_stats(instance, db, tags) else: msg = "'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s" self.log.error(msg, mongo_version) # Report the usage metrics for dbs/collections if 'top' in additional_metrics: try: dbtop = admindb.command('top') for ns, ns_metrics in iteritems(dbtop['totals']): if "." not in ns: continue # configure tags for db name and collection name dbname, collname = ns.split(".", 1) ns_tags = tags + ["db:%s" % dbname, "collection:%s" % collname] # iterate over DBTOP metrics for m in self.TOP_METRICS: # each metric is of the form: x.y.z with z optional # and can be found at ns_metrics[x][y][z] value = ns_metrics try: for c in m.split("."): value = value[c] except Exception: continue # value is now status[x][y][z] if not isinstance(value, (int, long, float)): raise TypeError( u"{0} value is a {1}, it should be an int, a float or a long instead.".format( m, type(value) ) ) # Submit the metric submit_method, metric_name_alias = self._resolve_metric(m, metrics_to_collect, prefix="usage") submit_method(self, metric_name_alias, value, tags=ns_tags) # Keep old incorrect metric if metric_name_alias.endswith('countps'): GAUGE(self, metric_name_alias[:-2], value, tags=ns_tags) except Exception as e: self.log.warning('Failed to record `top` metrics %s' % str(e)) if 'local' in dbnames: # it might not be if we are connectiing through mongos # Fetch information analogous to Mongo's db.getReplicationInfo() localdb = cli['local'] oplog_data = {} for ol_collection_name in ("oplog.rs", "oplog.$main"): ol_options = localdb[ol_collection_name].options() if ol_options: break if ol_options: try: oplog_data['logSizeMB'] = round_value(ol_options['size'] / 2.0 ** 20, 2) oplog = localdb[ol_collection_name] oplog_data['usedSizeMB'] = round_value( localdb.command("collstats", ol_collection_name)['size'] / 2.0 ** 20, 2 ) op_asc_cursor = oplog.find({"ts": {"$exists": 1}}).sort("$natural", 
pymongo.ASCENDING).limit(1) op_dsc_cursor = oplog.find({"ts": {"$exists": 1}}).sort("$natural", pymongo.DESCENDING).limit(1) try: first_timestamp = op_asc_cursor[0]['ts'].as_datetime() last_timestamp = op_dsc_cursor[0]['ts'].as_datetime() oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp) except (IndexError, KeyError): # if the oplog collection doesn't have any entries # if an object in the collection doesn't have a ts value, we ignore it pass except KeyError: # encountered an error trying to access options.size for the oplog collection self.log.warning(u"Failed to record `ReplicationInfo` metrics.") for m, value in iteritems(oplog_data): submit_method, metric_name_alias = self._resolve_metric('oplog.%s' % m, metrics_to_collect) submit_method(self, metric_name_alias, value, tags=tags) else: self.log.debug('"local" database not in dbnames. Not collecting ReplicationInfo metrics') # get collection level stats try: # Ensure that you're on the right db db = cli[db_name] # grab the collections from the configutation coll_names = instance.get('collections', []) # loop through the collections for coll_name in coll_names: # grab the stats from the collection stats = db.command("collstats", coll_name) # loop through the metrics for m in self.collection_metrics_names: coll_tags = tags + ["db:%s" % db_name, "collection:%s" % coll_name] value = stats.get(m, None) if not value: continue # if it's the index sizes, then it's a dict. 
if m == 'indexSizes': submit_method, metric_name_alias = self._resolve_metric( 'collection.%s' % m, self.COLLECTION_METRICS ) # loop through the indexes for idx, val in iteritems(value): # we tag the index idx_tags = coll_tags + ["index:%s" % idx] submit_method(self, metric_name_alias, val, tags=idx_tags) else: submit_method, metric_name_alias = self._resolve_metric( 'collection.%s' % m, self.COLLECTION_METRICS ) submit_method(self, metric_name_alias, value, tags=coll_tags) except Exception as e: self.log.warning(u"Failed to record `collection` metrics.") self.log.exception(e)
[ "def", "check", "(", "self", ",", "instance", ")", ":", "def", "total_seconds", "(", "td", ")", ":", "\"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"", "if", "hasattr", "(", "td", ",", "'total_sec...
Returns a dictionary that looks a lot like what's sent back by db.serverStatus()
[ "Returns", "a", "dictionary", "that", "looks", "a", "lot", "like", "what", "s", "sent", "back", "by", "db", ".", "serverStatus", "()" ]
python
train
nion-software/nionswift
nion/swift/LinePlotCanvasItem.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/LinePlotCanvasItem.py#L402-L538
def prepare_display(self): """Prepare the display. This method gets called by the canvas layout/draw engine after being triggered by a call to `update`. When data or display parameters change, the internal state of the line plot gets updated. This method takes that internal state and updates the child canvas items. This method is always run on a thread and should be fast but doesn't need to be instant. """ displayed_dimensional_calibration = self.__displayed_dimensional_calibration intensity_calibration = self.__intensity_calibration calibration_style = self.__calibration_style y_min = self.__y_min y_max = self.__y_max y_style = self.__y_style left_channel = self.__left_channel right_channel = self.__right_channel scalar_xdata_list = None def calculate_scalar_xdata(xdata_list): scalar_xdata_list = list() for xdata in xdata_list: if xdata: scalar_data = Image.scalar_from_array(xdata.data) scalar_data = Image.convert_to_grayscale(scalar_data) scalar_intensity_calibration = calibration_style.get_intensity_calibration(xdata) scalar_dimensional_calibrations = calibration_style.get_dimensional_calibrations(xdata.dimensional_shape, xdata.dimensional_calibrations) if displayed_dimensional_calibration.units == scalar_dimensional_calibrations[-1].units and intensity_calibration.units == scalar_intensity_calibration.units: # the data needs to have an intensity scale matching intensity_calibration. convert the data to use the common scale. 
scale = scalar_intensity_calibration.scale / intensity_calibration.scale offset = (scalar_intensity_calibration.offset - intensity_calibration.offset) / intensity_calibration.scale scalar_data = scalar_data * scale + offset scalar_xdata_list.append(DataAndMetadata.new_data_and_metadata(scalar_data, scalar_intensity_calibration, scalar_dimensional_calibrations)) else: scalar_xdata_list.append(None) return scalar_xdata_list data_scale = self.__data_scale xdata_list = self.__xdata_list if data_scale is not None: # update the line graph data left_channel = left_channel if left_channel is not None else 0 right_channel = right_channel if right_channel is not None else data_scale left_channel, right_channel = min(left_channel, right_channel), max(left_channel, right_channel) scalar_data_list = None if y_min is None or y_max is None and len(xdata_list) > 0: scalar_xdata_list = calculate_scalar_xdata(xdata_list) scalar_data_list = [xdata.data if xdata else None for xdata in scalar_xdata_list] calibrated_data_min, calibrated_data_max, y_ticker = LineGraphCanvasItem.calculate_y_axis(scalar_data_list, y_min, y_max, intensity_calibration, y_style) axes = LineGraphCanvasItem.LineGraphAxes(data_scale, calibrated_data_min, calibrated_data_max, left_channel, right_channel, displayed_dimensional_calibration, intensity_calibration, y_style, y_ticker) if scalar_xdata_list is None: if len(xdata_list) > 0: scalar_xdata_list = calculate_scalar_xdata(xdata_list) else: scalar_xdata_list = list() if self.__display_frame_rate_id: Utility.fps_tick("prepare_"+self.__display_frame_rate_id) colors = ('#1E90FF', "#F00", "#0F0", "#00F", "#FF0", "#0FF", "#F0F", "#888", "#800", "#080", "#008", "#CCC", "#880", "#088", "#808", "#964B00") display_layers = self.__display_layers if len(display_layers) == 0: index = 0 for scalar_index, scalar_xdata in enumerate(scalar_xdata_list): if scalar_xdata and scalar_xdata.is_data_1d: if index < 16: display_layers.append({"fill_color": colors[index] if index == 0 
else None, "stroke_color": colors[index] if index > 0 else None, "data_index": scalar_index}) index += 1 if scalar_xdata and scalar_xdata.is_data_2d: for row in range(min(scalar_xdata.data_shape[-1], 16)): if index < 16: display_layers.append({"fill_color": colors[index] if index == 0 else None, "stroke_color": colors[index] if index > 0 else None, "data_index": scalar_index, "data_row": row}) index += 1 display_layer_count = len(display_layers) self.___has_valid_drawn_graph_data = False for index, display_layer in enumerate(display_layers): if index < 16: fill_color = display_layer.get("fill_color") stroke_color = display_layer.get("stroke_color") data_index = display_layer.get("data_index", 0) data_row = display_layer.get("data_row", 0) if 0 <= data_index < len(scalar_xdata_list): scalar_xdata = scalar_xdata_list[data_index] if scalar_xdata: data_row = max(0, min(scalar_xdata.dimensional_shape[0] - 1, data_row)) intensity_calibration = scalar_xdata.intensity_calibration displayed_dimensional_calibration = scalar_xdata.dimensional_calibrations[-1] if scalar_xdata.is_data_2d: scalar_data = scalar_xdata.data[data_row:data_row + 1, :].reshape((scalar_xdata.dimensional_shape[-1],)) scalar_xdata = DataAndMetadata.new_data_and_metadata(scalar_data, intensity_calibration, [displayed_dimensional_calibration]) line_graph_canvas_item = self.__line_graph_stack.canvas_items[display_layer_count - (index + 1)] line_graph_canvas_item.set_fill_color(fill_color) line_graph_canvas_item.set_stroke_color(stroke_color) line_graph_canvas_item.set_axes(axes) line_graph_canvas_item.set_uncalibrated_xdata(scalar_xdata) self.___has_valid_drawn_graph_data = scalar_xdata is not None for index in range(len(display_layers), 16): line_graph_canvas_item = self.__line_graph_stack.canvas_items[index] line_graph_canvas_item.set_axes(None) line_graph_canvas_item.set_uncalibrated_xdata(None) legend_position = self.__legend_position LegendEntry = collections.namedtuple("LegendEntry", ["label", 
"fill_color", "stroke_color"]) legend_entries = list() for index, display_layer in enumerate(self.__display_layers): data_index = display_layer.get("data_index", None) data_row = display_layer.get("data_row", None) label = display_layer.get("label", str()) if not label: if data_index is not None and data_row is not None: label = "Data {}:{}".format(data_index, data_row) elif data_index is not None: label = "Data {}".format(data_index) else: label = "Unknown" fill_color = display_layer.get("fill_color") stroke_color = display_layer.get("stroke_color") legend_entries.append(LegendEntry(label, fill_color, stroke_color)) self.__update_canvas_items(axes, legend_position, legend_entries) else: for line_graph_canvas_item in self.__line_graph_stack.canvas_items: line_graph_canvas_item.set_axes(None) line_graph_canvas_item.set_uncalibrated_xdata(None) self.__line_graph_xdata_list = list() self.__update_canvas_items(LineGraphCanvasItem.LineGraphAxes(), None, None)
[ "def", "prepare_display", "(", "self", ")", ":", "displayed_dimensional_calibration", "=", "self", ".", "__displayed_dimensional_calibration", "intensity_calibration", "=", "self", ".", "__intensity_calibration", "calibration_style", "=", "self", ".", "__calibration_style", ...
Prepare the display. This method gets called by the canvas layout/draw engine after being triggered by a call to `update`. When data or display parameters change, the internal state of the line plot gets updated. This method takes that internal state and updates the child canvas items. This method is always run on a thread and should be fast but doesn't need to be instant.
[ "Prepare", "the", "display", "." ]
python
train
pypa/pipenv
pipenv/vendor/backports/enum/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/backports/enum/__init__.py#L86-L91
def _make_class_unpicklable(cls): """Make the given class un-picklable.""" def _break_on_call_reduce(self, protocol=None): raise TypeError('%r cannot be pickled' % self) cls.__reduce_ex__ = _break_on_call_reduce cls.__module__ = '<unknown>'
[ "def", "_make_class_unpicklable", "(", "cls", ")", ":", "def", "_break_on_call_reduce", "(", "self", ",", "protocol", "=", "None", ")", ":", "raise", "TypeError", "(", "'%r cannot be pickled'", "%", "self", ")", "cls", ".", "__reduce_ex__", "=", "_break_on_call_...
Make the given class un-picklable.
[ "Make", "the", "given", "class", "un", "-", "picklable", "." ]
python
train
Infinidat/infi.clickhouse_orm
src/infi/clickhouse_orm/fields.py
https://github.com/Infinidat/infi.clickhouse_orm/blob/595f2023e334e3925a5c3fbfdd6083a5992a7169/src/infi/clickhouse_orm/fields.py#L395-L410
def create_ad_hoc_field(cls, db_type): ''' Give an SQL column description such as "Enum8('apple' = 1, 'banana' = 2, 'orange' = 3)" this method returns a matching enum field. ''' import re try: Enum # exists in Python 3.4+ except NameError: from enum import Enum # use the enum34 library instead members = {} for match in re.finditer("'(\w+)' = (\d+)", db_type): members[match.group(1)] = int(match.group(2)) enum_cls = Enum('AdHocEnum', members) field_class = Enum8Field if db_type.startswith('Enum8') else Enum16Field return field_class(enum_cls)
[ "def", "create_ad_hoc_field", "(", "cls", ",", "db_type", ")", ":", "import", "re", "try", ":", "Enum", "# exists in Python 3.4+", "except", "NameError", ":", "from", "enum", "import", "Enum", "# use the enum34 library instead", "members", "=", "{", "}", "for", ...
Give an SQL column description such as "Enum8('apple' = 1, 'banana' = 2, 'orange' = 3)" this method returns a matching enum field.
[ "Give", "an", "SQL", "column", "description", "such", "as", "Enum8", "(", "apple", "=", "1", "banana", "=", "2", "orange", "=", "3", ")", "this", "method", "returns", "a", "matching", "enum", "field", "." ]
python
train
marl/jams
jams/core.py
https://github.com/marl/jams/blob/b16778399b9528efbd71434842a079f7691a7a66/jams/core.py#L2047-L2078
def match_query(string, query): '''Test if a string matches a query. Parameters ---------- string : str The string to test query : string, callable, or object Either a regular expression, callable function, or object. Returns ------- match : bool `True` if: - `query` is a callable and `query(string) == True` - `query` is a regular expression and `re.match(query, string)` - or `string == query` for any other query `False` otherwise ''' if six.callable(query): return query(string) elif (isinstance(query, six.string_types) and isinstance(string, six.string_types)): return re.match(query, string) is not None else: return query == string
[ "def", "match_query", "(", "string", ",", "query", ")", ":", "if", "six", ".", "callable", "(", "query", ")", ":", "return", "query", "(", "string", ")", "elif", "(", "isinstance", "(", "query", ",", "six", ".", "string_types", ")", "and", "isinstance"...
Test if a string matches a query. Parameters ---------- string : str The string to test query : string, callable, or object Either a regular expression, callable function, or object. Returns ------- match : bool `True` if: - `query` is a callable and `query(string) == True` - `query` is a regular expression and `re.match(query, string)` - or `string == query` for any other query `False` otherwise
[ "Test", "if", "a", "string", "matches", "a", "query", "." ]
python
valid
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/gallery/gallery_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/gallery/gallery_client.py#L1368-L1390
def update_question(self, question, publisher_name, extension_name, question_id): """UpdateQuestion. [Preview API] Updates an existing question for an extension. :param :class:`<Question> <azure.devops.v5_1.gallery.models.Question>` question: Updated question to be set for the extension. :param str publisher_name: Name of the publisher who published the extension. :param str extension_name: Name of the extension. :param long question_id: Identifier of the question to be updated for the extension. :rtype: :class:`<Question> <azure.devops.v5_1.gallery.models.Question>` """ route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') if question_id is not None: route_values['questionId'] = self._serialize.url('question_id', question_id, 'long') content = self._serialize.body(question, 'Question') response = self._send(http_method='PATCH', location_id='6d1d9741-eca8-4701-a3a5-235afc82dfa4', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('Question', response)
[ "def", "update_question", "(", "self", ",", "question", ",", "publisher_name", ",", "extension_name", ",", "question_id", ")", ":", "route_values", "=", "{", "}", "if", "publisher_name", "is", "not", "None", ":", "route_values", "[", "'publisherName'", "]", "=...
UpdateQuestion. [Preview API] Updates an existing question for an extension. :param :class:`<Question> <azure.devops.v5_1.gallery.models.Question>` question: Updated question to be set for the extension. :param str publisher_name: Name of the publisher who published the extension. :param str extension_name: Name of the extension. :param long question_id: Identifier of the question to be updated for the extension. :rtype: :class:`<Question> <azure.devops.v5_1.gallery.models.Question>`
[ "UpdateQuestion", ".", "[", "Preview", "API", "]", "Updates", "an", "existing", "question", "for", "an", "extension", ".", ":", "param", ":", "class", ":", "<Question", ">", "<azure", ".", "devops", ".", "v5_1", ".", "gallery", ".", "models", ".", "Quest...
python
train
bspaans/python-mingus
mingus/core/chords.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/chords.py#L1021-L1082
def determine_extended_chord5(chord, shorthand=False, no_inversions=False, no_polychords=False): """Determine the names of an extended chord.""" if len(chord) != 5: # warning raise exeption: not an extended chord return False def inversion_exhauster(chord, shorthand, tries, result, polychords): """Recursive helper function.""" def add_result(short): result.append((short, tries, chord[0])) triads = determine_triad(chord[:3], True, True) sevenths = determine_seventh(chord[:4], True, True, True) # Determine polychords if tries == 1 and not no_polychords: polychords += determine_polychords(chord, shorthand) intval4 = intervals.determine(chord[0], chord[4]) for seventh in sevenths: seventh = seventh[len(chord[0]):] if seventh == 'M7': if intval4 == 'major second': add_result('M9') elif seventh == 'm7': if intval4 == 'major second': add_result('m9') elif intval4 == 'perfect fourth': add_result('m11') elif seventh == '7': if intval4 == 'major second': add_result('9') elif intval4 == 'minor second': add_result('7b9') elif intval4 == 'augmented second': add_result('7#9') elif intval4 == 'minor third': add_result('7b12') elif intval4 == 'augmented fourth': add_result('7#11') elif intval4 == 'major sixth': add_result('13') elif seventh == 'M6': if intval4 == 'major second': add_result('6/9') elif intval4 == 'minor seventh': add_result('6/7') if tries != 5 and not no_inversions: return inversion_exhauster([chord[-1]] + chord[:-1], shorthand, tries + 1, result, polychords) else: res = [] for r in result: if shorthand: res.append(r[2] + r[0]) else: res.append(r[2] + chord_shorthand_meaning[r[0]] + int_desc(r[1])) return res + polychords return inversion_exhauster(chord, shorthand, 1, [], [])
[ "def", "determine_extended_chord5", "(", "chord", ",", "shorthand", "=", "False", ",", "no_inversions", "=", "False", ",", "no_polychords", "=", "False", ")", ":", "if", "len", "(", "chord", ")", "!=", "5", ":", "# warning raise exeption: not an extended chord", ...
Determine the names of an extended chord.
[ "Determine", "the", "names", "of", "an", "extended", "chord", "." ]
python
train
JNRowe/jnrbase
jnrbase/git.py
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/git.py#L26-L52
def find_tag(__matcher: str = 'v[0-9]*', *, strict: bool = True, git_dir: str = '.') -> str: """Find closest tag for a git repository. Note: This defaults to `Semantic Version`_ tag matching. Args: __matcher: Glob-style tag pattern to match strict: Allow commit-ish, if no tag found git_dir: Repository to search Returns: Matching tag name .. _Semantic Version: http://semver.org/ """ command = 'git describe --abbrev=12 --dirty'.split() with chdir(git_dir): try: stdout = check_output(command + ['--match={}'.format(__matcher), ]) except CalledProcessError: if strict: raise stdout = check_output(command + ['--always', ]) stdout = stdout.decode('ascii', 'replace') return stdout.strip()
[ "def", "find_tag", "(", "__matcher", ":", "str", "=", "'v[0-9]*'", ",", "*", ",", "strict", ":", "bool", "=", "True", ",", "git_dir", ":", "str", "=", "'.'", ")", "->", "str", ":", "command", "=", "'git describe --abbrev=12 --dirty'", ".", "split", "(", ...
Find closest tag for a git repository. Note: This defaults to `Semantic Version`_ tag matching. Args: __matcher: Glob-style tag pattern to match strict: Allow commit-ish, if no tag found git_dir: Repository to search Returns: Matching tag name .. _Semantic Version: http://semver.org/
[ "Find", "closest", "tag", "for", "a", "git", "repository", "." ]
python
train
sosy-lab/benchexec
benchexec/containerexecutor.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/containerexecutor.py#L656-L918
def _setup_container_filesystem(self, temp_dir, output_dir, memlimit, memory_nodes): """Setup the filesystem layout in the container. As first step, we create a copy of all existing mountpoints in mount_base, recursively, and as "private" mounts (i.e., changes to existing mountpoints afterwards won't propagate to our copy). Then we iterate over all mountpoints and change them according to the mode the user has specified (hidden, read-only, overlay, or full-access). This has do be done for each mountpoint because overlays are not recursive. Then we chroot into the new mount hierarchy. The new filesystem layout still has a view of the host's /proc. We do not mount a fresh /proc here because the grandchild still needs the old /proc. We do simply iterate over all existing mount points and set them to read-only/overlay them, because it is easier to create a new hierarchy and chroot into it. First, we still have access to the original mountpoints while doing so, and second, we avoid race conditions if someone else changes the existing mountpoints. @param temp_dir: The base directory under which all our directories should be created. """ # All strings here are bytes to avoid issues if existing mountpoints are invalid UTF-8. 
temp_base = self._get_result_files_base(temp_dir).encode() # directory with files created by tool temp_dir = temp_dir.encode() tmpfs_opts = ["size=" + str(memlimit or "100%")] if memory_nodes: tmpfs_opts.append("mpol=bind:" + ",".join(map(str, memory_nodes))) tmpfs_opts = (",".join(tmpfs_opts)).encode() if self._container_tmpfs: libc.mount(None, temp_dir, b"tmpfs", 0, tmpfs_opts) mount_base = os.path.join(temp_dir, b"mount") # base dir for container mounts os.mkdir(mount_base) os.mkdir(temp_base) def _is_below(path, target_path): # compare with trailing slashes for cases like /foo and /foobar path = os.path.join(path, b"") target_path = os.path.join(target_path, b"") return path.startswith(target_path) def find_mode_for_dir(path, fstype=None): if (path == b"/proc"): # /proc is necessary for the grandchild to read PID, will be replaced later. return DIR_READ_ONLY if _is_below(path, b"/proc"): # Irrelevant. return None parent_mode = None result_mode = None for special_dir, mode in self._dir_modes.items(): if _is_below(path, special_dir): if path != special_dir: parent_mode = mode result_mode = mode assert result_mode is not None if result_mode == DIR_OVERLAY and ( _is_below(path, b"/dev") or _is_below(path, b"/sys") or fstype == b"cgroup"): # Overlay does not make sense for /dev, /sys, and all cgroups. return DIR_READ_ONLY if result_mode == DIR_OVERLAY and ( fstype == b"autofs" or fstype == b"vfat" or fstype == b"ntfs"): # Overlayfs does not support these as underlying file systems. logging.debug("Cannot use overlay mode for %s because it has file system %s. " "Using read-only mode instead.", path.decode(), fstype.decode()) return DIR_READ_ONLY if result_mode == DIR_HIDDEN and parent_mode == DIR_HIDDEN: # No need to recursively recreate mountpoints in hidden dirs. return None return result_mode # Overlayfs needs its own additional temporary directory ("work" directory). 
# temp_base will be the "upper" layer, the host FS the "lower" layer, # and mount_base the mount target. work_base = os.path.join(temp_dir, b"overlayfs") os.mkdir(work_base) # Create a copy of host's mountpoints. # Setting MS_PRIVATE flag discouples our mount namespace from the hosts's, # i.e., mounts we do are not seen by the host, and any (un)mounts the host does afterward # are not seen by us. The latter is desired such that new mounts (e.g., # USB sticks being plugged in) do not appear in the container. # Blocking host-side unmounts from being propagated has the disadvantage # that any unmounts done by the sysadmin won't really unmount the device # because it stays mounted in the container and thus keep the device busy # (cf. https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=739593#85). # We could allow unmounts being propated with MS_SLAVE instead of MS_PRIVATE, # but we prefer to have the mount namespace of the container being # unchanged during run execution. container.make_bind_mount(b"/", mount_base, recursive=True, private=True) # Ensure each special dir is a mountpoint such that the next loop covers it. for special_dir in self._dir_modes.keys(): mount_path = mount_base + special_dir temp_path = temp_base + special_dir try: container.make_bind_mount(mount_path, mount_path) except OSError as e: # on btrfs, non-recursive bind mounts faitl if e.errno == errno.EINVAL: try: container.make_bind_mount(mount_path, mount_path, recursive=True) except OSError as e2: logging.debug("Failed to make %s a (recursive) bind mount: %s", mount_path, e2) else: logging.debug("Failed to make %s a bind mount: %s", mount_path, e) if not os.path.exists(temp_path): os.makedirs(temp_path) # Set desired access mode for each mountpoint. 
for unused_source, full_mountpoint, fstype, options in list(container.get_mount_points()): if not _is_below(full_mountpoint, mount_base): continue mountpoint = full_mountpoint[len(mount_base):] or b"/" mode = find_mode_for_dir(mountpoint, fstype) if not mode: continue if not os.access(os.path.dirname(mountpoint), os.X_OK): # If parent is not accessible we cannot mount something on mountpoint. # We mark the inaccessible directory as hidden because otherwise the mountpoint # could become accessible (directly!) if the permissions on the parent # are relaxed during container execution. original_mountpoint = mountpoint parent = os.path.dirname(mountpoint) while not os.access(parent, os.X_OK): mountpoint = parent parent = os.path.dirname(mountpoint) mode = DIR_HIDDEN logging.debug( "Marking inaccessible directory '%s' as hidden " "because it contains a mountpoint at '%s'", mountpoint.decode(), original_mountpoint.decode()) else: logging.debug("Mounting '%s' as %s", mountpoint.decode(), mode) mount_path = mount_base + mountpoint temp_path = temp_base + mountpoint work_path = work_base + mountpoint if mode == DIR_OVERLAY: if not os.path.exists(temp_path): os.makedirs(temp_path) if not os.path.exists(work_path): os.makedirs(work_path) try: # Previous mount in this place not needed if replaced with overlay dir. libc.umount(mount_path) except OSError as e: logging.debug(e) try: container.make_overlay_mount(mount_path, mountpoint, temp_path, work_path) except OSError as e: raise OSError(e.errno, "Creating overlay mount for '{}' failed: {}. " "Please use other directory modes." .format(mountpoint.decode(), os.strerror(e.errno))) elif mode == DIR_HIDDEN: if not os.path.exists(temp_path): os.makedirs(temp_path) try: # Previous mount in this place not needed if replaced with hidden dir. 
libc.umount(mount_path) except OSError as e: logging.debug(e) container.make_bind_mount(temp_path, mount_path) elif mode == DIR_READ_ONLY: try: container.remount_with_additional_flags(mount_path, options, libc.MS_RDONLY) except OSError as e: if e.errno == errno.EACCES: logging.warning( "Cannot mount '%s', directory may be missing from container.", mountpoint.decode()) else: # If this mountpoint is below an overlay/hidden dir re-create mountpoint. # Linux does not support making read-only bind mounts in one step: # https://lwn.net/Articles/281157/ http://man7.org/linux/man-pages/man8/mount.8.html container.make_bind_mount( mountpoint, mount_path, recursive=True, private=True) container.remount_with_additional_flags(mount_path, options, libc.MS_RDONLY) elif mode == DIR_FULL_ACCESS: try: # Ensure directory is still a mountpoint by attempting to remount. container.remount_with_additional_flags(mount_path, options, 0) except OSError as e: if e.errno == errno.EACCES: logging.warning( "Cannot mount '%s', directory may be missing from container.", mountpoint.decode()) else: # If this mountpoint is below an overlay/hidden dir re-create mountpoint. container.make_bind_mount( mountpoint, mount_path, recursive=True, private=True) else: assert False # Now configure some special hard-coded cases def make_tmpfs_dir(path): """Ensure that a tmpfs is mounted on path, if the path exists""" if path in self._dir_modes: return # explicitly configured by user mount_tmpfs = mount_base + path temp_tmpfs = temp_base + path util.makedirs(temp_tmpfs, exist_ok=True) if os.path.isdir(mount_tmpfs): # If we already have a tmpfs, we can just bind mount it, otherwise we need one if self._container_tmpfs: container.make_bind_mount(temp_tmpfs, mount_tmpfs) else: libc.mount(None, mount_tmpfs, b"tmpfs", 0, tmpfs_opts) # The following directories should be writable RAM disks for Posix shared memory. # For example, the Python multiprocessing module explicitly checks for a tmpfs instance. 
make_tmpfs_dir(b"/dev/shm") make_tmpfs_dir(b"/run/shm") if self._container_system_config: # If overlayfs is not used for /etc, we need additional bind mounts # for files in /etc that we want to override, like /etc/passwd config_mount_base = mount_base if find_mode_for_dir(b"/etc") != DIR_OVERLAY else None container.setup_container_system_config(temp_base, config_mount_base ) if output_dir: # We need a way to see temp_base in the container in order to be able to copy result # files out of it, so we need a directory that is guaranteed to exist in order to use # it as mountpoint for a bind mount to temp_base. # Of course, the tool inside the container should not have access to temp_base, # so we will add another bind mount with an empty directory on top # (equivalent to --hidden-dir). After the tool terminates we can unmount # the top-level bind mount and then access temp_base. However, this works only # if there is no other mount point below that directory, and the user can force us # to create mount points at arbitrary directory if a directory mode is specified. # So we need an existing directory with no mount points below, and luckily temp_dir # fulfills all requirements (because we have just created it as fresh drectory ourselves). # So we mount temp_base outside of the container to temp_dir inside. util.makedirs(mount_base + temp_dir, exist_ok=True) container.make_bind_mount(temp_base, mount_base + temp_dir, read_only=True) # And the following if branch will automatically hide the bind # mount below an empty directory. # If necessary, (i.e., if /tmp is not already hidden), # hide the directory where we store our files from processes in the container # by mounting an empty directory over it. if os.path.exists(mount_base + temp_dir): util.makedirs(temp_base + temp_dir, exist_ok=True) container.make_bind_mount(temp_base + temp_dir, mount_base + temp_dir) os.chroot(mount_base)
[ "def", "_setup_container_filesystem", "(", "self", ",", "temp_dir", ",", "output_dir", ",", "memlimit", ",", "memory_nodes", ")", ":", "# All strings here are bytes to avoid issues if existing mountpoints are invalid UTF-8.", "temp_base", "=", "self", ".", "_get_result_files_ba...
Setup the filesystem layout in the container. As first step, we create a copy of all existing mountpoints in mount_base, recursively, and as "private" mounts (i.e., changes to existing mountpoints afterwards won't propagate to our copy). Then we iterate over all mountpoints and change them according to the mode the user has specified (hidden, read-only, overlay, or full-access). This has do be done for each mountpoint because overlays are not recursive. Then we chroot into the new mount hierarchy. The new filesystem layout still has a view of the host's /proc. We do not mount a fresh /proc here because the grandchild still needs the old /proc. We do simply iterate over all existing mount points and set them to read-only/overlay them, because it is easier to create a new hierarchy and chroot into it. First, we still have access to the original mountpoints while doing so, and second, we avoid race conditions if someone else changes the existing mountpoints. @param temp_dir: The base directory under which all our directories should be created.
[ "Setup", "the", "filesystem", "layout", "in", "the", "container", ".", "As", "first", "step", "we", "create", "a", "copy", "of", "all", "existing", "mountpoints", "in", "mount_base", "recursively", "and", "as", "private", "mounts", "(", "i", ".", "e", ".",...
python
train
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L3545-L3589
def software_breakpoint_set(self, addr, thumb=False, arm=False, flash=False, ram=False): """Sets a software breakpoint at the specified address. If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if ``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a normal breakpoint is set. If ``flash`` is ``True``, the breakpoint is set in flash, otherwise if ``ram`` is ``True``, the breakpoint is set in RAM. If both are ``True`` or both are ``False``, then the best option is chosen for setting the breakpoint in software. Args: self (JLink): the ``JLink`` instance addr (int): the address where the breakpoint will be set thumb (bool): boolean indicating to set the breakpoint in THUMB mode arm (bool): boolean indicating to set the breakpoint in ARM mode flash (bool): boolean indicating to set the breakpoint in flash ram (bool): boolean indicating to set the breakpoint in RAM Returns: An integer specifying the breakpoint handle. This handle should sbe retained for future breakpoint operations. Raises: TypeError: if the given address is not an integer. JLinkException: if the breakpoint could not be set. """ if flash and not ram: flags = enums.JLinkBreakpoint.SW_FLASH elif not flash and ram: flags = enums.JLinkBreakpoint.SW_RAM else: flags = enums.JLinkBreakpoint.SW if thumb: flags = flags | enums.JLinkBreakpoint.THUMB elif arm: flags = flags | enums.JLinkBreakpoint.ARM handle = self._dll.JLINKARM_SetBPEx(int(addr), flags) if handle <= 0: raise errors.JLinkException('Software breakpoint could not be set.') return handle
[ "def", "software_breakpoint_set", "(", "self", ",", "addr", ",", "thumb", "=", "False", ",", "arm", "=", "False", ",", "flash", "=", "False", ",", "ram", "=", "False", ")", ":", "if", "flash", "and", "not", "ram", ":", "flags", "=", "enums", ".", "...
Sets a software breakpoint at the specified address. If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if ``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a normal breakpoint is set. If ``flash`` is ``True``, the breakpoint is set in flash, otherwise if ``ram`` is ``True``, the breakpoint is set in RAM. If both are ``True`` or both are ``False``, then the best option is chosen for setting the breakpoint in software. Args: self (JLink): the ``JLink`` instance addr (int): the address where the breakpoint will be set thumb (bool): boolean indicating to set the breakpoint in THUMB mode arm (bool): boolean indicating to set the breakpoint in ARM mode flash (bool): boolean indicating to set the breakpoint in flash ram (bool): boolean indicating to set the breakpoint in RAM Returns: An integer specifying the breakpoint handle. This handle should sbe retained for future breakpoint operations. Raises: TypeError: if the given address is not an integer. JLinkException: if the breakpoint could not be set.
[ "Sets", "a", "software", "breakpoint", "at", "the", "specified", "address", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/schema.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/schema.py#L1020-L1027
def is_sqlatype_text_over_one_char( coltype: Union[TypeEngine, VisitableType]) -> bool: """ Is the SQLAlchemy column type a string type that's more than one character long? """ coltype = _coltype_to_typeengine(coltype) return is_sqlatype_text_of_length_at_least(coltype, 2)
[ "def", "is_sqlatype_text_over_one_char", "(", "coltype", ":", "Union", "[", "TypeEngine", ",", "VisitableType", "]", ")", "->", "bool", ":", "coltype", "=", "_coltype_to_typeengine", "(", "coltype", ")", "return", "is_sqlatype_text_of_length_at_least", "(", "coltype",...
Is the SQLAlchemy column type a string type that's more than one character long?
[ "Is", "the", "SQLAlchemy", "column", "type", "a", "string", "type", "that", "s", "more", "than", "one", "character", "long?" ]
python
train
KrishnaswamyLab/graphtools
graphtools/graphs.py
https://github.com/KrishnaswamyLab/graphtools/blob/44685352be7df2005d44722903092207967457f2/graphtools/graphs.py#L483-L513
def set_params(self, **params): """Set parameters on this object Safe setter method - attributes should not be modified directly as some changes are not valid. Valid parameters: - n_landmark - n_svd Parameters ---------- params : key-value pairs of parameter name and new values Returns ------- self """ # update parameters reset_landmarks = False if 'n_landmark' in params and params['n_landmark'] != self.n_landmark: self.n_landmark = params['n_landmark'] reset_landmarks = True if 'n_svd' in params and params['n_svd'] != self.n_svd: self.n_svd = params['n_svd'] reset_landmarks = True # update superclass parameters super().set_params(**params) # reset things that changed if reset_landmarks: self._reset_landmarks() return self
[ "def", "set_params", "(", "self", ",", "*", "*", "params", ")", ":", "# update parameters", "reset_landmarks", "=", "False", "if", "'n_landmark'", "in", "params", "and", "params", "[", "'n_landmark'", "]", "!=", "self", ".", "n_landmark", ":", "self", ".", ...
Set parameters on this object Safe setter method - attributes should not be modified directly as some changes are not valid. Valid parameters: - n_landmark - n_svd Parameters ---------- params : key-value pairs of parameter name and new values Returns ------- self
[ "Set", "parameters", "on", "this", "object" ]
python
train
saltstack/salt
salt/roster/dir.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/roster/dir.py#L82-L100
def _render(roster_file, **kwargs): """ Render the roster file """ renderers = salt.loader.render(__opts__, {}) domain = __opts__.get('roster_domain', '') try: result = salt.template.compile_template(roster_file, renderers, __opts__['renderer'], __opts__['renderer_blacklist'], __opts__['renderer_whitelist'], mask_value='passw*', **kwargs) result.setdefault('host', '{}.{}'.format(os.path.basename(roster_file), domain)) return result except: # pylint: disable=W0702 log.warning('Unable to render roster file "%s".', roster_file, exc_info=True) return {}
[ "def", "_render", "(", "roster_file", ",", "*", "*", "kwargs", ")", ":", "renderers", "=", "salt", ".", "loader", ".", "render", "(", "__opts__", ",", "{", "}", ")", "domain", "=", "__opts__", ".", "get", "(", "'roster_domain'", ",", "''", ")", "try"...
Render the roster file
[ "Render", "the", "roster", "file" ]
python
train
spacetelescope/stsci.tools
lib/stsci/tools/fileutil.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/fileutil.py#L885-L904
def countExtn(fimg, extname='SCI'): """ Return the number of 'extname' extensions, defaulting to counting the number of SCI extensions. """ closefits = False if isinstance(fimg, string_types): fimg = fits.open(fimg) closefits = True n = 0 for e in fimg: if 'extname' in e.header and e.header['extname'] == extname: n += 1 if closefits: fimg.close() return n
[ "def", "countExtn", "(", "fimg", ",", "extname", "=", "'SCI'", ")", ":", "closefits", "=", "False", "if", "isinstance", "(", "fimg", ",", "string_types", ")", ":", "fimg", "=", "fits", ".", "open", "(", "fimg", ")", "closefits", "=", "True", "n", "="...
Return the number of 'extname' extensions, defaulting to counting the number of SCI extensions.
[ "Return", "the", "number", "of", "extname", "extensions", "defaulting", "to", "counting", "the", "number", "of", "SCI", "extensions", "." ]
python
train
evolbioinfo/pastml
pastml/acr.py
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/acr.py#L612-L740
def main(): """ Entry point, calling :py:func:`pastml.acr.pastml_pipeline` with command-line arguments. :return: void """ import argparse parser = argparse.ArgumentParser(description="Ancestral character reconstruction and visualisation " "for rooted phylogenetic trees.", prog='pastml') tree_group = parser.add_argument_group('tree-related arguments') tree_group.add_argument('-t', '--tree', help="input tree in newick format (must be rooted).", type=str, required=True) annotation_group = parser.add_argument_group('annotation-file-related arguments') annotation_group.add_argument('-d', '--data', required=True, type=str, help="annotation file in tab/csv format with the first row " "containing the column names.") annotation_group.add_argument('-s', '--data_sep', required=False, type=str, default='\t', help="column separator for the annotation table. " "By default is set to tab, i.e. for a tab-delimited file. " "Set it to ',' if your file is csv.") annotation_group.add_argument('-i', '--id_index', required=False, type=int, default=0, help="index of the annotation table column containing tree tip names, " "indices start from zero (by default is set to 0).") acr_group = parser.add_argument_group('ancestral-character-reconstruction-related arguments') acr_group.add_argument('-c', '--columns', nargs='*', help="names of the annotation table columns that contain characters " "to be analysed. " "If not specified, all columns are considered.", type=str) acr_group.add_argument('--prediction_method', choices=[MPPA, MAP, JOINT, DOWNPASS, ACCTRAN, DELTRAN, COPY, ALL, ML, MP], type=str, nargs='*', default=MPPA, help='ancestral character reconstruction (ACR) method, ' 'can be one of the max likelihood (ML) methods: {ml}, ' 'one of the max parsimony (MP) methods: {mp}; ' 'or {copy} to keep the annotated character states as-is without inference. 
' 'One can also specify one of the meta-methods {meta} that would perform ACR ' 'with multiple methods (all of them for {meta_all}, ' 'all the ML methods for {meta_ml}, or all the MP methods for {meta_mp}) ' 'and save/visualise the results as multiple characters ' 'suffixed with the corresponding method.' 'When multiple ancestral characters are specified (see -c, --columns), ' 'the same method can be used for all of them (if only one method is specified), ' 'or different methods can be used (specified in the same order as -c, --columns). ' 'If multiple methods are given, but not for all the characters, ' 'for the rest of them the default method ({default}) is chosen.' .format(ml=', '.join(ML_METHODS), mp=', '.join(MP_METHODS), copy=COPY, default=MPPA, meta=', '.join(META_ML_METHODS | {MP}), meta_ml=ML, meta_mp=MP, meta_all=ALL)) acr_group.add_argument('--forced_joint', action='store_true', help='add {joint} state to the {mppa} state selection ' 'even if it is not selected by Brier score.'.format(joint=JOINT, mppa=MPPA)) acr_group.add_argument('-m', '--model', default=F81, choices=[JC, F81, EFT, HKY, JTT], type=str, nargs='*', help='evolutionary model for ML methods (ignored by MP methods). ' 'When multiple ancestral characters are specified (see -c, --columns), ' 'the same model can be used for all of them (if only one model is specified), ' 'or different models can be used (specified in the same order as -c, --columns). ' 'If multiple models are given, but not for all the characters, ' 'for the rest of them the default model ({}) is chosen.'.format(F81)) acr_group.add_argument('--parameters', type=str, nargs='*', help='optional way to fix some of the ML-method parameters ' 'by specifying files that contain them. ' 'Should be in the same order ' 'as the ancestral characters (see -c, --columns) ' 'for which the reconstruction is to be preformed. ' 'Could be given only for the first few characters. 
' 'Each file should be tab-delimited, with two columns: ' 'the first one containing parameter names, ' 'and the second, named "value", containing parameter values. ' 'Parameters can include character state frequencies ' '(parameter name should be the corresponding state, ' 'and parameter value - the float frequency value, between 0 and 1),' 'and tree branch scaling factor (parameter name {}).'.format(SCALING_FACTOR)) vis_group = parser.add_argument_group('visualisation-related arguments') vis_group.add_argument('-n', '--name_column', type=str, default=None, help="name of the character to be used for node names " "in the compressed map visualisation " "(must be one of those specified via -c, --columns). " "If the annotation table contains only one column it will be used by default.") vis_group.add_argument('--date_column', required=False, default=None, help="name of the annotation table column that contains tip dates, " "if specified it is used to add a time slider to the visualisation.", type=str) vis_group.add_argument('--tip_size_threshold', type=int, default=REASONABLE_NUMBER_OF_TIPS, help="recursively remove the tips of size less than threshold-th largest tip" "from the compressed map (set to 1e10 to keep all tips). " "The larger it is the less tips will be trimmed.") out_group = parser.add_argument_group('output-related arguments') out_group.add_argument('-o', '--out_data', required=False, type=str, help="path to the output annotation file with the reconstructed ancestral character states.") out_group.add_argument('--work_dir', required=False, default=None, type=str, help="path to the folder where pastml parameter, named tree " "and marginal probability (for marginal ML methods ({}) only) files are to be stored. " "Default is <path_to_input_file>/<input_file_name>_pastml. " "If the folder does not exist, it will be created." 
.format(', '.join(MARGINAL_ML_METHODS))) out_group.add_argument('-p', '--html_compressed', required=False, default=None, type=str, help="path to the output compressed map visualisation file (html).") out_group.add_argument('-l', '--html', required=False, default=None, type=str, help="path to the output full tree visualisation file (html).") out_group.add_argument('-v', '--verbose', action='store_true', help="print information on the progress of the analysis (to console)") parser.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=PASTML_VERSION)) itol_group = parser.add_argument_group('iTOL-related arguments') itol_group.add_argument('--upload_to_itol', action='store_true', help="upload the ACR annotated tree to iTOL (https://itol.embl.de/)") itol_group.add_argument('--itol_id', required=False, default=None, type=str, help="iTOL user batch upload ID that enables uploading to your iTOL account " "(see https://itol.embl.de/help.cgi#batch). " "If not specified, the tree will not be associated to any account.") itol_group.add_argument('--itol_project', required=False, default=None, type=str, help="iTOL project the annotated tree should be associated with " "(must exist, and --itol_id must be specified). " "If not specified, the tree will not be associated with any project.") itol_group.add_argument('--itol_tree_name', required=False, default=None, type=str, help="name for the tree uploaded to iTOL.") params = parser.parse_args() pastml_pipeline(**vars(params))
[ "def", "main", "(", ")", ":", "import", "argparse", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Ancestral character reconstruction and visualisation \"", "\"for rooted phylogenetic trees.\"", ",", "prog", "=", "'pastml'", ")", "tree_group...
Entry point, calling :py:func:`pastml.acr.pastml_pipeline` with command-line arguments. :return: void
[ "Entry", "point", "calling", ":", "py", ":", "func", ":", "pastml", ".", "acr", ".", "pastml_pipeline", "with", "command", "-", "line", "arguments", "." ]
python
train
azogue/i2csense
i2csense/__init__.py
https://github.com/azogue/i2csense/blob/ecc6806dcee9de827a5414a9e836d271fedca9b9/i2csense/__init__.py#L123-L138
def dew_point_temperature(self): """Return the dew point temperature in ºC for the last measurement. For sensors implementing temperature and humidity values. Extracted from the HTU21D sensor spec sheet.""" if self.sample_ok: temperature = self._get_value_opc_attr('temperature', 3) humidity = self._get_value_opc_attr('humidity', 3) if temperature is not None and humidity is not None: # Calc dew point temperature in celsius coef_a, coef_b, coef_c = 8.1332, 1762.39, 235.66 part_press = 10 ** (coef_a - coef_b / (temperature + coef_c)) dewp = - coef_c dewp -= coef_b / (log10(humidity * part_press / 100.) - coef_a) return dewp return None
[ "def", "dew_point_temperature", "(", "self", ")", ":", "if", "self", ".", "sample_ok", ":", "temperature", "=", "self", ".", "_get_value_opc_attr", "(", "'temperature'", ",", "3", ")", "humidity", "=", "self", ".", "_get_value_opc_attr", "(", "'humidity'", ","...
Return the dew point temperature in ºC for the last measurement. For sensors implementing temperature and humidity values. Extracted from the HTU21D sensor spec sheet.
[ "Return", "the", "dew", "point", "temperature", "in", "ºC", "for", "the", "last", "measurement", "." ]
python
train
Erotemic/utool
utool/util_hash.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L968-L979
def get_file_uuid(fpath, hasher=None, stride=1): """ Creates a uuid from the hash of a file """ if hasher is None: hasher = hashlib.sha1() # 20 bytes of output #hasher = hashlib.sha256() # 32 bytes of output # sha1 produces a 20 byte hash hashbytes_20 = get_file_hash(fpath, hasher=hasher, stride=stride) # sha1 produces 20 bytes, but UUID requires 16 bytes hashbytes_16 = hashbytes_20[0:16] uuid_ = uuid.UUID(bytes=hashbytes_16) return uuid_
[ "def", "get_file_uuid", "(", "fpath", ",", "hasher", "=", "None", ",", "stride", "=", "1", ")", ":", "if", "hasher", "is", "None", ":", "hasher", "=", "hashlib", ".", "sha1", "(", ")", "# 20 bytes of output", "#hasher = hashlib.sha256() # 32 bytes of output", ...
Creates a uuid from the hash of a file
[ "Creates", "a", "uuid", "from", "the", "hash", "of", "a", "file" ]
python
train
SpikeInterface/spikeextractors
spikeextractors/SortingExtractor.py
https://github.com/SpikeInterface/spikeextractors/blob/cbe3b8778a215f0bbd743af8b306856a87e438e1/spikeextractors/SortingExtractor.py#L178-L203
def set_unit_property(self, unit_id, property_name, value): '''This function adds a unit property data set under the given property name to the given unit. Parameters ---------- unit_id: int The unit id for which the property will be set property_name: str The name of the property to be stored value The data associated with the given property name. Could be many formats as specified by the user. ''' if isinstance(unit_id, (int, np.integer)): if unit_id in self.get_unit_ids(): if unit_id not in self._unit_properties: self._unit_properties[unit_id] = {} if isinstance(property_name, str): self._unit_properties[unit_id][property_name] = value else: raise ValueError(str(property_name) + " must be a string") else: raise ValueError(str(unit_id) + " is not a valid unit_id") else: raise ValueError(str(unit_id) + " must be an int")
[ "def", "set_unit_property", "(", "self", ",", "unit_id", ",", "property_name", ",", "value", ")", ":", "if", "isinstance", "(", "unit_id", ",", "(", "int", ",", "np", ".", "integer", ")", ")", ":", "if", "unit_id", "in", "self", ".", "get_unit_ids", "(...
This function adds a unit property data set under the given property name to the given unit. Parameters ---------- unit_id: int The unit id for which the property will be set property_name: str The name of the property to be stored value The data associated with the given property name. Could be many formats as specified by the user.
[ "This", "function", "adds", "a", "unit", "property", "data", "set", "under", "the", "given", "property", "name", "to", "the", "given", "unit", "." ]
python
train
oscarbranson/latools
latools/filtering/clustering.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/clustering.py#L35-L56
def cluster_kmeans(data, n_clusters, **kwargs): """ Identify clusters using K - Means algorithm. Parameters ---------- data : array_like array of size [n_samples, n_features]. n_clusters : int The number of clusters expected in the data. Returns ------- dict boolean array for each identified cluster. """ km = cl.KMeans(n_clusters, **kwargs) kmf = km.fit(data) labels = kmf.labels_ return labels, [np.nan]
[ "def", "cluster_kmeans", "(", "data", ",", "n_clusters", ",", "*", "*", "kwargs", ")", ":", "km", "=", "cl", ".", "KMeans", "(", "n_clusters", ",", "*", "*", "kwargs", ")", "kmf", "=", "km", ".", "fit", "(", "data", ")", "labels", "=", "kmf", "."...
Identify clusters using K - Means algorithm. Parameters ---------- data : array_like array of size [n_samples, n_features]. n_clusters : int The number of clusters expected in the data. Returns ------- dict boolean array for each identified cluster.
[ "Identify", "clusters", "using", "K", "-", "Means", "algorithm", "." ]
python
test
MaxStrange/AudioSegment
algorithms/asa.py
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L406-L430
def _get_consecutive_and_overlapping_fronts(onset_fronts, offset_fronts, onset_front_id, offset_front_id): """ Gets an onset_front and an offset_front such that they both occupy at least some of the same frequency channels, then returns the portion of each that overlaps with the other. """ # Get the onset front of interest onset_front = _get_front_idxs_from_id(onset_fronts, onset_front_id) # Get the offset front of interest offset_front = _get_front_idxs_from_id(offset_fronts, offset_front_id) # Keep trying consecutive portions of this onset front until we find a consecutive portion # that overlaps with part of the offset front consecutive_portions_of_onset_front = [c for c in _get_consecutive_portions_of_front(onset_front)] for consecutive_portion_of_onset_front in consecutive_portions_of_onset_front: # Only get the segment of this front that overlaps in frequencies with the onset front of interest onset_front_frequency_indexes = [f for f, _ in consecutive_portion_of_onset_front] overlapping_offset_front = [(f, s) for f, s in offset_front if f in onset_front_frequency_indexes] # Only get as much of this overlapping portion as is actually consecutive for consecutive_portion_of_offset_front in _get_consecutive_portions_of_front(overlapping_offset_front): if consecutive_portion_of_offset_front: # Just return the first one we get - if we get any it means we found a portion of overlap return consecutive_portion_of_onset_front, consecutive_portion_of_offset_front return [], []
[ "def", "_get_consecutive_and_overlapping_fronts", "(", "onset_fronts", ",", "offset_fronts", ",", "onset_front_id", ",", "offset_front_id", ")", ":", "# Get the onset front of interest", "onset_front", "=", "_get_front_idxs_from_id", "(", "onset_fronts", ",", "onset_front_id", ...
Gets an onset_front and an offset_front such that they both occupy at least some of the same frequency channels, then returns the portion of each that overlaps with the other.
[ "Gets", "an", "onset_front", "and", "an", "offset_front", "such", "that", "they", "both", "occupy", "at", "least", "some", "of", "the", "same", "frequency", "channels", "then", "returns", "the", "portion", "of", "each", "that", "overlaps", "with", "the", "ot...
python
test
zabertech/python-swampyer
swampyer/__init__.py
https://github.com/zabertech/python-swampyer/blob/31b040e7570455718709a496d6d9faacfb372a00/swampyer/__init__.py#L468-L479
def subscribe(self,topic,callback=None,options=None): """ Subscribe to a uri for events from a publisher """ full_topic = self.get_full_uri(topic) result = self.send_and_await_response(SUBSCRIBE( options=options or {}, topic=full_topic )) if result == WAMP_SUBSCRIBED: if not callback: callback = lambda a: None self._subscriptions[result.subscription_id] = [topic,callback]
[ "def", "subscribe", "(", "self", ",", "topic", ",", "callback", "=", "None", ",", "options", "=", "None", ")", ":", "full_topic", "=", "self", ".", "get_full_uri", "(", "topic", ")", "result", "=", "self", ".", "send_and_await_response", "(", "SUBSCRIBE", ...
Subscribe to a uri for events from a publisher
[ "Subscribe", "to", "a", "uri", "for", "events", "from", "a", "publisher" ]
python
train
jreese/aiosqlite
aiosqlite/core.py
https://github.com/jreese/aiosqlite/blob/3f548b568b8db9a57022b6e2c9627f5cdefb983f/aiosqlite/core.py#L222-L228
async def execute_fetchall( self, sql: str, parameters: Iterable[Any] = None ) -> Iterable[sqlite3.Row]: """Helper to execute a query and return all the data.""" if parameters is None: parameters = [] return await self._execute(self._execute_fetchall, sql, parameters)
[ "async", "def", "execute_fetchall", "(", "self", ",", "sql", ":", "str", ",", "parameters", ":", "Iterable", "[", "Any", "]", "=", "None", ")", "->", "Iterable", "[", "sqlite3", ".", "Row", "]", ":", "if", "parameters", "is", "None", ":", "parameters",...
Helper to execute a query and return all the data.
[ "Helper", "to", "execute", "a", "query", "and", "return", "all", "the", "data", "." ]
python
train
saltstack/salt
salt/modules/cloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cloud.py#L41-L49
def _get_client(): ''' Return a cloud client ''' client = salt.cloud.CloudClient( os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud'), pillars=copy.deepcopy(__pillar__.get('cloud', {})) ) return client
[ "def", "_get_client", "(", ")", ":", "client", "=", "salt", ".", "cloud", ".", "CloudClient", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__opts__", "[", "'conf_file'", "]", ")", ",", "'cloud'", ")", ",", "pil...
Return a cloud client
[ "Return", "a", "cloud", "client" ]
python
train
opengridcc/opengrid
opengrid/library/regression.py
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/regression.py#L157-L219
def _do_analysis_cross_validation(self): """ Find the best model (fit) based on cross-valiation (leave one out) """ assert len(self.df) < 15, "Cross-validation is not implemented if your sample contains more than 15 datapoints" # initialization: first model is the mean, but compute cv correctly. errors = [] response_term = [Term([LookupFactor(self.y)])] model_terms = [Term([])] # empty term is the intercept model_desc = ModelDesc(response_term, model_terms) for i in self.df.index: # make new_fit, compute cross-validation and store error df_ = self.df.drop(i, axis=0) fit = fm.ols(model_desc, data=df_).fit() cross_prediction = self._predict(fit=fit, df=self.df.loc[[i], :]) errors.append(cross_prediction['predicted'] - cross_prediction[self.y]) self._list_of_fits = [fm.ols(model_desc, data=self.df).fit()] self.list_of_cverrors = [np.mean(np.abs(np.array(errors)))] # try to improve the model until no improvements can be found all_model_terms_dict = {x: Term([LookupFactor(x)]) for x in self.list_of_x} while all_model_terms_dict: # import pdb;pdb.set_trace() # try each x in all_exog and overwrite if we find a better one # at the end of iteration (and not earlier), save the best of the iteration better_model_found = False best = dict(fit=self._list_of_fits[-1], cverror=self.list_of_cverrors[-1]) for x, term in all_model_terms_dict.items(): model_desc = ModelDesc(response_term, self._list_of_fits[-1].model.formula.rhs_termlist + [term]) # cross_validation, currently only implemented for monthly data # compute the mean error for a given formula based on leave-one-out. 
errors = [] for i in self.df.index: # make new_fit, compute cross-validation and store error df_ = self.df.drop(i, axis=0) fit = fm.ols(model_desc, data=df_).fit() cross_prediction = self._predict(fit=fit, df=self.df.loc[[i], :]) errors.append(cross_prediction['predicted'] - cross_prediction[self.y]) cverror = np.mean(np.abs(np.array(errors))) # compare the model with the current fit if cverror < best['cverror']: # better model, keep it # first, reidentify using all the datapoints best['fit'] = fm.ols(model_desc, data=self.df).fit() best['cverror'] = cverror better_model_found = True best_x = x if better_model_found: self._list_of_fits.append(best['fit']) self.list_of_cverrors.append(best['cverror']) else: # if we did not find a better model, exit break # next iteration with the found exog removed all_model_terms_dict.pop(best_x) self._fit = self._list_of_fits[-1]
[ "def", "_do_analysis_cross_validation", "(", "self", ")", ":", "assert", "len", "(", "self", ".", "df", ")", "<", "15", ",", "\"Cross-validation is not implemented if your sample contains more than 15 datapoints\"", "# initialization: first model is the mean, but compute cv correct...
Find the best model (fit) based on cross-valiation (leave one out)
[ "Find", "the", "best", "model", "(", "fit", ")", "based", "on", "cross", "-", "valiation", "(", "leave", "one", "out", ")" ]
python
train
dpa-newslab/livebridge
livebridge/base/sources.py
https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/base/sources.py#L46-L62
async def filter_new_posts(self, source_id, post_ids): """Filters ist of post_id for new ones. :param source_id: id of the source :type string: :param post_ids: list of post ids :type list: :returns: list of unknown post ids.""" new_ids = [] try: db_client = self._db posts_in_db = await db_client.get_known_posts(source_id, post_ids) new_ids = [p for p in post_ids if p not in posts_in_db] except Exception as exc: logger.error("Error when filtering for new posts {} {}".format(source_id, post_ids)) logger.exception(exc) return new_ids
[ "async", "def", "filter_new_posts", "(", "self", ",", "source_id", ",", "post_ids", ")", ":", "new_ids", "=", "[", "]", "try", ":", "db_client", "=", "self", ".", "_db", "posts_in_db", "=", "await", "db_client", ".", "get_known_posts", "(", "source_id", ",...
Filters ist of post_id for new ones. :param source_id: id of the source :type string: :param post_ids: list of post ids :type list: :returns: list of unknown post ids.
[ "Filters", "ist", "of", "post_id", "for", "new", "ones", "." ]
python
train
OpenAgInitiative/openag_python
openag/utils.py
https://github.com/OpenAgInitiative/openag_python/blob/f6202340292bbf7185e1a7d4290188c0dacbb8d0/openag/utils.py#L133-L156
def process_params(mod_id, params, type_params): """ Takes as input a dictionary of parameters defined on a module and the information about the required parameters defined on the corresponding module type. Validatates that are required parameters were supplied and fills any missing parameters with their default values from the module type. Returns a nested dictionary of the same format as the `type_params` but with an additional key `value` on each inner dictionary that gives the value of that parameter for this specific module """ res = {} for param_name, param_info in type_params.items(): val = params.get(param_name, param_info.get("default", None)) # Check against explicit None (param could be explicitly False) if val is not None: param_res = dict(param_info) param_res["value"] = val res[param_name] = param_res elif type_params.get("required", False): raise ValueError( 'Required parameter "{}" is not defined for module ' '"{}"'.format(param_name, mod_id) ) return res
[ "def", "process_params", "(", "mod_id", ",", "params", ",", "type_params", ")", ":", "res", "=", "{", "}", "for", "param_name", ",", "param_info", "in", "type_params", ".", "items", "(", ")", ":", "val", "=", "params", ".", "get", "(", "param_name", ",...
Takes as input a dictionary of parameters defined on a module and the information about the required parameters defined on the corresponding module type. Validatates that are required parameters were supplied and fills any missing parameters with their default values from the module type. Returns a nested dictionary of the same format as the `type_params` but with an additional key `value` on each inner dictionary that gives the value of that parameter for this specific module
[ "Takes", "as", "input", "a", "dictionary", "of", "parameters", "defined", "on", "a", "module", "and", "the", "information", "about", "the", "required", "parameters", "defined", "on", "the", "corresponding", "module", "type", ".", "Validatates", "that", "are", ...
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_wp.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_wp.py#L202-L210
def get_default_frame(self): '''default frame for waypoints''' if self.settings.terrainalt == 'Auto': if self.get_mav_param('TERRAIN_FOLLOW',0) == 1: return mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT return mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT if self.settings.terrainalt == 'True': return mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT return mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT
[ "def", "get_default_frame", "(", "self", ")", ":", "if", "self", ".", "settings", ".", "terrainalt", "==", "'Auto'", ":", "if", "self", ".", "get_mav_param", "(", "'TERRAIN_FOLLOW'", ",", "0", ")", "==", "1", ":", "return", "mavutil", ".", "mavlink", "."...
default frame for waypoints
[ "default", "frame", "for", "waypoints" ]
python
train
ltworf/typedload
typedload/dataloader.py
https://github.com/ltworf/typedload/blob/7fd130612963bfcec3242698463ef863ca4af927/typedload/dataloader.py#L401-L433
def _unionload(l: Loader, value, type_) -> Any: """ Loads a value into a union. Basically this iterates all the types inside the union, until one that doesn't raise an exception is found. If no suitable type is found, an exception is raised. """ try: args = uniontypes(type_) except AttributeError: raise TypedloadAttributeError('The typing API for this Python version is unknown') # Do not convert basic types, if possible if type(value) in args.intersection(l.basictypes): return value exceptions = [] # Try all types for t in args: try: return l.load(value, t, annotation=Annotation(AnnotationType.UNION, t)) except Exception as e: exceptions.append(e) raise TypedloadValueError( 'Value could not be loaded into %s' % type_, value=value, type_=type_, exceptions=exceptions )
[ "def", "_unionload", "(", "l", ":", "Loader", ",", "value", ",", "type_", ")", "->", "Any", ":", "try", ":", "args", "=", "uniontypes", "(", "type_", ")", "except", "AttributeError", ":", "raise", "TypedloadAttributeError", "(", "'The typing API for this Pytho...
Loads a value into a union. Basically this iterates all the types inside the union, until one that doesn't raise an exception is found. If no suitable type is found, an exception is raised.
[ "Loads", "a", "value", "into", "a", "union", "." ]
python
train
astropy/photutils
photutils/datasets/make.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/make.py#L26-L78
def apply_poisson_noise(data, random_state=None): """ Apply Poisson noise to an array, where the value of each element in the input array represents the expected number of counts. Each pixel in the output array is generated by drawing a random sample from a Poisson distribution whose expectation value is given by the pixel value in the input array. Parameters ---------- data : array-like The array on which to apply Poisson noise. Every pixel in the array must have a positive value (i.e. counts). random_state : int or `~numpy.random.RandomState`, optional Pseudo-random number generator state used for random sampling. Returns ------- result : `~numpy.ndarray` The data array after applying Poisson noise. See Also -------- make_noise_image Examples -------- .. plot:: :include-source: from photutils.datasets import make_4gaussians_image from photutils.datasets import apply_poisson_noise data1 = make_4gaussians_image(noise=False) data2 = apply_poisson_noise(data1, random_state=12345) # plot the images import matplotlib.pyplot as plt fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8)) ax1.imshow(data1, origin='lower', interpolation='nearest') ax1.set_title('Original image') ax2.imshow(data2, origin='lower', interpolation='nearest') ax2.set_title('Original image with Poisson noise applied') """ data = np.asanyarray(data) if np.any(data < 0): raise ValueError('data must not contain any negative values') prng = check_random_state(random_state) return prng.poisson(data)
[ "def", "apply_poisson_noise", "(", "data", ",", "random_state", "=", "None", ")", ":", "data", "=", "np", ".", "asanyarray", "(", "data", ")", "if", "np", ".", "any", "(", "data", "<", "0", ")", ":", "raise", "ValueError", "(", "'data must not contain an...
Apply Poisson noise to an array, where the value of each element in the input array represents the expected number of counts. Each pixel in the output array is generated by drawing a random sample from a Poisson distribution whose expectation value is given by the pixel value in the input array. Parameters ---------- data : array-like The array on which to apply Poisson noise. Every pixel in the array must have a positive value (i.e. counts). random_state : int or `~numpy.random.RandomState`, optional Pseudo-random number generator state used for random sampling. Returns ------- result : `~numpy.ndarray` The data array after applying Poisson noise. See Also -------- make_noise_image Examples -------- .. plot:: :include-source: from photutils.datasets import make_4gaussians_image from photutils.datasets import apply_poisson_noise data1 = make_4gaussians_image(noise=False) data2 = apply_poisson_noise(data1, random_state=12345) # plot the images import matplotlib.pyplot as plt fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8)) ax1.imshow(data1, origin='lower', interpolation='nearest') ax1.set_title('Original image') ax2.imshow(data2, origin='lower', interpolation='nearest') ax2.set_title('Original image with Poisson noise applied')
[ "Apply", "Poisson", "noise", "to", "an", "array", "where", "the", "value", "of", "each", "element", "in", "the", "input", "array", "represents", "the", "expected", "number", "of", "counts", "." ]
python
train
openstack/monasca-common
monasca_common/policy/policy_engine.py
https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/policy/policy_engine.py#L162-L174
def set_rules(rules, overwrite=True, use_conf=False): # pragma: no cover """Set rules based on the provided dict of rules. Note: Used in tests only. :param rules: New rules to use. It should be an instance of dict :param overwrite: Whether to overwrite current rules or update them with the new rules. :param use_conf: Whether to reload rules from config file. """ init(use_conf=False) _ENFORCER.set_rules(rules, overwrite, use_conf)
[ "def", "set_rules", "(", "rules", ",", "overwrite", "=", "True", ",", "use_conf", "=", "False", ")", ":", "# pragma: no cover", "init", "(", "use_conf", "=", "False", ")", "_ENFORCER", ".", "set_rules", "(", "rules", ",", "overwrite", ",", "use_conf", ")" ...
Set rules based on the provided dict of rules. Note: Used in tests only. :param rules: New rules to use. It should be an instance of dict :param overwrite: Whether to overwrite current rules or update them with the new rules. :param use_conf: Whether to reload rules from config file.
[ "Set", "rules", "based", "on", "the", "provided", "dict", "of", "rules", "." ]
python
train
genialis/resolwe
resolwe/process/fields.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/process/fields.py#L261-L281
def import_file(self, imported_format=None, progress_from=0.0, progress_to=None): """Import field source file to working directory. :param imported_format: Import file format (extracted, compressed or both) :param progress_from: Initial progress value :param progress_to: Final progress value :return: Destination file path (if extracted and compressed, extracted path given) """ if not hasattr(resolwe_runtime_utils, 'import_file'): raise RuntimeError('Requires resolwe-runtime-utils >= 2.0.0') if imported_format is None: imported_format = resolwe_runtime_utils.ImportedFormat.BOTH return resolwe_runtime_utils.import_file( src=self.file_temp, file_name=self.path, imported_format=imported_format, progress_from=progress_from, progress_to=progress_to )
[ "def", "import_file", "(", "self", ",", "imported_format", "=", "None", ",", "progress_from", "=", "0.0", ",", "progress_to", "=", "None", ")", ":", "if", "not", "hasattr", "(", "resolwe_runtime_utils", ",", "'import_file'", ")", ":", "raise", "RuntimeError", ...
Import field source file to working directory. :param imported_format: Import file format (extracted, compressed or both) :param progress_from: Initial progress value :param progress_to: Final progress value :return: Destination file path (if extracted and compressed, extracted path given)
[ "Import", "field", "source", "file", "to", "working", "directory", "." ]
python
train
opendatateam/udata
udata/core/dataset/models.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L602-L615
def add_resource(self, resource): '''Perform an atomic prepend for a new resource''' resource.validate() self.update(__raw__={ '$push': { 'resources': { '$each': [resource.to_mongo()], '$position': 0 } } }) self.reload() post_save.send(self.__class__, document=self, resource_added=resource.id)
[ "def", "add_resource", "(", "self", ",", "resource", ")", ":", "resource", ".", "validate", "(", ")", "self", ".", "update", "(", "__raw__", "=", "{", "'$push'", ":", "{", "'resources'", ":", "{", "'$each'", ":", "[", "resource", ".", "to_mongo", "(", ...
Perform an atomic prepend for a new resource
[ "Perform", "an", "atomic", "prepend", "for", "a", "new", "resource" ]
python
train
pytroll/satpy
satpy/composites/viirs.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/composites/viirs.py#L303-L388
def _run_dnb_normalization(self, dnb_data, sza_data): """Scale the DNB data using a adaptive histogram equalization method. Args: dnb_data (ndarray): Day/Night Band data array sza_data (ndarray): Solar Zenith Angle data array """ # convert dask arrays to DataArray objects dnb_data = xr.DataArray(dnb_data, dims=('y', 'x')) sza_data = xr.DataArray(sza_data, dims=('y', 'x')) good_mask = ~(dnb_data.isnull() | sza_data.isnull()) # good_mask = ~(dnb_data.mask | sza_data.mask) output_dataset = dnb_data.where(good_mask) # we only need the numpy array output_dataset = output_dataset.values.copy() dnb_data = dnb_data.values sza_data = sza_data.values day_mask, mixed_mask, night_mask = make_day_night_masks( sza_data, good_mask.values, self.high_angle_cutoff, self.low_angle_cutoff, stepsDegrees=self.mixed_degree_step) did_equalize = False has_multi_times = len(mixed_mask) > 0 if day_mask.any(): did_equalize = True if self.adaptive_day == "always" or ( has_multi_times and self.adaptive_day == "multiple"): LOG.debug("Adaptive histogram equalizing DNB day data...") local_histogram_equalization( dnb_data, day_mask, valid_data_mask=good_mask.values, local_radius_px=self.day_radius_pixels, out=output_dataset) else: LOG.debug("Histogram equalizing DNB day data...") histogram_equalization(dnb_data, day_mask, out=output_dataset) if mixed_mask: for mask in mixed_mask: if mask.any(): did_equalize = True if self.adaptive_mixed == "always" or ( has_multi_times and self.adaptive_mixed == "multiple"): LOG.debug( "Adaptive histogram equalizing DNB mixed data...") local_histogram_equalization( dnb_data, mask, valid_data_mask=good_mask.values, local_radius_px=self.mixed_radius_pixels, out=output_dataset) else: LOG.debug("Histogram equalizing DNB mixed data...") histogram_equalization(dnb_data, day_mask, out=output_dataset) if night_mask.any(): did_equalize = True if self.adaptive_night == "always" or ( has_multi_times and self.adaptive_night == "multiple"): LOG.debug("Adaptive histogram 
equalizing DNB night data...") local_histogram_equalization( dnb_data, night_mask, valid_data_mask=good_mask.values, local_radius_px=self.night_radius_pixels, out=output_dataset) else: LOG.debug("Histogram equalizing DNB night data...") histogram_equalization(dnb_data, night_mask, out=output_dataset) if not did_equalize: raise RuntimeError("No valid data found to histogram equalize") return output_dataset
[ "def", "_run_dnb_normalization", "(", "self", ",", "dnb_data", ",", "sza_data", ")", ":", "# convert dask arrays to DataArray objects", "dnb_data", "=", "xr", ".", "DataArray", "(", "dnb_data", ",", "dims", "=", "(", "'y'", ",", "'x'", ")", ")", "sza_data", "=...
Scale the DNB data using a adaptive histogram equalization method. Args: dnb_data (ndarray): Day/Night Band data array sza_data (ndarray): Solar Zenith Angle data array
[ "Scale", "the", "DNB", "data", "using", "a", "adaptive", "histogram", "equalization", "method", "." ]
python
train
twisted/epsilon
epsilon/ampauth.py
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/ampauth.py#L222-L229
def connectionLost(self, reason): """ If a login has happened, perform a logout. """ AMP.connectionLost(self, reason) if self.logout is not None: self.logout() self.boxReceiver = self.logout = None
[ "def", "connectionLost", "(", "self", ",", "reason", ")", ":", "AMP", ".", "connectionLost", "(", "self", ",", "reason", ")", "if", "self", ".", "logout", "is", "not", "None", ":", "self", ".", "logout", "(", ")", "self", ".", "boxReceiver", "=", "se...
If a login has happened, perform a logout.
[ "If", "a", "login", "has", "happened", "perform", "a", "logout", "." ]
python
train
cloudera/impyla
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
https://github.com/cloudera/impyla/blob/547fa2ba3b6151e2a98b3544301471a643212dc3/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L4873-L4881
def alter_function(self, dbName, funcName, newFunc): """ Parameters: - dbName - funcName - newFunc """ self.send_alter_function(dbName, funcName, newFunc) self.recv_alter_function()
[ "def", "alter_function", "(", "self", ",", "dbName", ",", "funcName", ",", "newFunc", ")", ":", "self", ".", "send_alter_function", "(", "dbName", ",", "funcName", ",", "newFunc", ")", "self", ".", "recv_alter_function", "(", ")" ]
Parameters: - dbName - funcName - newFunc
[ "Parameters", ":", "-", "dbName", "-", "funcName", "-", "newFunc" ]
python
train
appknox/google-chartwrapper
GChartWrapper/GChart.py
https://github.com/appknox/google-chartwrapper/blob/3769aecbef6c83b6cd93ee72ece478ffe433ac57/GChartWrapper/GChart.py#L271-L283
def scale(self, *args): """ Scales the data down to the given size args must be of the form:: <data set 1 minimum value>, <data set 1 maximum value>, <data set n minimum value>, <data set n maximum value> will only work with text encoding! APIPARAM: chds """ self._scale = [','.join(map(smart_str, args))] return self
[ "def", "scale", "(", "self", ",", "*", "args", ")", ":", "self", ".", "_scale", "=", "[", "','", ".", "join", "(", "map", "(", "smart_str", ",", "args", ")", ")", "]", "return", "self" ]
Scales the data down to the given size args must be of the form:: <data set 1 minimum value>, <data set 1 maximum value>, <data set n minimum value>, <data set n maximum value> will only work with text encoding! APIPARAM: chds
[ "Scales", "the", "data", "down", "to", "the", "given", "size", "args", "must", "be", "of", "the", "form", "::", "<data", "set", "1", "minimum", "value", ">", "<data", "set", "1", "maximum", "value", ">", "<data", "set", "n", "minimum", "value", ">", ...
python
test
cnobile2012/pololu-motors
pololu/motors/qik2s9v1.py
https://github.com/cnobile2012/pololu-motors/blob/453d2283a63cfe15cda96cad6dffa73372d52a7c/pololu/motors/qik2s9v1.py#L106-L123
def getError(self, device=DEFAULT_DEVICE_ID, message=True): """ Get the error message or value stored in the Qik 2s9v1 hardware. :Keywords: device : `int` The device is the integer number of the hardware devices ID and is only used with the Pololu Protocol. Defaults to the hardware's default value. message : `bool` If set to `True` a text message will be returned, if set to `False` the integer stored in the Qik will be returned. :Returns: A list of text messages, integers, or and empty list. See the `message` parameter above. """ return self._getError(device, message)
[ "def", "getError", "(", "self", ",", "device", "=", "DEFAULT_DEVICE_ID", ",", "message", "=", "True", ")", ":", "return", "self", ".", "_getError", "(", "device", ",", "message", ")" ]
Get the error message or value stored in the Qik 2s9v1 hardware. :Keywords: device : `int` The device is the integer number of the hardware devices ID and is only used with the Pololu Protocol. Defaults to the hardware's default value. message : `bool` If set to `True` a text message will be returned, if set to `False` the integer stored in the Qik will be returned. :Returns: A list of text messages, integers, or and empty list. See the `message` parameter above.
[ "Get", "the", "error", "message", "or", "value", "stored", "in", "the", "Qik", "2s9v1", "hardware", "." ]
python
train
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L572-L578
def normalize(self): """Replace rectangle with its finite version.""" if self.x1 < self.x0: self.x0, self.x1 = self.x1, self.x0 if self.y1 < self.y0: self.y0, self.y1 = self.y1, self.y0 return self
[ "def", "normalize", "(", "self", ")", ":", "if", "self", ".", "x1", "<", "self", ".", "x0", ":", "self", ".", "x0", ",", "self", ".", "x1", "=", "self", ".", "x1", ",", "self", ".", "x0", "if", "self", ".", "y1", "<", "self", ".", "y0", ":"...
Replace rectangle with its finite version.
[ "Replace", "rectangle", "with", "its", "finite", "version", "." ]
python
train
wmayner/pyphi
pyphi/models/cuts.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/models/cuts.py#L48-L57
def apply_cut(self, cm): """Return a modified connectivity matrix with all connections that are severed by this cut removed. Args: cm (np.ndarray): A connectivity matrix. """ # Invert the cut matrix, creating a matrix of preserved connections inverse = np.logical_not(self.cut_matrix(cm.shape[0])).astype(int) return cm * inverse
[ "def", "apply_cut", "(", "self", ",", "cm", ")", ":", "# Invert the cut matrix, creating a matrix of preserved connections", "inverse", "=", "np", ".", "logical_not", "(", "self", ".", "cut_matrix", "(", "cm", ".", "shape", "[", "0", "]", ")", ")", ".", "astyp...
Return a modified connectivity matrix with all connections that are severed by this cut removed. Args: cm (np.ndarray): A connectivity matrix.
[ "Return", "a", "modified", "connectivity", "matrix", "with", "all", "connections", "that", "are", "severed", "by", "this", "cut", "removed", "." ]
python
train
mardix/Yass
yass/yass.py
https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/yass.py#L182-L189
def _url_to(self, page): """ Get the url of a page """ anchor = "" if "#" in page: page, anchor = page.split("#") anchor = "#" + anchor meta = self._get_page_meta(page) return meta.get("url")
[ "def", "_url_to", "(", "self", ",", "page", ")", ":", "anchor", "=", "\"\"", "if", "\"#\"", "in", "page", ":", "page", ",", "anchor", "=", "page", ".", "split", "(", "\"#\"", ")", "anchor", "=", "\"#\"", "+", "anchor", "meta", "=", "self", ".", "...
Get the url of a page
[ "Get", "the", "url", "of", "a", "page" ]
python
train
pypa/setuptools
setuptools/command/easy_install.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/command/easy_install.py#L596-L608
def install_egg_scripts(self, dist): """Write all the scripts for `dist`, unless scripts are excluded""" if not self.exclude_scripts and dist.metadata_isdir('scripts'): for script_name in dist.metadata_listdir('scripts'): if dist.metadata_isdir('scripts/' + script_name): # The "script" is a directory, likely a Python 3 # __pycache__ directory, so skip it. continue self.install_script( dist, script_name, dist.get_metadata('scripts/' + script_name) ) self.install_wrapper_scripts(dist)
[ "def", "install_egg_scripts", "(", "self", ",", "dist", ")", ":", "if", "not", "self", ".", "exclude_scripts", "and", "dist", ".", "metadata_isdir", "(", "'scripts'", ")", ":", "for", "script_name", "in", "dist", ".", "metadata_listdir", "(", "'scripts'", ")...
Write all the scripts for `dist`, unless scripts are excluded
[ "Write", "all", "the", "scripts", "for", "dist", "unless", "scripts", "are", "excluded" ]
python
train
spyder-ide/spyder
spyder/widgets/fileswitcher.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/fileswitcher.py#L493-L500
def select_row(self, steps): """Select row in list widget based on a number of steps with direction. Steps can be positive (next rows) or negative (previous rows). """ row = self.current_row() + steps if 0 <= row < self.count(): self.set_current_row(row)
[ "def", "select_row", "(", "self", ",", "steps", ")", ":", "row", "=", "self", ".", "current_row", "(", ")", "+", "steps", "if", "0", "<=", "row", "<", "self", ".", "count", "(", ")", ":", "self", ".", "set_current_row", "(", "row", ")" ]
Select row in list widget based on a number of steps with direction. Steps can be positive (next rows) or negative (previous rows).
[ "Select", "row", "in", "list", "widget", "based", "on", "a", "number", "of", "steps", "with", "direction", "." ]
python
train
numenta/nupic
src/nupic/support/decorators.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/decorators.py#L69-L137
def logEntryExit(getLoggerCallback=logging.getLogger, entryExitLogLevel=logging.DEBUG, logArgs=False, logTraceback=False): """ Returns a closure suitable for use as function/method decorator for logging entry/exit of function/method. getLoggerCallback: user-supplied callback function that takes no args and returns the logger instance to use for logging. entryExitLogLevel: Log level for logging entry/exit of decorated function; e.g., logging.DEBUG; pass None to disable entry/exit logging. logArgs: If True, also log args logTraceback: If True, also log Traceback information Usage Examples: NOTE: logging must be initialized *before* any loggers are created, else there will be no output; see nupic.support.initLogging() @logEntryExit() def myFunctionBar(): ... @logEntryExit(logTraceback=True) @logExceptions() def myFunctionGamma(): ... raise RuntimeError("something bad happened") ... """ def entryExitLoggingDecorator(func): @functools.wraps(func) def entryExitLoggingWrap(*args, **kwargs): if entryExitLogLevel is None: enabled = False else: logger = getLoggerCallback() enabled = logger.isEnabledFor(entryExitLogLevel) if not enabled: return func(*args, **kwargs) funcName = str(func) if logArgs: argsRepr = ', '.join( [repr(a) for a in args] + ['%s=%r' % (k,v,) for k,v in kwargs.iteritems()]) else: argsRepr = '' logger.log( entryExitLogLevel, "ENTERING: %s(%s)%s", funcName, argsRepr, '' if not logTraceback else '; ' + repr(traceback.format_stack())) try: return func(*args, **kwargs) finally: logger.log( entryExitLogLevel, "LEAVING: %s(%s)%s", funcName, argsRepr, '' if not logTraceback else '; ' + repr(traceback.format_stack())) return entryExitLoggingWrap return entryExitLoggingDecorator
[ "def", "logEntryExit", "(", "getLoggerCallback", "=", "logging", ".", "getLogger", ",", "entryExitLogLevel", "=", "logging", ".", "DEBUG", ",", "logArgs", "=", "False", ",", "logTraceback", "=", "False", ")", ":", "def", "entryExitLoggingDecorator", "(", "func",...
Returns a closure suitable for use as function/method decorator for logging entry/exit of function/method. getLoggerCallback: user-supplied callback function that takes no args and returns the logger instance to use for logging. entryExitLogLevel: Log level for logging entry/exit of decorated function; e.g., logging.DEBUG; pass None to disable entry/exit logging. logArgs: If True, also log args logTraceback: If True, also log Traceback information Usage Examples: NOTE: logging must be initialized *before* any loggers are created, else there will be no output; see nupic.support.initLogging() @logEntryExit() def myFunctionBar(): ... @logEntryExit(logTraceback=True) @logExceptions() def myFunctionGamma(): ... raise RuntimeError("something bad happened") ...
[ "Returns", "a", "closure", "suitable", "for", "use", "as", "function", "/", "method", "decorator", "for", "logging", "entry", "/", "exit", "of", "function", "/", "method", "." ]
python
valid
saltstack/salt
salt/runners/bgp.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/bgp.py#L232-L409
def neighbors(*asns, **kwargs): ''' Search for BGP neighbors details in the mines of the ``bgp.neighbors`` function. Arguments: asns A list of AS numbers to search for. The runner will return only the neighbors of these AS numbers. device Filter by device name (minion ID). ip Search BGP neighbor using the IP address. In multi-VRF environments, the same IP address could be used by more than one neighbors, in different routing tables. network Search neighbors within a certain IP network. title Custom title. display: ``True`` Display on the screen or return structured object? Default: ``True`` (return on the CLI). outputter: ``table`` Specify the outputter name when displaying on the CLI. Default: :mod:`table <salt.output.table_out>`. In addition, any field from the output of the ``neighbors`` function from the :mod:`NAPALM BGP module <salt.modules.napalm_bgp.neighbors>` can be used as a filter. CLI Example: .. code-block:: bash salt-run bgp.neighbors 13335 15169 salt-run bgp.neighbors 13335 ip=172.17.19.1 salt-run bgp.neighbors multipath=True salt-run bgp.neighbors up=False export_policy=my-export-policy multihop=False salt-run bgp.neighbors network=192.168.0.0/16 Output example: .. 
code-block:: text BGP Neighbors for 13335, 15169 ________________________________________________________________________________________________________________________________________________________________ | Device | AS Number | Neighbor Address | State|#Active/Received/Accepted/Damped | Policy IN | Policy OUT | ________________________________________________________________________________________________________________________________________________________________ | edge01.bjm01 | 13335 | 172.17.109.11 | Established 0/398/398/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________ | edge01.bjm01 | 13335 | 172.17.109.12 | Established 397/398/398/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________ | edge01.flw01 | 13335 | 192.168.172.11 | Established 1/398/398/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________ | edge01.oua01 | 13335 | 172.17.109.17 | Established 0/0/0/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________ | edge01.bjm01 | 15169 | 2001::1 | Established 102/102/102/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________ | edge01.bjm01 | 15169 | 2001::2 | Established 102/102/102/0 | import-policy | export-policy | 
________________________________________________________________________________________________________________________________________________________________ | edge01.tbg01 | 13335 | 192.168.172.17 | Established 0/1/1/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________ ''' opts = _get_bgp_runner_opts() title = kwargs.pop('title', None) display = kwargs.pop('display', opts['display']) outputter = kwargs.pop('outputter', opts['outputter']) # cleaning up the kwargs # __pub args not used in this runner (yet) kwargs_copy = {} kwargs_copy.update(kwargs) for karg, _ in six.iteritems(kwargs_copy): if karg.startswith('__pub'): kwargs.pop(karg) if not asns and not kwargs: if display: print('Please specify at least an AS Number or an output filter') return [] device = kwargs.pop('device', None) neighbor_ip = kwargs.pop('ip', None) ipnet = kwargs.pop('network', None) ipnet_obj = IPNetwork(ipnet) if ipnet else None # any other key passed on the CLI can be used as a filter rows = [] # building the labels labels = {} for field in opts['return_fields']: if field in _DEFAULT_LABELS_MAPPING: labels[field] = _DEFAULT_LABELS_MAPPING[field] else: # transform from 'previous_connection_state' to 'Previous Connection State' labels[field] = ' '.join(map(lambda word: word.title(), field.split('_'))) display_fields = list(set(opts['return_fields']) - set(_DEFAULT_INCLUDED_FIELDS)) get_bgp_neighbors_all = _get_mine(opts=opts) if not title: title_parts = [] if asns: title_parts.append('BGP Neighbors for {asns}'.format( asns=', '.join([six.text_type(asn) for asn in asns]) )) if neighbor_ip: title_parts.append('Selecting neighbors having the remote IP address: {ipaddr}'.format(ipaddr=neighbor_ip)) if ipnet: title_parts.append('Selecting neighbors within the IP network: {ipnet}'.format(ipnet=ipnet)) if kwargs: title_parts.append('Searching for BGP 
neighbors having the attributes: {attrmap}'.format( attrmap=', '.join(map(lambda key: '{key}={value}'.format(key=key, value=kwargs[key]), kwargs)) )) title = '\n'.join(title_parts) for minion, get_bgp_neighbors_minion in six.iteritems(get_bgp_neighbors_all): # pylint: disable=too-many-nested-blocks if not get_bgp_neighbors_minion.get('result'): continue # ignore empty or failed mines if device and minion != device: # when requested to display only the neighbors on a certain device continue get_bgp_neighbors_minion_out = get_bgp_neighbors_minion.get('out', {}) for vrf, vrf_bgp_neighbors in six.iteritems(get_bgp_neighbors_minion_out): # pylint: disable=unused-variable for asn, get_bgp_neighbors_minion_asn in six.iteritems(vrf_bgp_neighbors): if asns and asn not in asns: # if filtering by AS number(s), # will ignore if this AS number key not in that list # and continue the search continue for neighbor in get_bgp_neighbors_minion_asn: if kwargs and not _compare_match(kwargs, neighbor): # requested filtering by neighbors stats # but this one does not correspond continue if neighbor_ip and neighbor_ip != neighbor.get('remote_address'): # requested filtering by neighbors IP addr continue if ipnet_obj and neighbor.get('remote_address'): neighbor_ip_obj = IPAddress(neighbor.get('remote_address')) if neighbor_ip_obj not in ipnet_obj: # Neighbor not in this network continue row = { 'device': minion, 'neighbor_address': neighbor.get('remote_address'), 'as_number': asn } if 'vrf' in display_fields: row['vrf'] = vrf if 'connection_stats' in display_fields: connection_stats = '{state} {active}/{received}/{accepted}/{damped}'.format( state=neighbor.get('connection_state', -1), active=neighbor.get('active_prefix_count', -1), received=neighbor.get('received_prefix_count', -1), accepted=neighbor.get('accepted_prefix_count', -1), damped=neighbor.get('suppressed_prefix_count', -1), ) row['connection_stats'] = connection_stats if 'interface_description' in display_fields or 
'interface_name' in display_fields: net_find = __salt__['net.interfaces'](device=minion, ipnet=neighbor.get('remote_address'), display=False) if net_find: if 'interface_description' in display_fields: row['interface_description'] = net_find[0]['interface_description'] if 'interface_name' in display_fields: row['interface_name'] = net_find[0]['interface'] else: # if unable to find anything, leave blank if 'interface_description' in display_fields: row['interface_description'] = '' if 'interface_name' in display_fields: row['interface_name'] = '' for field in display_fields: if field in neighbor: row[field] = neighbor[field] rows.append(row) return _display_runner(rows, labels, title, display=display, outputter=outputter)
[ "def", "neighbors", "(", "*", "asns", ",", "*", "*", "kwargs", ")", ":", "opts", "=", "_get_bgp_runner_opts", "(", ")", "title", "=", "kwargs", ".", "pop", "(", "'title'", ",", "None", ")", "display", "=", "kwargs", ".", "pop", "(", "'display'", ",",...
Search for BGP neighbors details in the mines of the ``bgp.neighbors`` function. Arguments: asns A list of AS numbers to search for. The runner will return only the neighbors of these AS numbers. device Filter by device name (minion ID). ip Search BGP neighbor using the IP address. In multi-VRF environments, the same IP address could be used by more than one neighbors, in different routing tables. network Search neighbors within a certain IP network. title Custom title. display: ``True`` Display on the screen or return structured object? Default: ``True`` (return on the CLI). outputter: ``table`` Specify the outputter name when displaying on the CLI. Default: :mod:`table <salt.output.table_out>`. In addition, any field from the output of the ``neighbors`` function from the :mod:`NAPALM BGP module <salt.modules.napalm_bgp.neighbors>` can be used as a filter. CLI Example: .. code-block:: bash salt-run bgp.neighbors 13335 15169 salt-run bgp.neighbors 13335 ip=172.17.19.1 salt-run bgp.neighbors multipath=True salt-run bgp.neighbors up=False export_policy=my-export-policy multihop=False salt-run bgp.neighbors network=192.168.0.0/16 Output example: .. 
code-block:: text BGP Neighbors for 13335, 15169 ________________________________________________________________________________________________________________________________________________________________ | Device | AS Number | Neighbor Address | State|#Active/Received/Accepted/Damped | Policy IN | Policy OUT | ________________________________________________________________________________________________________________________________________________________________ | edge01.bjm01 | 13335 | 172.17.109.11 | Established 0/398/398/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________ | edge01.bjm01 | 13335 | 172.17.109.12 | Established 397/398/398/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________ | edge01.flw01 | 13335 | 192.168.172.11 | Established 1/398/398/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________ | edge01.oua01 | 13335 | 172.17.109.17 | Established 0/0/0/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________ | edge01.bjm01 | 15169 | 2001::1 | Established 102/102/102/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________ | edge01.bjm01 | 15169 | 2001::2 | Established 102/102/102/0 | import-policy | export-policy | 
________________________________________________________________________________________________________________________________________________________________ | edge01.tbg01 | 13335 | 192.168.172.17 | Established 0/1/1/0 | import-policy | export-policy | ________________________________________________________________________________________________________________________________________________________________
[ "Search", "for", "BGP", "neighbors", "details", "in", "the", "mines", "of", "the", "bgp", ".", "neighbors", "function", "." ]
python
train
CamDavidsonPilon/lifelines
lifelines/generate_datasets.py
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/generate_datasets.py#L249-L289
def generate_random_lifetimes(hazard_rates, timelines, size=1, censor=None): """ Based on the hazard rates, compute random variables from the survival function hazard_rates: (n,t) array of hazard rates timelines: (t,) the observation times size: the number to return, per hardard rate censor: If True, adds uniform censoring between timelines.max() and 0 If a postive number, censors all events above that value. If (n,) np.array >=0 , censor elementwise. Returns ------- survival_times: (size,n) array of random variables. (optional) censorship: if censor is true, returns (size,n) array with bool True if the death was observed (not right-censored) """ n = hazard_rates.shape[1] survival_times = np.empty((n, size)) cumulative_hazards = cumulative_integral(hazard_rates.values, timelines).T for i in range(size): u = random.rand(n, 1) e = -np.log(u) v = (e - cumulative_hazards) < 0 cross = v.argmax(1) survival_times[:, i] = timelines[cross] survival_times[cross == 0, i] = np.inf if censor is not None: if isinstance(censor, bool): T = timelines.max() rv = T * random.uniform(size=survival_times.shape) else: rv = censor observed = np.less_equal(survival_times, rv) survival_times = np.minimum(rv, survival_times) return survival_times.T, observed.T else: return survival_times
[ "def", "generate_random_lifetimes", "(", "hazard_rates", ",", "timelines", ",", "size", "=", "1", ",", "censor", "=", "None", ")", ":", "n", "=", "hazard_rates", ".", "shape", "[", "1", "]", "survival_times", "=", "np", ".", "empty", "(", "(", "n", ","...
Based on the hazard rates, compute random variables from the survival function hazard_rates: (n,t) array of hazard rates timelines: (t,) the observation times size: the number to return, per hardard rate censor: If True, adds uniform censoring between timelines.max() and 0 If a postive number, censors all events above that value. If (n,) np.array >=0 , censor elementwise. Returns ------- survival_times: (size,n) array of random variables. (optional) censorship: if censor is true, returns (size,n) array with bool True if the death was observed (not right-censored)
[ "Based", "on", "the", "hazard", "rates", "compute", "random", "variables", "from", "the", "survival", "function", "hazard_rates", ":", "(", "n", "t", ")", "array", "of", "hazard", "rates", "timelines", ":", "(", "t", ")", "the", "observation", "times", "si...
python
train
basecrm/basecrm-python
basecrm/services.py
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L691-L705
def retrieve(self, id):
    """Fetch one lead by its unique identifier.

    Issues ``GET /leads/{id}``; the server responds with an error when no
    lead with that id is visible to the user.

    :param int id: Unique identifier of a Lead.
    :return: Attribute-style-accessible dictionary representing the Lead.
    :rtype: dict
    """
    status_code, headers, lead = self.http_client.get("/leads/{id}".format(id=id))
    return lead
[ "def", "retrieve", "(", "self", ",", "id", ")", ":", "_", ",", "_", ",", "lead", "=", "self", ".", "http_client", ".", "get", "(", "\"/leads/{id}\"", ".", "format", "(", "id", "=", "id", ")", ")", "return", "lead" ]
Retrieve a single lead Returns a single lead available to the user, according to the unique lead ID provided If the specified lead does not exist, this query returns an error :calls: ``get /leads/{id}`` :param int id: Unique identifier of a Lead. :return: Dictionary that supports attribute-style access and represents a Lead resource. :rtype: dict
[ "Retrieve", "a", "single", "lead" ]
python
train
nitely/v8-cffi
v8cffi/context.py
https://github.com/nitely/v8-cffi/blob/e3492e7eaacb30be75999c24413aa15eeab57a5d/v8cffi/context.py#L22-L38
def _is_utf_8(txt): """ Check a string is utf-8 encoded :param bytes txt: utf-8 string :return: Whether the string\ is utf-8 encoded or not :rtype: bool """ assert isinstance(txt, six.binary_type) try: _ = six.text_type(txt, 'utf-8') except (TypeError, UnicodeEncodeError): return False else: return True
[ "def", "_is_utf_8", "(", "txt", ")", ":", "assert", "isinstance", "(", "txt", ",", "six", ".", "binary_type", ")", "try", ":", "_", "=", "six", ".", "text_type", "(", "txt", ",", "'utf-8'", ")", "except", "(", "TypeError", ",", "UnicodeEncodeError", ")...
Check a string is utf-8 encoded :param bytes txt: utf-8 string :return: Whether the string\ is utf-8 encoded or not :rtype: bool
[ "Check", "a", "string", "is", "utf", "-", "8", "encoded" ]
python
train
quantopian/zipline
zipline/data/continuous_future_reader.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/continuous_future_reader.py#L204-L282
def load_raw_arrays(self, columns, start_date, end_date, assets):
    """
    Load minute-level data for continuous futures, stitching together the
    underlying contracts across rolls.

    Parameters
    ----------
    columns : list of str
        'open', 'high', 'low', 'close', or 'volume' (the code also
        special-cases a synthetic 'sid' column).
    start_date : Timestamp
        Beginning of the window range (a minute).
    end_date : Timestamp
        End of the window range (a minute).
    assets : list
        Continuous-future assets; each must expose ``roll_style``,
        ``root_symbol`` and ``offset``.

    Returns
    -------
    list of np.ndarray
        A list with an entry per column of ndarrays with shape
        (minutes in range, assets), float64 for price columns and uint32
        for 'volume'.
    """
    rolls_by_asset = {}
    tc = self.trading_calendar
    start_session = tc.minute_to_session_label(start_date)
    end_session = tc.minute_to_session_label(end_date)
    # Each roll is a (sid, roll_date) pair describing which contract is
    # active and when it hands off to the next one.
    for asset in assets:
        rf = self._roll_finders[asset.roll_style]
        rolls_by_asset[asset] = rf.get_rolls(
            asset.root_symbol, start_session, end_session, asset.offset)

    sessions = tc.sessions_in_range(start_date, end_date)

    minutes = tc.minutes_in_range(start_date, end_date)
    num_minutes = len(minutes)
    shape = num_minutes, len(assets)

    results = []

    # Get partitions: split the minute range per asset into contiguous
    # (sid, start, end, start_loc, end_loc) spans during which a single
    # contract is active.
    partitions_by_asset = {}
    for asset in assets:
        partitions = []
        partitions_by_asset[asset] = partitions
        rolls = rolls_by_asset[asset]
        start = start_date
        for roll in rolls:
            sid, roll_date = roll
            start_loc = minutes.searchsorted(start)
            if roll_date is not None:
                # The contract is active through the close of the session
                # preceding the roll date.
                _, end = tc.open_and_close_for_session(
                    roll_date - sessions.freq)
                end_loc = minutes.searchsorted(end)
            else:
                # Final contract: active through the end of the window.
                end = end_date
                end_loc = len(minutes) - 1
            partitions.append((sid, start, end, start_loc, end_loc))
            if roll[-1] is not None:
                # Next span starts at the open of the session that follows
                # the minute after this span's end.
                start, _ = tc.open_and_close_for_session(
                    tc.minute_to_session_label(minutes[end_loc + 1]))

    for column in columns:
        # Prices default to NaN; volume gets an integer zero buffer.
        if column != 'volume':
            out = np.full(shape, np.nan)
        else:
            out = np.zeros(shape, dtype=np.uint32)
        for i, asset in enumerate(assets):
            partitions = partitions_by_asset[asset]
            for sid, start, end, start_loc, end_loc in partitions:
                if column != 'sid':
                    result = self._bar_reader.load_raw_arrays(
                        [column], start, end, [sid])[0][:, 0]
                else:
                    # Synthetic 'sid' column: fill the span with the
                    # active contract's id.
                    result = int(sid)
                out[start_loc:end_loc + 1, i] = result
        results.append(out)
    return results
[ "def", "load_raw_arrays", "(", "self", ",", "columns", ",", "start_date", ",", "end_date", ",", "assets", ")", ":", "rolls_by_asset", "=", "{", "}", "tc", "=", "self", ".", "trading_calendar", "start_session", "=", "tc", ".", "minute_to_session_label", "(", ...
Parameters ---------- fields : list of str 'open', 'high', 'low', 'close', or 'volume' start_dt: Timestamp Beginning of the window range. end_dt: Timestamp End of the window range. sids : list of int The asset identifiers in the window. Returns ------- list of np.ndarray A list with an entry per field of ndarrays with shape (minutes in range, sids) with a dtype of float64, containing the values for the respective field over start and end dt range.
[ "Parameters", "----------", "fields", ":", "list", "of", "str", "open", "high", "low", "close", "or", "volume", "start_dt", ":", "Timestamp", "Beginning", "of", "the", "window", "range", ".", "end_dt", ":", "Timestamp", "End", "of", "the", "window", "range",...
python
train
nilp0inter/cpe
cpe/comp/cpecomp1_1.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp1_1.py#L156-L183
def _decode(self):
    """
    Convert the encoded value of component to standard value (WFN value).
    """
    encoded = self._encoded_value
    # Drop '~' markers and split the multi-value separator '!'.
    parts = encoded.replace('~', '').split('!')

    decoded_parts = []
    for part in parts:
        chars = []
        for ch in part:
            if ch in CPEComponent1_1.NON_STANDARD_VALUES:
                # Non-standard punctuation must be backslash-escaped in
                # the WFN form.
                chars.append("\\")
            chars.append(ch)
        decoded_parts.append("".join(chars))

    self._standard_value = decoded_parts
[ "def", "_decode", "(", "self", ")", ":", "s", "=", "self", ".", "_encoded_value", "elements", "=", "s", ".", "replace", "(", "'~'", ",", "''", ")", ".", "split", "(", "'!'", ")", "dec_elements", "=", "[", "]", "for", "elem", "in", "elements", ":", ...
Convert the encoded value of component to standard value (WFN value).
[ "Convert", "the", "encoded", "value", "of", "component", "to", "standard", "value", "(", "WFN", "value", ")", "." ]
python
train
basvandenbroek/gcloud_taskqueue
gcloud_taskqueue/task.py
https://github.com/basvandenbroek/gcloud_taskqueue/blob/b147b57f7c0ad9e8030ee9797d6526a448aa5007/gcloud_taskqueue/task.py#L93-L106
def delete(self, client=None):
    """Remove this task from its Task Queue.

    :type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType``
    :param client: Optional client; when omitted, the taskqueue's bound
                   client is used.

    :rtype: :class:`Task`
    :returns: The task that was just deleted.
    :raises: :class:`gcloud.exceptions.NotFound` (propagated from
             :meth:`gcloud.taskqueue.taskqueue.Taskqueue.delete_task`).
    """
    queue = self.taskqueue
    return queue.delete_task(self.id, client=client)
[ "def", "delete", "(", "self", ",", "client", "=", "None", ")", ":", "return", "self", ".", "taskqueue", ".", "delete_task", "(", "self", ".", "id", ",", "client", "=", "client", ")" ]
Deletes a task from Task Queue. :type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the task's taskqueue. :rtype: :class:`Task` :returns: The task that was just deleted. :raises: :class:`gcloud.exceptions.NotFound` (propagated from :meth:`gcloud.taskqueue.taskqueue.Taskqueue.delete_task`).
[ "Deletes", "a", "task", "from", "Task", "Queue", "." ]
python
train
pgmpy/pgmpy
pgmpy/readwrite/XMLBeliefNetwork.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/XMLBeliefNetwork.py#L85-L95
def get_static_properties(self):
    """Return the model's STATICPROPERTIES as a dict.

    Maps each child tag of the STATICPROPERTIES element to its VALUE
    attribute.

    Examples
    --------
    >>> reader = XBNReader('xbn_test.xml')
    >>> reader.get_static_properties()
    {'FORMAT': 'MSR DTAS XML', 'VERSION': '0.2', 'CREATOR': 'Microsoft Research DTAS'}
    """
    properties = {}
    for element in self.bnmodel.find('STATICPROPERTIES'):
        properties[element.tag] = element.get('VALUE')
    return properties
[ "def", "get_static_properties", "(", "self", ")", ":", "return", "{", "tags", ".", "tag", ":", "tags", ".", "get", "(", "'VALUE'", ")", "for", "tags", "in", "self", ".", "bnmodel", ".", "find", "(", "'STATICPROPERTIES'", ")", "}" ]
Returns a dictionary of STATICPROPERTIES Examples -------- >>> reader = XBNReader('xbn_test.xml') >>> reader.get_static_properties() {'FORMAT': 'MSR DTAS XML', 'VERSION': '0.2', 'CREATOR': 'Microsoft Research DTAS'}
[ "Returns", "a", "dictionary", "of", "STATICPROPERTIES" ]
python
train
google/grr
grr/core/grr_response_core/lib/lexer.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/lexer.py#L84-L97
def Reset(self):
    """Reset the lexer to process a new data feed."""
    # Parsing restarts in the initial state with an empty state stack.
    self.state, self.state_stack = "INITIAL", []

    # Fresh, empty input buffer with cleared error/verbosity counters.
    self.buffer = ""
    self.error = self.verbose = 0

    # Nothing has been consumed yet.
    self.processed, self.processed_buffer = 0, ""
[ "def", "Reset", "(", "self", ")", ":", "# The first state", "self", ".", "state", "=", "\"INITIAL\"", "self", ".", "state_stack", "=", "[", "]", "# The buffer we are parsing now", "self", ".", "buffer", "=", "\"\"", "self", ".", "error", "=", "0", "self", ...
Reset the lexer to process a new data feed.
[ "Reset", "the", "lexer", "to", "process", "a", "new", "data", "feed", "." ]
python
train
anentropic/django-conditional-aggregates
djconnagg/aggregates.py
https://github.com/anentropic/django-conditional-aggregates/blob/20d060c31344267b589b0cff9bdcd341a6b539ab/djconnagg/aggregates.py#L9-L26
def transform_q(q, query):
    """
    Replaces (lookup, value) children of Q with equivalent WhereNode objects.

    Walks the Q tree: nested Q nodes are transformed recursively, while
    leaf (lookup, value) tuples are compiled into WhereNode objects via
    ``query.build_filter``.  Modifies ``q`` in place, so nothing is
    returned.  (This pre-prep keeps the Query object out of the later
    SQL-rendering step, mirroring how Django structures its own API.)
    """
    for position, node in enumerate(q.children):
        if isinstance(node, Q):
            transform_q(node, query)
        else:
            # node is a (lookup, value) tuple.
            q.children[position] = query.build_filter(node)
[ "def", "transform_q", "(", "q", ",", "query", ")", ":", "for", "i", ",", "child", "in", "enumerate", "(", "q", ".", "children", ")", ":", "if", "isinstance", "(", "child", ",", "Q", ")", ":", "transform_q", "(", "child", ",", "query", ")", "else", ...
Replaces (lookup, value) children of Q with equivalent WhereNode objects. This is a pre-prep of our Q object, ready for later rendering into SQL. Modifies in place, no need to return. (We could do this in render_q, but then we'd have to pass the Query object from ConditionalAggregate down into SQLConditionalAggregate, which Django avoids to do in their API so we try and follow their lead here)
[ "Replaces", "(", "lookup", "value", ")", "children", "of", "Q", "with", "equivalent", "WhereNode", "objects", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/routing_system/interface/ve/ipv6/interface_ospfv3_conf/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/interface/ve/ipv6/interface_ospfv3_conf/__init__.py#L368-L391
def _set_network(self, v, load=False):
    """
    Setter method for network, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/network (enumeration)
    If this variable is read-only (config: false) in the source YANG file, then _set_network is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_network() directly.

    YANG Description: To configure the OSPF network type.The default setting of the parameter depends on the network type.
    """
    # NOTE: this setter follows the pyangbind-style generated pattern used
    # throughout this module; edit with care.
    if hasattr(v, "_utype"):
        # v is already a YANG-typed wrapper; re-wrap through its own type.
        v = v._utype(v)
    try:
        # Validate/coerce v against the enumeration {broadcast, point-to-point}.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        # Surface the generated type description so callers can see what
        # values are acceptable.
        raise ValueError({
            'error-string': """network must be of a type compatible with enumeration""",
            'defined-type': "brocade-ospfv3:enumeration",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)""",
        })

    self.__network = t
    if hasattr(self, '_set'):
        # Propagate the change through the framework's change hook.
        self._set()
[ "def", "_set_network", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base",...
Setter method for network, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/network (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_network is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_network() directly. YANG Description: To configure the OSPF network type.The default setting of the parameter depends on the network type.
[ "Setter", "method", "for", "network", "mapped", "from", "YANG", "variable", "/", "routing_system", "/", "interface", "/", "ve", "/", "ipv6", "/", "interface_ospfv3_conf", "/", "network", "(", "enumeration", ")", "If", "this", "variable", "is", "read", "-", "...
python
train
glormph/msstitch
src/app/readers/fasta.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/fasta.py#L38-L60
def get_proteins_genes(fastafn, fastadelim=None, genefield=None):
    """Yield (protein, gene, HGNC symbol, description) tuples from a file.

    FASTA input (ENSEMBL/UniProt) yields only protein id, gene and
    description, with the symbol slot set to None.  ENSEMBL Biomart
    mapping files are detected from their header columns and yield all
    fields via the Biomart parser.
    """
    with open(fastafn) as fp:
        firstline = next(fp).strip()
        header_fields = firstline.split('\t')
        if firstline[0] == '>':
            # FASTA: derive the gene from each record's description line.
            for record in parse_fasta(fastafn):
                rectype = get_record_type(record)
                gene = get_gene(record.description, rectype, fastadelim,
                                genefield)
                yield (record.id, gene, None, record.description)
        elif 'Ensembl Gene ID' in header_fields:
            for line in parse_biomart_fn(fastafn, 'Ensembl Gene ID',
                                         'Ensembl Protein ID', 'Description',
                                         'HGNC symbol',
                                         'Associated Gene Name'):
                yield line
        elif 'Gene ID' in header_fields:
            for line in parse_biomart_fn(fastafn, 'Gene ID', 'Protein ID',
                                         'Description', 'HGNC symbol',
                                         'Associated Gene Name'):
                yield line
[ "def", "get_proteins_genes", "(", "fastafn", ",", "fastadelim", "=", "None", ",", "genefield", "=", "None", ")", ":", "with", "open", "(", "fastafn", ")", "as", "fp", ":", "firstline", "=", "next", "(", "fp", ")", ".", "strip", "(", ")", "if", "first...
This returns a tuple of (protein, gene, HGNC symbol, description) from a passed file. If the file is FASTA from ENSEMBL or UniProt, only genes and descriptions are given and symbol will be None. If the file is a ENSEMBL Biomart mapping file it tries to parse that and also return the rest
[ "This", "returns", "a", "tuple", "of", "(", "protein", "gene", "HGNC", "symbol", "description", ")", "from", "a", "passed", "file", ".", "If", "the", "file", "is", "FASTA", "from", "ENSEMBL", "or", "UniProt", "only", "genes", "and", "descriptions", "are", ...
python
train
vlukes/dicom2fem
dicom2fem/mesh.py
https://github.com/vlukes/dicom2fem/blob/3056c977ca7119e01984d3aa0c4448a1c6c2430f/dicom2fem/mesh.py#L363-L399
def from_surface( surf_faces, mesh_in ):
    """
    Create a mesh given a set of surface faces and the original mesh.

    surf_faces : sequence of (n_el, n_fp) integer arrays of node indices
        into mesh_in (n_fp is 3 for triangles, 4 for quads).
    mesh_in : Mesh
        The volume mesh the faces refer to.

    Returns a new Mesh containing only the surface nodes, with
    connectivities renumbered to the compacted node set and one material
    id group per input face array.
    """
    # Collect the unique set of node indices used by any surface face.
    aux = nm.concatenate([faces.ravel() for faces in surf_faces])
    inod = nm.unique(aux)
    n_nod = len( inod )
    n_nod_m, dim = mesh_in.coors.shape

    # remap[old_node_index] -> new compacted index (only valid at inod).
    aux = nm.arange( n_nod, dtype=nm.int32 )
    remap = nm.zeros( (n_nod_m,), nm.int32 )
    remap[inod] = aux

    mesh = Mesh( mesh_in.name + "_surf" )

    # Keep only the coordinates/groups of the surface nodes.
    mesh.coors = mesh_in.coors[inod]
    mesh.ngroups = mesh_in.ngroups[inod]

    # Face-size -> element descriptor: triangles "2_3", quads "2_4".
    sfm = {3 : "2_3", 4 : "2_4"}

    mesh.conns = []
    mesh.descs = []
    mesh.mat_ids = []
    for ii, sf in enumerate( surf_faces ):
        n_el, n_fp = sf.shape

        # Renumber the connectivity into the compacted node numbering;
        # each input face array becomes its own material id group (ii).
        conn = remap[sf]
        mat_id = nm.empty( (conn.shape[0],), dtype = nm.int32 )
        mat_id.fill( ii )

        mesh.descs.append( sfm[n_fp] )
        mesh.conns.append( conn )
        mesh.mat_ids.append( mat_id )

    mesh._set_shape_info()
    return mesh
[ "def", "from_surface", "(", "surf_faces", ",", "mesh_in", ")", ":", "aux", "=", "nm", ".", "concatenate", "(", "[", "faces", ".", "ravel", "(", ")", "for", "faces", "in", "surf_faces", "]", ")", "inod", "=", "nm", ".", "unique", "(", "aux", ")", "n...
Create a mesh given a set of surface faces and the original mesh.
[ "Create", "a", "mesh", "given", "a", "set", "of", "surface", "faces", "and", "the", "original", "mesh", "." ]
python
train
hozn/coilmq
coilmq/protocol/__init__.py
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/protocol/__init__.py#L240-L248
def disconnect(self, frame):
    """Handle the DISCONNECT command by unbinding the connection.

    Clients are supposed to send DISCONNECT before going away, but in
    practice it should not be relied upon.
    """
    log = self.engine.log
    log.debug("Disconnect")
    self.engine.unbind()
[ "def", "disconnect", "(", "self", ",", "frame", ")", ":", "self", ".", "engine", ".", "log", ".", "debug", "(", "\"Disconnect\"", ")", "self", ".", "engine", ".", "unbind", "(", ")" ]
Handles the DISCONNECT command: Unbinds the connection. Clients are supposed to send this command, but in practice it should not be relied upon.
[ "Handles", "the", "DISCONNECT", "command", ":", "Unbinds", "the", "connection", "." ]
python
train
majerteam/sqla_inspect
sqla_inspect/excel.py
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/excel.py#L150-L165
def _render_headers(self): """ Write the headers row """ headers = getattr(self, 'headers', ()) for index, col in enumerate(headers): # We write the headers cell = self.worksheet.cell(row=1, column=index + 1) cell.value = col['label'] index += 1 extra_headers = getattr(self, 'extra_headers', ()) for add_index, col in enumerate(extra_headers): cell = self.worksheet.cell(row=1, column=add_index + index + 1) cell.value = col['label']
[ "def", "_render_headers", "(", "self", ")", ":", "headers", "=", "getattr", "(", "self", ",", "'headers'", ",", "(", ")", ")", "for", "index", ",", "col", "in", "enumerate", "(", "headers", ")", ":", "# We write the headers", "cell", "=", "self", ".", ...
Write the headers row
[ "Write", "the", "headers", "row" ]
python
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/solr.py
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/solr.py#L99-L221
def update_schema(self, catalog="hypermap"):
    """
    set the mapping in solr.

    Posts custom field types, field definitions and copy-field rules to
    the Solr Schema API of the given core, one HTTP request per
    definition.  Responses are not checked, so failures are silent.

    :param catalog: core name (defaults to "hypermap")
    :return: None
    """
    schema_url = "{0}/solr/{1}/schema".format(SEARCH_URL, catalog)
    # NOTE: Python 2 print statement (this module targets Python 2).
    print schema_url
    # create a special type to draw better heatmaps.
    location_rpt_quad_5m_payload = {
        "add-field-type": {
            "name": "location_rpt_quad_5m",
            "class": "solr.SpatialRecursivePrefixTreeFieldType",
            "geo": False,
            "worldBounds": "ENVELOPE(-180, 180, 180, -180)",
            "prefixTree": "packedQuad",
            "distErrPct": "0.025",
            "maxDistErr": "0.001",
            "distanceUnits": "degrees"
        }
    }
    requests.post(schema_url, json=location_rpt_quad_5m_payload)
    # create a special type to implement ngrm text for search.
    # N-grams (1..50 chars) are built at index time only; queries are just
    # whitespace-tokenized and lower-cased.
    text_ngrm_payload = {
        "add-field-type": {
            "name": "text_ngrm",
            "class": "solr.TextField",
            "positionIncrementGap": "100",
            "indexAnalyzer": {
                "tokenizer": {
                    "class": "solr.WhitespaceTokenizerFactory"
                },
                "filters": [
                    {
                        "class": "solr.NGramFilterFactory",
                        "minGramSize": "1",
                        "maxGramSize": "50"
                    },
                    {
                        "class": "solr.LowerCaseFilterFactory"
                    }
                ]
            },
            "queryAnalyzer": {
                "tokenizer": {
                    "class": "solr.WhitespaceTokenizerFactory"
                },
                "filters": [
                    {
                        "class": "solr.LowerCaseFilterFactory",
                    }
                ]
            }
        }
    }
    requests.post(schema_url, json=text_ngrm_payload)
    # now the other fields
    fields = [
        {"name": "abstract", "type": "string"},
        {"name": "abstract_txt", "type": "text_ngrm"},
        {"name": "area", "type": "pdouble"},
        {"name": "availability", "type": "string"},
        {"name": "bbox", "type": "location_rpt_quad_5m"},
        {"name": "domain_name", "type": "string"},
        {"name": "is_public", "type": "boolean"},
        {"name": "is_valid", "type": "boolean"},
        {"name": "keywords", "type": "string", "multiValued": True},
        {"name": "last_status", "type": "boolean"},
        {"name": "layer_category", "type": "string"},
        {"name": "layer_date", "type": "pdate", "docValues": True},
        {"name": "layer_datetype", "type": "string"},
        {"name": "layer_id", "type": "plong"},
        {"name": "layer_originator", "type": "string"},
        {"name": "layer_originator_txt", "type": "text_ngrm"},
        {"name": "layer_username", "type": "string"},
        {"name": "layer_username_txt", "type": "text_ngrm"},
        {"name": "location", "type": "string"},
        {"name": "max_x", "type": "pdouble"},
        {"name": "max_y", "type": "pdouble"},
        {"name": "min_x", "type": "pdouble"},
        {"name": "min_y", "type": "pdouble"},
        {"name": "name", "type": "string"},
        {"name": "recent_reliability", "type": "pdouble"},
        {"name": "reliability", "type": "pdouble"},
        {"name": "service_id", "type": "plong"},
        {"name": "service_type", "type": "string"},
        {"name": "srs", "type": "string", "multiValued": True},
        {"name": "tile_url", "type": "string"},
        {"name": "title", "type": "string"},
        {"name": "title_txt", "type": "text_ngrm"},
        {"name": "type", "type": "string"},
        {"name": "url", "type": "string"},
        {"name": "uuid", "type": "string", "required": True},
        {"name": "centroid_y", "type": "pdouble"},
        {"name": "centroid_x", "type": "pdouble"},
    ]
    # Mirror the searchable text fields into their n-gram twins, and
    # everything into the catch-all _text_ field.
    copy_fields = [
        {"source": "*", "dest": "_text_"},
        {"source": "title", "dest": "title_txt"},
        {"source": "abstract", "dest": "abstract_txt"},
        {"source": "layer_originator", "dest": "layer_originator_txt"},
        {"source": "layer_username", "dest": "layer_username_txt"},
    ]
    headers = {
        "Content-type": "application/json"
    }
    for field in fields:
        data = {
            "add-field": field
        }
        requests.post(schema_url, json=data, headers=headers)
    for field in copy_fields:
        data = {
            "add-copy-field": field
        }
        print data
        requests.post(schema_url, json=data, headers=headers)
[ "def", "update_schema", "(", "self", ",", "catalog", "=", "\"hypermap\"", ")", ":", "schema_url", "=", "\"{0}/solr/{1}/schema\"", ".", "format", "(", "SEARCH_URL", ",", "catalog", ")", "print", "schema_url", "# create a special type to draw better heatmaps.", "location_...
set the mapping in solr. :param catalog: core :return:
[ "set", "the", "mapping", "in", "solr", ".", ":", "param", "catalog", ":", "core", ":", "return", ":" ]
python
train
Calysto/calysto
calysto/ai/conx.py
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L641-L667
def changeSize(self, fromLayerSize, toLayerSize):
    """
    Changes the size of the connection depending on the size change of
    either source or destination layer. Should only be called through
    Network.changeLayerSize().

    fromLayerSize, toLayerSize : int
        New sizes of the source and destination layers (must be > 0).

    Reallocates the weight, delta-weight and weight-error-derivative
    matrices at the new shape and copies over the overlapping region of
    the old values; new rows/columns get zeroed deltas and random
    initial weights.
    """
    if toLayerSize <= 0 or fromLayerSize <= 0:
        raise LayerError('changeSize() called with invalid layer size.', \
                         (fromLayerSize, toLayerSize))
    # Fresh (from, to) matrices: zeroed deltas/derivatives, random weights
    # bounded by the destination layer's max-random setting.
    dweight = Numeric.zeros((fromLayerSize, toLayerSize), 'f')
    wed = Numeric.zeros((fromLayerSize, toLayerSize), 'f')
    wedLast = Numeric.zeros((fromLayerSize, toLayerSize), 'f')
    weight = randomArray((fromLayerSize, toLayerSize),
                         self.toLayer._maxRandom)
    # copy from old to new, considering one is smaller
    minFromLayerSize = min( fromLayerSize, self.fromLayer.size)
    minToLayerSize = min( toLayerSize, self.toLayer.size)
    for i in range(minFromLayerSize):
        for j in range(minToLayerSize):
            wed[i][j] = self.wed[i][j]
            wedLast[i][j] = self.wedLast[i][j]
            dweight[i][j] = self.dweight[i][j]
            weight[i][j] = self.weight[i][j]
    self.dweight = dweight
    self.wed = wed
    self.wedLast = wedLast
    self.weight = weight
[ "def", "changeSize", "(", "self", ",", "fromLayerSize", ",", "toLayerSize", ")", ":", "if", "toLayerSize", "<=", "0", "or", "fromLayerSize", "<=", "0", ":", "raise", "LayerError", "(", "'changeSize() called with invalid layer size.'", ",", "(", "fromLayerSize", ",...
Changes the size of the connection depending on the size change of either source or destination layer. Should only be called through Network.changeLayerSize().
[ "Changes", "the", "size", "of", "the", "connection", "depending", "on", "the", "size", "change", "of", "either", "source", "or", "destination", "layer", ".", "Should", "only", "be", "called", "through", "Network", ".", "changeLayerSize", "()", "." ]
python
train
BlackEarth/bl
bl/csv.py
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/csv.py#L29-L32
def excel_key(index):
    """create a key for index by converting index into a base-26 number,
    using A-Z as the characters (0 -> 'A', 25 -> 'Z', 26 -> 'AA', ...)."""
    n = int(index)
    letters = []
    # Bijective base-26: emit the least-significant letter, then step to
    # the next "digit" with (n // 26) - 1; n == -1 terminates.
    while n >= 0:
        letters.append(chr(65 + n % 26))
        n = n // 26 - 1
    return ''.join(reversed(letters))
[ "def", "excel_key", "(", "index", ")", ":", "X", "=", "lambda", "n", ":", "~", "n", "and", "X", "(", "(", "n", "//", "26", ")", "-", "1", ")", "+", "chr", "(", "65", "+", "(", "n", "%", "26", ")", ")", "or", "''", "return", "X", "(", "i...
create a key for index by converting index into a base-26 number, using A-Z as the characters.
[ "create", "a", "key", "for", "index", "by", "converting", "index", "into", "a", "base", "-", "26", "number", "using", "A", "-", "Z", "as", "the", "characters", "." ]
python
train
codeinn/vcs
vcs/cli.py
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/cli.py#L496-L501
def get_changeset(self, **options):
    """
    Returns changeset for given ``options``.

    Reads ``changeset_id`` from ``options`` (None when absent) and
    delegates the lookup to the repository.
    """
    changeset_id = options.get('changeset_id', None)
    return self.repo.get_changeset(changeset_id)
[ "def", "get_changeset", "(", "self", ",", "*", "*", "options", ")", ":", "cid", "=", "options", ".", "get", "(", "'changeset_id'", ",", "None", ")", "return", "self", ".", "repo", ".", "get_changeset", "(", "cid", ")" ]
Returns changeset for given ``options``.
[ "Returns", "changeset", "for", "given", "options", "." ]
python
train