positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def commit_async(self, offsets=None, callback=None):
    """Commit offsets to Kafka asynchronously, optionally firing a callback.

    Offsets are committed only to Kafka; they are consumed on the first
    fetch after every rebalance and on startup. If you store offsets
    anywhere else, do not use this API. To avoid re-processing the last
    message after a restart, commit the *next* offset your application
    should consume (last_offset + 1).

    The call never blocks; any error is passed to ``callback`` if given,
    otherwise discarded.

    Arguments:
        offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
            to commit with the configured group_id. Defaults to the
            currently consumed offsets for all subscribed partitions.
        callback (callable, optional): Invoked as
            ``callback(offsets, response)`` where ``response`` is either an
            Exception or an OffsetCommitResponse struct.

    Returns:
        kafka.future.Future
    """
    assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
    assert self.config['group_id'] is not None, 'Requires group_id'
    # Fall back to everything this consumer has consumed so far.
    if offsets is None:
        offsets = self._subscription.all_consumed_offsets()
    log.debug("Committing offsets: %s", offsets)
    return self._coordinator.commit_offsets_async(offsets, callback=callback)
Commit offsets to kafka asynchronously, optionally firing callback. This commits offsets only to Kafka. The offsets committed using this API will be used on the first fetch after every rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API should not be used. To avoid re-processing the last message read if a consumer is restarted, the committed offset should be the next message your application should consume, i.e.: last_offset + 1. This is an asynchronous call and will not block. Any errors encountered are either passed to the callback (if provided) or discarded. Arguments: offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict to commit with the configured group_id. Defaults to currently consumed offsets for all subscribed partitions. callback (callable, optional): Called as callback(offsets, response) with response as either an Exception or an OffsetCommitResponse struct. This callback can be used to trigger custom actions when a commit request completes. Returns: kafka.future.Future
def copy_location(new_node, old_node):
    """Copy ``lineno`` and ``col_offset`` from *old_node* to *new_node*.

    Each attribute is copied only when both nodes declare it in their
    ``_attributes`` and the old node actually carries a value. Returns the
    (possibly updated) new node.
    """
    for attr in ('lineno', 'col_offset'):
        transferable = (attr in old_node._attributes
                        and attr in new_node._attributes
                        and hasattr(old_node, attr))
        if transferable:
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
Copy the source location hint (`lineno` and `col_offset`) from the old to the new node if possible and return the new one.
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text_normalized') and self.text_normalized is not None: _dict['text_normalized'] = self.text_normalized return _dict
Return a json dictionary representing this model.
def object_to_dict(cls, obj):
    """Recursively convert an object graph into JSON-friendly structures.

    Lists become ``{"list": [...]}`` wrappers, primitives (as judged by
    ``cls.is_primitive``) are returned unchanged, and any other object is
    converted via its ``__dict__``.

    :param cls: class providing ``is_primitive`` and ``object_to_dict``
    :param obj: object, list, primitive, or None
    :return: dict, list-wrapping dict, or the primitive itself
    """
    # None yields an empty dict, matching the historical behavior.
    if obj is None:
        return {}
    # isinstance (rather than type(...) ==) also accepts list subclasses.
    if isinstance(obj, list):
        return {"list": [cls.object_to_dict(item) for item in obj]}
    if cls.is_primitive(obj):
        return obj
    dict_obj = {}
    for key, value in obj.__dict__.items():
        if isinstance(value, list):
            dict_obj[key] = [cls.object_to_dict(item) for item in value]
        elif not cls.is_primitive(value):
            dict_obj[key] = cls.object_to_dict(value)
        else:
            dict_obj[key] = value
    return dict_obj
This function converts Objects into Dictionary
def AddService(self, new_service):
    """Register a service, merging origins when it is already known.

    Args:
        new_service (WindowsService): the service to add.
    """
    for known_service in self._services:
        if known_service == new_service:
            # Identical service already tracked: just record where this
            # duplicate sighting came from.
            known_service.sources.append(new_service.sources[0])
            return
    # Genuinely new service: track it.
    self._services.append(new_service)
Add a new service to the list of ones we know about. Args: new_service (WindowsService): the service to add.
def process_deny_action(processors, action, argument):
    """Run every deny-action processor, then persist the session."""
    for handler in processors:
        handler(action, argument)
    db.session.commit()
Process deny action.
def is_on_filesystem(value, **kwargs):
    """Indicate whether ``value`` is a file or directory that exists on the
    local filesystem.

    :param value: The value to evaluate.

    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`

    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters
      or duplicates keyword parameters passed to the underlying validator
    """
    try:
        validators.path_exists(value, **kwargs)
    except SyntaxError:
        # Misuse of the API (duplicate kwargs) should surface, not be
        # treated as "invalid path".
        raise
    except Exception:
        return False
    return True
Indicate whether ``value`` is a file or directory that exists on the local filesystem. :param value: The value to evaluate. :returns: ``True`` if ``value`` is valid, ``False`` if it is not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator
def version(self, v):
    """Set the CoAP version.

    :param v: the version
    :raise AttributeError: if value is not 1
    """
    # Only version 1 of the protocol is supported.
    if isinstance(v, int) and v == 1:
        self._version = v
    else:
        raise AttributeError
Sets the CoAP version :param v: the version :raise AttributeError: if value is not 1
def system_monitor_sfp_alert_state(self, **kwargs):
    """Build the brocade-system-monitor ``sfp/alert/state`` config element
    and hand it to the callback.

    Expects ``state`` in kwargs; an optional ``callback`` overrides
    ``self._callback``.
    """
    config = ET.Element("config")
    system_monitor = ET.SubElement(
        config, "system-monitor",
        xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
    sfp_el = ET.SubElement(system_monitor, "sfp")
    alert_el = ET.SubElement(sfp_el, "alert")
    state_el = ET.SubElement(alert_el, "state")
    state_el.text = kwargs.pop('state')
    dispatch = kwargs.pop('callback', self._callback)
    return dispatch(config)
Auto Generated Code
def setActiveModule(Module):
    r"""Helps with collecting the members of the imported modules.

    Registers *Module* on first sighting and points the active member queue
    at its member list.
    """
    module_name = Module.__name__
    if module_name not in ModuleMembers:
        # First time this module is seen: create its member list and queue it.
        ModuleMembers[module_name] = []
        ModulesQ.append(module_name)
        Group(Module, {})  # brand the module with __ec_member__
    # NOTE(review): indentation reconstructed from a collapsed source —
    # confirm the three registration statements belong inside the `if`.
    state.ActiveModuleMemberQ = ModuleMembers[module_name]
r"""Helps with collecting the members of the imported modules.
def load_gettext_translations(directory: str, domain: str) -> None:
    """Loads translations from `gettext`'s locale tree

    Locale tree is similar to system's ``/usr/share/locale``, like::

        {directory}/{lang}/LC_MESSAGES/{domain}.mo

    Three steps are required to have your app translated:

    1. Generate POT translation file::

        xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc

    2. Merge against existing POT file::

        msgmerge old.po mydomain.po > new.po

    3. Compile::

        msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
    """
    # Module-level state shared with the rest of the locale machinery.
    global _translations
    global _supported_locales
    global _use_gettext
    _translations = {}
    for lang in os.listdir(directory):
        if lang.startswith("."):
            continue  # skip .svn, etc
        if os.path.isfile(os.path.join(directory, lang)):
            continue  # only per-language directories are considered
        try:
            # Probe for the .mo file so a missing file raises up front.
            os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
            _translations[lang] = gettext.translation(
                domain, directory, languages=[lang]
            )
        except Exception as e:
            # Best effort: one broken locale must not break the others.
            gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
            continue
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
    _use_gettext = True
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))
Loads translations from `gettext`'s locale tree Locale tree is similar to system's ``/usr/share/locale``, like:: {directory}/{lang}/LC_MESSAGES/{domain}.mo Three steps are required to have your app translated: 1. Generate POT translation file:: xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc 2. Merge against existing POT file:: msgmerge old.po mydomain.po > new.po 3. Compile:: msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
def get_chat(chat_id, **kwargs):
    """Fetch up-to-date information about a chat (current name of the user
    for one-on-one conversations, current username of a user, group or
    channel, etc.).

    :param chat_id: Unique identifier for the target chat or username of
        the target channel (in the format @channelusername)
    :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`

    :type chat_id: int or str

    :returns: Returns a Chat object on success.
    :rtype: Chat
    """
    # chat_id is the only required parameter for this RPC.
    return TelegramBotRPCRequest(
        'getChat',
        params={'chat_id': chat_id},
        on_result=Chat.from_result,
        **kwargs)
Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username of a user, group or channel, etc.). :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :type chat_id: int or str :returns: Returns a Chat object on success. :rtype: Chat
def get_next_of_type(self, processor_type):
    """Get the next available processor of a particular type and increment
    its occupancy counter.

    Args:
        processor_type (ProcessorType): The processor type associated with
            a zmq identity.

    Returns:
        (Processor): Information about the transaction processor
    """
    with self._condition:
        # Block until at least one processor of this type has registered.
        if processor_type not in self:
            self.wait_for_registration(processor_type)
        try:
            processor = self[processor_type].next_processor()
        except NoProcessorVacancyError:
            # Every processor of this type is busy; wait for a free slot.
            processor = self.wait_for_vacancy(processor_type)
        # Reserve the slot while still holding the condition lock so no
        # other thread can claim the same vacancy.
        processor.inc_occupancy()
    return processor
Get the next available processor of a particular type and increment its occupancy counter. Args: processor_type (ProcessorType): The processor type associated with a zmq identity. Returns: (Processor): Information about the transaction processor
def _build_validation_payload(self, request):
    """Extract relevant information from a request to build a
    ClientValidationJWT.

    :param PreparedRequest request: request we will extract information from.
    :return: ValidationPayload
    """
    parsed_url = urlparse(request.url)
    return ValidationPayload(
        method=request.method,
        path=parsed_url.path,
        # An absent query string is normalized to the empty string.
        query_string=parsed_url.query or '',
        all_headers=request.headers,
        signed_headers=ValidationClient.__SIGNED_HEADERS,
        body=request.body or ''
    )
Extract relevant information from request to build a ClientValidationJWT :param PreparedRequest request: request we will extract information from. :return: ValidationPayload
def _mems_updated_cb(self):
    """Called when the memories have been identified.

    Once memory discovery completes, kick off a refresh of the parameter
    table of contents, reusing the cached TOC where possible.
    """
    logger.info('Memories finished updating')
    self.param.refresh_toc(self._param_toc_updated_cb, self._toc_cache)
Called when the memories have been identified
def run(bam_file, data, out_dir):
    """Run viral QC analysis:
    1. Extract the unmapped reads
    2. BWA-MEM to the viral sequences from GDC database
       https://gdc.cancer.gov/about-data/data-harmonization-and-generation/gdc-reference-files
    3. Report viruses that are in more than 50% covered by at least 5x

    Returns a dict with ``base`` (the completeness report path) and
    ``secondary`` when analysis ran; otherwise an empty dict.
    """
    source_link = 'https://gdc.cancer.gov/about-data/data-harmonization-and-generation/gdc-reference-files'
    viral_target = "gdc-viral"
    out = {}
    # NOTE(review): only runs when a paired phenotype is reported — confirm
    # intended behavior for tumor-only samples.
    if vcfutils.get_paired_phenotype(data):
        viral_refs = [x for x in dd.get_viral_files(data)
                      if os.path.basename(x) == "%s.fa" % viral_target]
        if viral_refs and utils.file_exists(viral_refs[0]):
            viral_ref = viral_refs[0]
            viral_bam = os.path.join(utils.safe_makedir(out_dir),
                                     "%s-%s.bam" % (dd.get_sample_name(data),
                                                    utils.splitext_plus(os.path.basename(viral_ref))[0]))
            out_file = "%s-completeness.txt" % utils.splitext_plus(viral_bam)[0]
            cores = dd.get_num_cores(data)
            if not utils.file_uptodate(out_file, bam_file):
                # Re-align only when the viral BAM itself is stale.
                if not utils.file_uptodate(viral_bam, bam_file):
                    with file_transaction(data, viral_bam) as tx_out_file:
                        tmpfile = "%s-tmp" % utils.splitext_plus(tx_out_file)[0]
                        # unmapped reads (-f 4) -> fastq -> bwa mem -> sorted/indexed BAM
                        cmd = ("samtools view -u -f 4 {bam_file} | "
                               "bamtofastq collate=0 | "
                               "bwa mem -t {cores} {viral_ref} - | "
                               "bamsort tmpfile={tmpfile} inputthreads={cores} outputthreads={cores} "
                               "inputformat=sam index=1 indexfilename={tx_out_file}.bai O={tx_out_file}")
                        do.run(cmd.format(**locals()), "Align unmapped reads to viral genome")
                with file_transaction(data, out_file) as tx_out_file:
                    sample_name = dd.get_sample_name(data)
                    mosdepth_prefix = os.path.splitext(viral_bam)[0]
                    # mosdepth computes per-contig depth/thresholds; the awk/paste
                    # pipeline turns them into per-virus coverage fractions,
                    # sorted by the 5x column.
                    cmd = ("mosdepth -t {cores} {mosdepth_prefix} {viral_bam} -n --thresholds 1,5,25 --by "
                           "<(awk 'BEGIN {{FS=\"\\t\"}}; {{print $1 FS \"0\" FS $2}}' {viral_ref}.fai) && "
                           "echo '## Viral sequences (from {source_link}) found in unmapped reads' > {tx_out_file} &&"
                           "echo '## Sample: {sample_name}' >> {tx_out_file} && "
                           "echo '#virus\tsize\tdepth\t1x\t5x\t25x' >> {tx_out_file} && "
                           "paste <(zcat {mosdepth_prefix}.regions.bed.gz) <(zgrep -v ^# {mosdepth_prefix}.thresholds.bed.gz) | "
                           "awk 'BEGIN {{FS=\"\\t\"}} {{ print $1 FS $3 FS $4 FS $10/$3 FS $11/$3 FS $12/$3}}' | "
                           "sort -n -r -k 5,5 >> {tx_out_file}")
                    do.run(cmd.format(**locals()), "Analyse coverage of viral genomes")
            out["base"] = out_file
            out["secondary"] = []
    return out
Run viral QC analysis: 1. Extract the unmapped reads 2. BWA-MEM to the viral sequences from GDC database https://gdc.cancer.gov/about-data/data-harmonization-and-generation/gdc-reference-files 3. Report viruses that are in more than 50% covered by at least 5x
def clean_asciidoc(text):
    r""" Transform asciidoc text into ASCII text that NL parsers can handle

    TODO:
      Tag lines and words with meta data like italics, underlined, bold,
      title, heading 1, etc

    >>> clean_asciidoc('**Hello** _world_!')
    '"Hello" "world"!'
    """
    # Opening markers ([, _, * — one or two of them) at a word start
    # become an opening double quote; closing markers become a closing one.
    open_marker = re.compile(r'(\b|^)[\[_*]{1,2}([a-zA-Z0-9])')
    close_marker = re.compile(r'([a-zA-Z0-9])[\]_*]{1,2}')
    return close_marker.sub(r'\1"', open_marker.sub(r'"\2', text))
r""" Transform asciidoc text into ASCII text that NL parsers can handle TODO: Tag lines and words with meta data like italics, underlined, bold, title, heading 1, etc >>> clean_asciidoc('**Hello** _world_!') '"Hello" "world"!'
def output_hist(self, output_hist: Hist, input_observable: Any, **kwargs: Any) -> Union[Hist, Any]:
    """ Return an output object. It should store the ``output_hist``.

    Note:
        The output object could just be the raw histogram.

    Note:
        This function is just a basic placeholder which returns the given
        output object (a histogram) and likely should be overridden.

    Args:
        output_hist: The output histogram
        input_observable (object): The corresponding input object. It could
            be a histogram or something more complex.
        kwargs: Projection information dict combined with additional
            arguments passed to the projection function
    Return:
        The output object which should be stored in the output dict. By
        default, it returns the output hist.
    """
    return output_hist
Return an output object. It should store the ``output_hist``. Note: The output object could just be the raw histogram. Note: This function is just a basic placeholder which returns the given output object (a histogram) and likely should be overridden. Args: output_hist: The output histogram input_observable (object): The corresponding input object. It could be a histogram or something more complex. kwargs: Projection information dict combined with additional arguments passed to the projection function Return: The output object which should be stored in the output dict. By default, it returns the output hist.
def validate_address(value):
    """Validate that *value* is a usable Ethereum address.

    Accepts 20-byte binary addresses or 0x-prefixed hex strings with a
    correct EIP-55 checksum; raises InvalidAddress/TypeError otherwise.
    """
    # Binary input: only the length/shape check applies.
    if is_bytes(value):
        if not is_binary_address(value):
            raise InvalidAddress("Address must be 20 bytes when input type is bytes", value)
        return

    if not isinstance(value, str):
        raise TypeError('Address {} must be provided as a string'.format(value))
    if not is_hex_address(value):
        raise InvalidAddress("Address must be 20 bytes, as a hex string with a 0x prefix", value)
    if is_checksum_address(value):
        return

    # Distinguish "never checksummed" from "wrong checksum" for a more
    # actionable error message.
    if value == value.lower():
        raise InvalidAddress(
            "Web3.py only accepts checksum addresses. "
            "The software that gave you this non-checksum address should be considered unsafe, "
            "please file it as a bug on their platform. "
            "Try using an ENS name instead. Or, if you must accept lower safety, "
            "use Web3.toChecksumAddress(lower_case_address).",
            value,
        )
    raise InvalidAddress(
        "Address has an invalid EIP-55 checksum. "
        "After looking up the address from the original source, try again.",
        value,
    )
Helper function for validating an address
def get_dot(stop=True):
    """Returns a string containing a DOT file.

    Setting stop to True will cause the trace to stop.

    NOTE(review): ``stop`` is not referenced in this body — confirm whether
    stopping is handled elsewhere.
    """
    defaults = []
    nodes = []
    edges = []
    # define default attributes
    for comp, comp_attr in graph_attributes.items():
        attr = ', '.join(
            '%s = "%s"' % (attr, val)
            for attr, val in comp_attr.items()
        )
        defaults.append('\t%(comp)s [ %(attr)s ];\n' % locals())
    # define nodes
    for func, hits in func_count.items():
        calls_frac, total_time_frac, total_time = _frac_calculation(func, hits)
        col = settings['node_colour'](calls_frac, total_time_frac)
        attribs = ['%s="%s"' % a for a in settings['node_attributes'].items()]
        node_str = '"%s" [%s];' % (func, ', '.join(attribs))
        # node attribute values may themselves contain %(...)s templates
        # that are filled in from the local variables here (col, etc.).
        nodes.append(node_str % locals())
    # define edges
    for fr_key, fr_val in call_dict.items():
        if not fr_key:
            continue
        for to_key, to_val in fr_val.items():
            # NOTE(review): 'totla_time' is a typo but the value is unused.
            calls_frac, total_time_frac, totla_time = \
                _frac_calculation(to_key, to_val)
            col = settings['edge_colour'](calls_frac, total_time_frac)
            edge = '[ color = "%s", label="%s" ]' % (col, to_val)
            edges.append('"%s"->"%s" %s;' % (fr_key, to_key, edge))
    defaults = '\n\t'.join(defaults)
    nodes = '\n\t'.join(nodes)
    edges = '\n\t'.join(edges)
    dot_fmt = ("digraph G {\n"
               " %(defaults)s\n\n"
               " %(nodes)s\n\n"
               " %(edges)s\n}\n")
    return dot_fmt % locals()
Returns a string containing a DOT file. Setting stop to True will cause the trace to stop.
def send_special_keys(self, value):
    """Send special keys such as <enter> or <delete>.

    @rtype: WebElementWrapper
    @return: Self
    """
    def _dispatch():
        """Forward the key sequence to the wrapped element."""
        return self.element.send_keys(value)

    # Route through the shared exception handler so stale-element errors
    # are dealt with consistently.
    self.execute_and_handle_webelement_exceptions(_dispatch, 'send keys')
    return self
Send special keys such as <enter> or <delete> @rtype: WebElementWrapper @return: Self
def erosion(mapfile, dilated):
    """Continue working with the modified Mapfile.

    If we wanted to start from scratch we could simply reread it.
    """
    line_layer = mappyfile.find(mapfile["layers"], "name", "line")
    line_layer["status"] = "OFF"

    polygon_layer = mappyfile.find(mapfile["layers"], "name", "polygon")
    # Deep-copy the polygon layer so modifications apply only to the copy.
    eroded_layer = deepcopy(polygon_layer)
    eroded_layer["name"] = "newpolygon"
    mapfile["layers"].append(eroded_layer)

    # A negative buffer shrinks (erodes) the geometry.
    dilated = dilated.buffer(-0.3)
    eroded_layer["features"][0]["wkt"] = dilated.wkt

    style = polygon_layer["classes"][0]["styles"][0]
    style["color"] = "#999999"
    style["outlinecolor"] = "#b2b2b2"
We will continue to work with the modified Mapfile If we wanted to start from scratch we could simply reread it
def _get_si():
    '''
    Authenticate with vCenter server and return service instance object.
    '''
    def _opt(name, **kw):
        # Every lookup shares the provider, opts, and non-global search;
        # kw carries an optional `default`.
        return config.get_cloud_config_value(
            name, get_configured_provider(), __opts__,
            search_global=False, **kw
        )

    return salt.utils.vmware.get_service_instance(
        _opt('url'),
        _opt('user'),
        _opt('password'),
        protocol=_opt('protocol', default='https'),
        port=_opt('port', default=443),
    )
Authenticate with vCenter server and return service instance object.
def root_task_parser():
    """
    Returns a new *ArgumentParser* instance that only contains parameter
    actions of the root task. The returned instance is cached at module
    level; returns ``None`` when no luigi cmdline parser is active.
    """
    global _root_task_parser
    # Serve the cached parser when one was already built.
    if _root_task_parser:
        return _root_task_parser
    luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
    if not luigi_parser:
        return None
    root_task = luigi_parser.known_args.root_task
    # get all root task parameter destinations
    root_dests = []
    for task_name, _, param_name, _ in luigi.task_register.Register.get_all_params():
        if task_name == root_task:
            root_dests.append(param_name)
    # create a new parser and add all root actions
    _root_task_parser = ArgumentParser(add_help=False)
    for action in list(full_parser()._actions):
        # Positionals (no option strings) and root-task options are kept.
        if not action.option_strings or action.dest in root_dests:
            _root_task_parser._add_action(action)
    logger.debug("build luigi argument parser for root task {}".format(root_task))
    return _root_task_parser
Returns a new *ArgumentParser* instance that only contains parameter actions of the root task. The returned instance is cached.
def binned_bitsets_from_list(list=()):
    """Read a list of (chrom, start, end) rows into a dict of BinnedBitSets
    keyed by chromosome.

    NOTE: the parameter shadows the ``list`` builtin; the name is kept for
    backward compatibility with keyword callers. The default was changed
    from a mutable ``[]`` to an immutable ``()`` (it is only iterated, so
    behavior is unchanged).
    """
    last_chrom = None
    last_bitset = None
    bitsets = dict()
    for l in list:
        chrom = l[0]
        if chrom != last_chrom:
            # Lazily create one bitset per chromosome, reusing it when the
            # same chromosome appears again later.
            if chrom not in bitsets:
                bitsets[chrom] = BinnedBitSet(MAX)
            last_chrom = chrom
            last_bitset = bitsets[chrom]
        start, end = int(l[1]), int(l[2])
        last_bitset.set_range(start, end - start)
    return bitsets
Read a list into a dictionary of bitsets
def reset(self):
    """
    Reset the state of the sandbox.

    http://docs.fiesta.cc/sandbox.html#post--reset
    """
    # An (empty) request body forces urllib2 to issue a POST.
    response_data = self.request('reset', {})
    # The API answers with a boolean under the 'reset' key.
    return response_data['reset']
Reset the state of the sandbox. http://docs.fiesta.cc/sandbox.html#post--reset
def bbin(obj: Union[str, Element]) -> str:
    """ Boldify built-in types

    @param obj: object name or id
    @return: the element's name for Elements, a bolded name for built-ins,
        otherwise the input unchanged
    """
    if isinstance(obj, Element):
        return obj.name
    # Only names of Python built-ins get the markdown bold markers.
    return f'**{obj}**' if obj in builtin_names else obj
Boldify built-in types @param obj: object name or id @return:
def make_vbox_dirs(max_vbox_id, output_dir, topology_name):
    """Create VirtualBox working directories if required.

    :param int max_vbox_id: Number of directories to create (None = none)
    :param str output_dir: Output directory
    :param str topology_name: Topology name
    """
    if max_vbox_id is None:
        return
    base_dir = os.path.join(output_dir, topology_name + '-files', 'vbox')
    for vm_id in range(1, max_vbox_id + 1):
        os.makedirs(os.path.join(base_dir, 'vm-%s' % vm_id))
Create VirtualBox working directories if required :param int max_vbox_id: Number of directories to create :param str output_dir: Output directory :param str topology_name: Topology name
def background(cl, proto=EchoProcess, **kw):
    """Use the reactor to run a process in the background; keep the pid
    around.

    ``proto`` may be any callable which returns an instance of
    ProcessProtocol.

    :param cl: command line — a string (split with shlex) or an argv list
    :param proto: ProcessProtocol factory for the spawned process
    :returns: Deferred handed to the protocol for completion notification
    """
    if isinstance(cl, basestring):  # NOTE(review): Python 2 only (`basestring`)
        cl = shlex.split(cl)
    if not cl[0].startswith('/'):
        # Resolve a bare command name against PATH.
        path = which(cl[0])
        assert path, '%s not found' % cl[0]
        cl[0] = path[0]
    d = Deferred()
    proc = reactor.spawnProcess(
        proto(name=basename(cl[0]), deferred=d),
        cl[0], cl, env=os.environ, **kw)
    # Track the child pid so it can be reaped/killed later.
    daycare.add(proc.pid)
    return d
Use the reactor to run a process in the background. Keep the pid around. ``proto'' may be any callable which returns an instance of ProcessProtocol
def convex_conj(self):
    """Convex conjugate of the nuclear norm.

    The convex conjugate is the indicator function on the unit ball of the
    dual norm where the dual norm is obtained by taking the conjugate
    exponent of both the outer and singular vector exponents.
    """
    outer_conj = conj_exponent(self.outernorm.exponent)
    singular_conj = conj_exponent(self.pwisenorm.exponent)
    return IndicatorNuclearNormUnitBall(self.domain, outer_conj, singular_conj)
Convex conjugate of the nuclear norm. The convex conjugate is the indicator function on the unit ball of the dual norm where the dual norm is obtained by taking the conjugate exponent of both the outer and singular vector exponents.
def DeletePermission(self, permission_link, options=None):
    """Deletes a permission.

    :param str permission_link: The link to the permission.
    :param dict options: The request options for the request.

    :return: The deleted Permission.
    :rtype: dict
    """
    if options is None:
        options = {}
    resource_path = base.GetPathFromLink(permission_link)
    resource_id = base.GetResourceIdOrFullNameFromLink(permission_link)
    return self.DeleteResource(resource_path, 'permissions', resource_id, None, options)
Deletes a permission. :param str permission_link: The link to the permission. :param dict options: The request options for the request. :return: The deleted Permission. :rtype: dict
def repr_part(self):
    """String usable in a space's ``__repr__`` method."""
    # Arguments are shown only when they differ from their defaults.
    optional_args = [
        ('weighting', self.const, 1.0),
        ('exponent', self.exponent, 2.0),
    ]
    return signature_string([], optional_args, mod=':.4')
String usable in a space's ``__repr__`` method.
def undo(self, hard=False):
    """Makes last commit not exist (``git reset HEAD^``).

    In fake mode the command is only echoed, never executed.
    """
    if self.fake:
        click.echo(crayons.red('Faked! >>> git reset {}{}'
                   .format('--hard ' if hard else '', 'HEAD^')))
        return 0
    return self.repo.git.reset('HEAD^', working_tree=hard)
Makes last commit not exist
def run():
    """CLI main entry point.

    Builds the top-level argparse parser with the upload/download/sync/
    run/scan sub-commands, parses the command line, constructs the local
    and remote targets, and dispatches to the matching Synchronizer.
    """
    # Use print() instead of logging when running in CLI mode:
    set_pyftpsync_logger(None)

    parser = argparse.ArgumentParser(
        description="Synchronize folders over FTP.",
        epilog="See also https://github.com/mar10/pyftpsync",
        parents=[verbose_parser],
    )

    # Note: we want to allow --version to be combined with --verbose. However
    # on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.
    if check_cli_verbose(3) > 3:
        version_info = "pyftpsync/{} Python/{} {}".format(
            __version__, PYTHON_VERSION, platform.platform()
        )
    else:
        version_info = "{}".format(__version__)
    parser.add_argument("-V", "--version", action="version", version=version_info)

    subparsers = parser.add_subparsers(help="sub-command help")

    # --- Create the parser for the "upload" command ---------------------------

    sp = subparsers.add_parser(
        "upload",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="copy new and modified files to remote folder",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--force",
        action="store_true",
        help="overwrite remote files, even if the target is newer "
        "(but no conflict was detected)",
    )
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["local", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.add_argument(
        "--delete",
        action="store_true",
        help="remove remote files if they don't exist locally",
    )
    sp.add_argument(
        "--delete-unmatched",
        action="store_true",
        help="remove remote files if they don't exist locally "
        "or don't match the current filter (implies '--delete' option)",
    )
    sp.set_defaults(command="upload")

    # --- Create the parser for the "download" command -------------------------

    sp = subparsers.add_parser(
        "download",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="copy new and modified files from remote folder to local target",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--force",
        action="store_true",
        help="overwrite local files, even if the target is newer "
        "(but no conflict was detected)",
    )
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["remote", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.add_argument(
        "--delete",
        action="store_true",
        help="remove local files if they don't exist on remote target",
    )
    sp.add_argument(
        "--delete-unmatched",
        action="store_true",
        help="remove local files if they don't exist on remote target "
        "or don't match the current filter (implies '--delete' option)",
    )
    sp.set_defaults(command="download")

    # --- Create the parser for the "sync" command -----------------------------

    sp = subparsers.add_parser(
        "sync",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="synchronize new and modified files between remote folder and local target",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["old", "new", "local", "remote", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.set_defaults(command="sync")

    # --- Create the parser for the "run" command -----------------------------

    add_run_parser(subparsers)

    # --- Create the parser for the "scan" command -----------------------------

    add_scan_parser(subparsers)

    # --- Parse command line ---------------------------------------------------

    args = parser.parse_args()

    args.verbose -= args.quiet
    del args.quiet

    # print("verbose", args.verbose)
    ftp_debug = 0
    if args.verbose >= 6:
        ftp_debug = 1

    # Modify the `args` from the `pyftpsync.yaml` config:
    if getattr(args, "command", None) == "run":
        handle_run_command(parser, args)

    if callable(getattr(args, "command", None)):
        # scan_handler
        try:
            return args.command(parser, args)
        except KeyboardInterrupt:
            print("\nAborted by user.", file=sys.stderr)
            sys.exit(3)
    elif not hasattr(args, "command"):
        parser.error(
            "missing command (choose from 'upload', 'download', 'run', 'sync', 'scan')"
        )

    # Post-process and check arguments
    if hasattr(args, "delete_unmatched") and args.delete_unmatched:
        args.delete = True

    args.local_target = make_target(args.local, {"ftp_debug": ftp_debug})

    if args.remote == ".":
        parser.error("'.' is expected to be the local target (not remote)")
    args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug})
    if not isinstance(args.local_target, FsTarget) and isinstance(
        args.remote_target, FsTarget
    ):
        parser.error("a file system target is expected to be local")

    # Let the command handler do its thing
    opts = namespace_to_dict(args)
    if args.command == "upload":
        s = UploadSynchronizer(args.local_target, args.remote_target, opts)
    elif args.command == "download":
        s = DownloadSynchronizer(args.local_target, args.remote_target, opts)
    elif args.command == "sync":
        s = BiDirSynchronizer(args.local_target, args.remote_target, opts)
    else:
        parser.error("unknown command '{}'".format(args.command))

    s.is_script = True

    try:
        s.run()
    except KeyboardInterrupt:
        print("\nAborted by user.", file=sys.stderr)
        sys.exit(3)
    finally:
        # Prevent sporadic exceptions in ftplib, when closing in __del__
        s.local.close()
        s.remote.close()

    stats = s.get_stats()
    if args.verbose >= 5:
        pprint(stats)
    elif args.verbose >= 1:
        if args.dry_run:
            print("(DRY-RUN) ", end="")
        print(
            "Wrote {}/{} files in {} directories, skipped: {}.".format(
                stats["files_written"],
                stats["local_files"],
                stats["local_dirs"],
                stats["conflict_files_skipped"],
            ),
            end="",
        )
        if stats["interactive_ask"]:
            print()
        else:
            print(" Elap: {}.".format(stats["elap_str"]))
    return
CLI main entry point.
def add_request_log_fields(
        self, log_fields: LogFields, call_details: Union[grpc.HandlerCallDetails, grpc.ClientCallDetails]
):
    """Attach request-level gRPC metadata to the provided log fields.

    :param log_fields: log fields instance to which to add the fields
    :param call_details: some information regarding the call
    """
    # The method string has the form "/package.Service/Method"; drop the
    # leading slash, then split the service from the method name.
    qualified_method = call_details.method[1:]
    service_name, method_name = qualified_method.split("/")
    fields = {
        "system": "grpc",
        "span.kind": self.KIND,
        "grpc.service": service_name,
        "grpc.method": method_name,
    }
    log_fields.add_fields(fields)
Add log fields related to a request to the provided log fields :param log_fields: log fields instance to which to add the fields :param call_details: some information regarding the call
def output_callback(self, line, kill_switch):
    """Update the OpenVPN status flags according to one line of process output."""
    # Accumulate a full transcript of everything the process printed.
    self.notifications += line + "\n"
    # Map output markers onto the status attribute they switch on.
    markers = (
        (("Initialization Sequence Completed",), "started"),
        (("ERROR:", "Cannot resolve host address:"), "error"),
        (("process exiting",), "stopped"),
    )
    for needles, attr in markers:
        if any(needle in line for needle in needles):
            setattr(self, attr, True)
Set status of openvpn according to what we process
def drawAxis(self, painter, rect, axis):
    """
    Draws the given axis within *rect* using *painter*.

    A vertical axis is drawn along the right edge of *rect* with
    right-aligned labels and a title rotated 90 degrees; a horizontal
    axis is drawn along the top edge with centered labels.

    :param      painter | <QPainter>
                rect    | <QRect>
                axis    | axis object or None (no-op when None)
    """
    if not axis:
        return

    # draw the axis lines with a thick pen in the configured axis color
    painter.save()
    pen = QPen(self.axisColor())
    pen.setWidth(3)
    painter.setPen(pen)

    # draw the vertical line
    if axis.orientation() == Qt.Vertical:
        line = QLineF(rect.right(), rect.top(), rect.right(), rect.bottom())
        painter.drawLine(line)

        painter.setFont(axis.labelFont())
        # labels were precomputed into the build data as (y, height, text)
        for y, height, label in self._buildData.get('grid_h_labels', []):
            painter.drawText(0, y - height / 2.0, rect.width() - 3, height,
                             Qt.AlignRight | Qt.AlignVCenter, label)

        # rotate the painter so the axis title reads bottom-to-top
        painter.translate(0, rect.center().y())
        painter.rotate(-90)
        painter.setFont(axis.titleFont())
        painter.drawText(-rect.height() / 2, 0, rect.height(), rect.width(),
                         Qt.AlignHCenter | Qt.AlignTop, axis.title())

    # draw the horizontal line
    else:
        line = QLineF(rect.left(), rect.top(), rect.right(), rect.top())
        painter.setFont(axis.titleFont())
        painter.drawText(rect, Qt.AlignHCenter | Qt.AlignBottom, axis.title())
        painter.drawLine(line)

        painter.setFont(axis.labelFont())
        # labels were precomputed into the build data as (x, width, text)
        for x, width, label in self._buildData.get('grid_v_labels', []):
            painter.drawText(x - width / 2.0, 3, width, rect.height() - 6,
                             Qt.AlignHCenter | Qt.AlignTop, label)

    painter.restore()
Draws the axis for the given painter. :param painter | <QPainter> rect | <QRect>
def focusout(self, event):
    """Restore the unfocused border styling when the widget loses focus."""
    state = ("!focus",)
    # Pull the theme colors a plain TEntry uses while unfocused ...
    border = self.style.lookup("TEntry", "bordercolor", state)
    dark = self.style.lookup("TEntry", "darkcolor", state)
    light = self.style.lookup("TEntry", "lightcolor", state)
    # ... and apply them to this spinbox's surrounding frame.
    self.style.configure("%s.spinbox.TFrame" % self.frame,
                         bordercolor=border,
                         darkcolor=dark,
                         lightcolor=light)
Change style on focus out events.
def console_new(w: int, h: int) -> tcod.console.Console:
    """Return an offscreen console of size: w,h.

    .. deprecated:: 8.5
        Create new consoles using :any:`tcod.console.Console` instead of
        this function.
    """
    # Thin deprecated wrapper kept for backwards compatibility.
    console = tcod.console.Console(w, h)
    return console
Return an offscreen console of size: w,h. .. deprecated:: 8.5 Create new consoles using :any:`tcod.console.Console` instead of this function.
def repl_proc(self, inputstring, log=True, **kwargs):
    """Run *inputstring* through the configured replacement processors."""
    procs = self.replprocs
    return self.apply_procs(procs, kwargs, inputstring, log=log)
Process using replprocs.
def discrete(self, vertices, scale=1.0):
    """
    Discretize into a world-space path.

    Parameters
    ------------
    vertices : (n, dimension) float
      Points in space
    scale : float
      Size of overall scene for numerical comparisons
      (unused by this entity type)

    Returns
    -------------
    discrete : (m, dimension) float
      Path in space composed of line segments
    """
    # Look up this entity's vertex locations and hand them to the
    # orientation helper, which may reverse their order.
    path_points = vertices[self.points]
    return self._orient(path_points)
Discretize into a world- space path. Parameters ------------ vertices: (n, dimension) float Points in space scale : float Size of overall scene for numerical comparisons Returns ------------- discrete: (m, dimension) float Path in space composed of line segments
def analyze_cluster_size_per_scan_parameter(input_file_hits, output_file_cluster_size, parameter='GDAC', max_chunk_size=10000000, overwrite_output_files=False, output_pdf=None):
    ''' This method takes multiple hit files and determines the cluster size for different scan parameter values.

    Parameters
    ----------
    input_files_hits: string
    output_file_cluster_size: string
        The data file with the results
    parameter: string
        The name of the parameter to separate the data into (e.g.: PlsrDAC)
    max_chunk_size: int
        the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer
    overwrite_output_files: bool
        Set to true to overwrite the output file if it already exists
    output_pdf: PdfPages
        PdfPages file object, if none the plot is printed to screen, if False nothing is printed
    '''
    logging.info('Analyze the cluster sizes for different ' + parameter + ' settings for ' + input_file_hits)
    if os.path.isfile(output_file_cluster_size) and not overwrite_output_files:  # skip analysis if already done
        logging.info('Analyzed cluster size file ' + output_file_cluster_size + ' already exists. Skip cluster size analysis.')
    else:
        with tb.open_file(output_file_cluster_size, mode="w") as out_file_h5:  # file to write the data into
            filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)  # compression of the written data
            parameter_goup = out_file_h5.create_group(out_file_h5.root, parameter, title=parameter)  # node to store the per-parameter data under
            cluster_size_total = None  # final array for the cluster size per GDAC
            with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5:  # open the actual hit file
                meta_data_array = in_hit_file_h5.root.meta_data[:]
                scan_parameter = analysis_utils.get_scan_parameter(meta_data_array)  # get the scan parameters
                if scan_parameter:  # if a GDAC scan parameter was used analyze the cluster size per GDAC setting
                    scan_parameter_values = scan_parameter[parameter]  # scan parameter settings used
                    if len(scan_parameter_values) == 1:  # only analyze per scan step if there are more than one scan step
                        logging.warning('The file ' + str(input_file_hits) + ' has no different ' + str(parameter) + ' parameter values. Omit analysis.')
                    else:
                        logging.info('Analyze ' + input_file_hits + ' per scan parameter ' + parameter + ' for ' + str(len(scan_parameter_values)) + ' values from ' + str(np.amin(scan_parameter_values)) + ' to ' + str(np.amax(scan_parameter_values)))
                        event_numbers = analysis_utils.get_meta_data_at_scan_parameter(meta_data_array, parameter)['event_number']  # get the event numbers in meta_data where the scan parameter changes
                        parameter_ranges = np.column_stack((scan_parameter_values, analysis_utils.get_ranges_from_array(event_numbers)))
                        hit_table = in_hit_file_h5.root.Hits
                        analysis_utils.index_event_number(hit_table)
                        total_hits, total_hits_2, index = 0, 0, 0
                        chunk_size = max_chunk_size
                        # initialize the analysis and set settings
                        analyze_data = AnalyzeRawData()
                        analyze_data.create_cluster_size_hist = True
                        analyze_data.create_cluster_tot_hist = True
                        analyze_data.histogram.set_no_scan_parameter()  # one has to tell histogram the # of scan parameters for correct occupancy hist allocation
                        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80)
                        progress_bar.start()
                        for parameter_index, parameter_range in enumerate(parameter_ranges):  # loop over the selected events
                            analyze_data.reset()  # resets the data of the last analysis
                            logging.debug('Analyze GDAC = ' + str(parameter_range[0]) + ' ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%')
                            start_event_number = parameter_range[1]
                            stop_event_number = parameter_range[2]
                            logging.debug('Data from events = [' + str(start_event_number) + ',' + str(stop_event_number) + '[')
                            actual_parameter_group = out_file_h5.create_group(parameter_goup, name=parameter + '_' + str(parameter_range[0]), title=parameter + '_' + str(parameter_range[0]))
                            # loop over the hits in the actual selected events with optimizations: variable chunk size, start word index given
                            readout_hit_len = 0  # variable to calculate a optimal chunk size value from the number of hits for speed up
                            for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, chunk_size=chunk_size):
                                total_hits += hits.shape[0]
                                analyze_data.analyze_hits(hits)  # analyze the selected hits in chunks
                                readout_hit_len += hits.shape[0]
                                progress_bar.update(index)
                            # to increase the readout speed, estimated the number of hits for one read instruction
                            chunk_size = int(1.05 * readout_hit_len) if int(1.05 * readout_hit_len) < max_chunk_size else max_chunk_size
                            if chunk_size < 50:  # limit the lower chunk size, there can always be a crazy event with more than 20 hits
                                chunk_size = 50
                            # get occupancy hist
                            occupancy = analyze_data.histogram.get_occupancy()  # just check here if histogram is consistent
                            # store and plot cluster size hist
                            cluster_size_hist = analyze_data.clusterizer.get_cluster_size_hist()
                            cluster_size_hist_table = out_file_h5.create_carray(actual_parameter_group, name='HistClusterSize', title='Cluster Size Histogram', atom=tb.Atom.from_dtype(cluster_size_hist.dtype), shape=cluster_size_hist.shape, filters=filter_table)
                            cluster_size_hist_table[:] = cluster_size_hist
                            if output_pdf is not False:
                                plotting.plot_cluster_size(hist=cluster_size_hist, title='Cluster size (' + str(np.sum(cluster_size_hist)) + ' entries) for ' + parameter + ' = ' + str(scan_parameter_values[parameter_index]), filename=output_pdf)
                            if cluster_size_total is None:  # true if no data was appended to the array yet
                                cluster_size_total = cluster_size_hist
                            else:
                                cluster_size_total = np.vstack([cluster_size_total, cluster_size_hist])
                            total_hits_2 += np.sum(occupancy)
                        progress_bar.finish()
                        if total_hits != total_hits_2:
                            logging.warning('Analysis shows inconsistent number of hits. Check needed!')
                        logging.info('Analyzed %d hits!', total_hits)
            # write the stacked per-parameter histograms as one final array
            cluster_size_total_out = out_file_h5.create_carray(out_file_h5.root, name='AllHistClusterSize', title='All Cluster Size Histograms', atom=tb.Atom.from_dtype(cluster_size_total.dtype), shape=cluster_size_total.shape, filters=filter_table)
            cluster_size_total_out[:] = cluster_size_total
This method takes multiple hit files and determines the cluster size for different scan parameter values of Parameters ---------- input_files_hits: string output_file_cluster_size: string The data file with the results parameter: string The name of the parameter to separate the data into (e.g.: PlsrDAC) max_chunk_size: int the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer overwrite_output_files: bool Set to true to overwrite the output file if it already exists output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen, if False nothing is printed
def get_excluded_categories():
    """Get excluded category IDs."""
    from indico_livesync.plugin import LiveSyncPlugin
    excluded = LiveSyncPlugin.settings.get('excluded_categories')
    # Settings store entries as dicts; normalize the ids to plain ints.
    return {int(entry['id']) for entry in excluded}
Get excluded category IDs.
def del_value(self, keys, complete=False, on_projects=False, on_globals=False,
              projectname=None, base='', dtype=None, **kwargs):
    """
    Delete a value in the configuration

    Parameters
    ----------
    keys: list of str
        A list of keys to be deleted. %(get_value_note)s
    %(ModelOrganizer.info.common_params)s
    base: str
        A base string that shall be put in front of each key in `values`
        to avoid typing it all the time
    """
    config = self.info(complete=complete, on_projects=on_projects,
                       on_globals=on_globals, projectname=projectname,
                       return_dict=True, insert_id=False, **kwargs)
    for raw_key in keys:
        full_key = base + raw_key if base else raw_key
        # Resolve the dotted path to its containing mapping, then drop
        # the leaf entry from it.
        leaf_key, sub_config = utils.go_through_dict(full_key, config)
        del sub_config[leaf_key]
Delete a value in the configuration Parameters ---------- keys: list of str A list of keys to be deleted. %(get_value_note)s %(ModelOrganizer.info.common_params)s base: str A base string that shall be put in front of each key in `values` to avoid typing it all the time
def get_instance(uri):
    """Return an instance of MediaFile."""
    global _instances
    instance = _instances.get(uri)
    if instance is None:
        # Not cached yet: build one against the shared client and remember it.
        instance = MediaFile(uri, client.get_instance())
        _instances[uri] = instance
    return instance
Return an instance of MediaFile.
def get_pixel(self, x: int, y: int) -> Tuple[int, int, int]:
    """Get the color of a pixel in this Image.

    Args:
        x (int): X pixel of the Image.  Starting from the left at 0.
        y (int): Y pixel of the Image.  Starting from the top at 0.

    Returns:
        Tuple[int, int, int]:
            An (r, g, b) tuple containing the pixels color value.
            Values are in a 0 to 255 range.
    """
    # The C call returns a TCOD_color_t struct; unpack its channels.
    rgb = lib.TCOD_image_get_pixel(self.image_c, x, y)
    return (rgb.r, rgb.g, rgb.b)
Get the color of a pixel in this Image. Args: x (int): X pixel of the Image. Starting from the left at 0. y (int): Y pixel of the Image. Starting from the top at 0. Returns: Tuple[int, int, int]: An (r, g, b) tuple containing the pixels color value. Values are in a 0 to 255 range.
def replace_label(self, oldLabel, newLabel):
    """Replace every whole-word occurrence of *oldLabel* in the stored
    asm text with *newLabel*.

    The scan resumes just past each substitution, so occurrences of
    *oldLabel* that appear inside an inserted *newLabel* are never
    re-matched.

    :param oldLabel: label text currently present in ``self.asm``
    :param newLabel: replacement label text
    """
    if oldLabel == newLabel:
        return

    # BUG FIX: escape the label so metacharacters (e.g. '.' in '.loop')
    # are matched literally instead of being interpreted as a pattern.
    pattern = re.compile(r'\b' + re.escape(oldLabel) + r'\b')
    last = 0
    new_len = len(newLabel)
    while True:
        match = pattern.search(self.asm[last:])
        if not match:
            break
        txt = self.asm
        self.asm = txt[:last + match.start()] + newLabel + txt[last + match.end():]
        # Continue scanning right after the text we just inserted.
        last += match.start() + new_len
Replaces old label with a new one
def _get_maxcov_downsample(data):
    """Calculate maximum coverage downsampling for whole genome samples.

    Returns None if we're not doing downsampling.
    """
    from bcbio.bam import ref
    from bcbio.ngsalign import alignprep, bwa
    from bcbio.variation import coverage
    fastq_file = data["files"][0]
    params = alignprep.get_downsample_params(data)
    if not params:
        return None
    num_reads = alignprep.total_reads_from_grabix(fastq_file)
    if not num_reads:
        return None
    # Work out how much of the genome is callable.
    vrs = dd.get_variant_regions_merged(data)
    total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])])
    if vrs:
        callable_size = pybedtools.BedTool(vrs).total_coverage()
        genome_cov_pct = callable_size / float(total_size)
    else:
        callable_size = total_size
        genome_cov_pct = 1.0
    whole_genome = dd.get_coverage_interval(data) in ["genome", None, False]
    if not (genome_cov_pct > coverage.GENOME_COV_THRESH and whole_genome):
        return None
    # Estimate the mean read length from a sample of the fastq file.
    total_counts, total_sizes = 0, 0
    for count, size in bwa.fastq_size_output(fastq_file, 5000):
        total_counts += int(count)
        total_sizes += (int(size) * int(count))
    read_size = float(total_sizes) / float(total_counts)
    avg_cov = float(num_reads * read_size) / callable_size
    if avg_cov >= params["min_coverage_for_downsampling"]:
        return int(avg_cov * params["maxcov_downsample_multiplier"])
    return None
Calculate maximum coverage downsampling for whole genome samples. Returns None if we're not doing downsampling.
def get(self, **params):
    '''
    Returns details for a specific offer.

    .. code-block:: python

        amadeus.shopping.hotel_offer('XXX').get

    :rtype: amadeus.Response
    :raises amadeus.ResponseError: if the request could not be completed
    '''
    endpoint = '/v2/shopping/hotel-offers/{0}'.format(self.offer_id)
    return self.client.get(endpoint, **params)
Returns details for a specific offer. .. code-block:: python amadeus.shopping.hotel_offer('XXX').get :rtype: amadeus.Response :raises amadeus.ResponseError: if the request could not be completed
def datetime_to_ns(then):
    """Transform a :any:`datetime.datetime` into a NationStates-style string.

    For example "6 days ago", "105 minutes ago", etc.

    :param then: a naive UTC datetime; the Unix epoch maps to 'Antiquity'
    :returns: a human-readable relative-time string
    """
    if then == datetime(1970, 1, 1, 0, 0):
        return 'Antiquity'
    now = datetime.utcnow()
    delta = now - then
    seconds = delta.total_seconds()
    # Decompose the interval; a "year" is approximated as 365 days.
    years, seconds = divmod(seconds, 60*60*24*365)
    days, seconds = divmod(seconds, 60*60*24)
    hours, seconds = divmod(seconds, 60*60)
    minutes, seconds = divmod(seconds, 60)
    years = int(years)
    days = int(days)
    hours = int(hours)
    minutes = int(minutes)
    seconds = round(seconds)
    if years > 1:
        if days > 1:
            return f'{years} years {days} days ago'
        elif days == 1:
            # BUG FIX: this literal and the one below were missing the
            # f prefix and rendered the raw '{years}' placeholder.
            return f'{years} years 1 day ago'
        return f'{years} years ago'
    if years == 1:
        if days > 1:
            return f'1 year {days} days ago'
        elif days == 1:
            return '1 year 1 day ago'
        return '1 year ago'
    if days > 3:
        return f'{days} days ago'
    if days > 1:
        if hours > 1:
            return f'{days} days {hours} hours ago'
        elif hours == 1:
            return f'{days} days 1 hour ago'
        return f'{days} days ago'
    if days == 1:
        if hours > 1:
            return f'1 day {hours} hours ago'
        elif hours == 1:
            return '1 day 1 hour ago'
        return '1 day ago'
    if hours > 1:
        return f'{hours} hours ago'
    if hours == 1:
        # NationStates quirk: express "1 hour something" in minutes.
        return f'{minutes + 60} minutes ago'
    if minutes > 1:
        return f'{minutes} minutes ago'
    if minutes == 1:
        return '1 minute ago'
    return 'Seconds ago'
Transform a :any:`datetime.datetime` into a NationStates-style string. For example "6 days ago", "105 minutes ago", etc.
def to_df(self) -> pd.DataFrame:
    """Convert to pandas dataframe."""
    frame = pd.DataFrame(index=RangeIndex(0, self.shape[0], name=None))
    for key in self.keys():
        matrix = self[key]
        # One dataframe column per matrix column, numbered from 1.
        for position, column in enumerate(matrix.T):
            frame['{}{}'.format(key, position + 1)] = column
    return frame
Convert to pandas dataframe.
def _get_object_as_soft(self):
    """Get object as SOFT formatted string."""
    lines = []
    # The parent database record, when present, comes first.
    if self.database is not None:
        lines.append(self.database._get_object_as_soft())
    lines.append("^%s = %s" % (self.geotype, self.name))
    lines.append(self._get_metadata_as_string())
    # Then every sample followed by every platform, each as its own block.
    for gsm in self.gsms.values():
        lines.append(gsm._get_object_as_soft())
    for gpl in self.gpls.values():
        lines.append(gpl._get_object_as_soft())
    return "\n".join(lines)
Get object as SOFT formatted string.
def assert_no_js_errors(self):
    """ Asserts that there are no JavaScript "SEVERE"-level page errors.
        Works ONLY for Chrome (non-headless) and Chrome-based browsers.
        Does NOT work on Firefox, Edge, IE, and some other browsers:
             * See https://github.com/SeleniumHQ/selenium/issues/1161
        Based on the following Stack Overflow solution:
             * https://stackoverflow.com/a/41150512/7058266
    """
    try:
        browser_logs = self.driver.get_log('browser')
    except (ValueError, WebDriverException):
        # If unable to get browser logs, skip the assert and return.
        return

    messenger_library = "//cdnjs.cloudflare.com/ajax/libs/messenger"
    # Keep only severe entries that were not caused by SeleniumBase
    # dependencies (the messenger library).
    errors = [
        entry for entry in browser_logs
        if entry['level'] == 'SEVERE'
        and messenger_library not in entry['message']
    ]
    if errors:
        current_url = self.get_current_url()
        raise Exception(
            "JavaScript errors found on %s => %s" % (current_url, errors))
Asserts that there are no JavaScript "SEVERE"-level page errors. Works ONLY for Chrome (non-headless) and Chrome-based browsers. Does NOT work on Firefox, Edge, IE, and some other browsers: * See https://github.com/SeleniumHQ/selenium/issues/1161 Based on the following Stack Overflow solution: * https://stackoverflow.com/a/41150512/7058266
def register_languages():
    """Register all supported languages to ensure compatibility."""
    # English is the default, so only the other languages need a stemmer.
    extra_languages = set(SUPPORTED_LANGUAGES)
    extra_languages.discard("en")
    for language in extra_languages:
        language_stemmer = partial(nltk_stemmer, get_language_stemmer(language))
        Pipeline.register_function(language_stemmer, "stemmer-{}".format(language))
Register all supported languages to ensure compatibility.
def update(cls, whitelist_sdd_id, monetary_account_paying_id=None,
           maximum_amount_per_month=None, custom_headers=None):
    """
    :type user_id: int
    :type whitelist_sdd_id: int
    :param monetary_account_paying_id: ID of the monetary account of which
    you want to pay from.
    :type monetary_account_paying_id: int
    :param maximum_amount_per_month: The maximum amount of money that is
    allowed to be deducted based on the whitelist.
    :type maximum_amount_per_month: object_.Amount
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseInt
    """
    custom_headers = {} if custom_headers is None else custom_headers
    api_client = client.ApiClient(cls._get_api_context())

    # Serialize only the updatable fields, dropping unset ones.
    field_map = {
        cls.FIELD_MONETARY_ACCOUNT_PAYING_ID: monetary_account_paying_id,
        cls.FIELD_MAXIMUM_AMOUNT_PER_MONTH: maximum_amount_per_month
    }
    payload = cls._remove_field_for_request(converter.class_to_json(field_map))
    request_bytes = payload.encode()

    endpoint_url = cls._ENDPOINT_URL_UPDATE.format(cls._determine_user_id(),
                                                   whitelist_sdd_id)
    response_raw = api_client.put(endpoint_url, request_bytes, custom_headers)

    return BunqResponseInt.cast_from_bunq_response(
        cls._process_for_id(response_raw)
    )
:type user_id: int :type whitelist_sdd_id: int :param monetary_account_paying_id: ID of the monetary account of which you want to pay from. :type monetary_account_paying_id: int :param maximum_amount_per_month: The maximum amount of money that is allowed to be deducted based on the whitelist. :type maximum_amount_per_month: object_.Amount :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
def stop(self):
    """Stop this gateway agent."""
    disconnector = self._disconnector
    if disconnector:
        disconnector.stop()
    # Always drop the client connection, even without a disconnector.
    self.client.disconnect()
Stop this gateway agent.
def upload_rpm(rpm_path, repoid, connector, callback=None):
    """upload an rpm into pulp

    rpm_path: path to an rpm
    repoid: id of the pulp repository to upload into
    connector: the connector to use for interacting with pulp
    callback: Optional callback to call after an RPM is uploaded.
        Callback should accept one argument, the name of the RPM
        which was uploaded

    Returns the pulp package id of the uploaded RPM.
    """
    ts = rpm.TransactionSet()
    # Skip signature verification so unsigned packages can be read too.
    ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)

    info = rpm_info(rpm_path)
    pkg_name = info['name']
    nvrea = info['nvrea']
    cksum = info['cksum']
    size = info['size']
    package_basename = info['package_basename']

    juicer.utils.Log.log_notice("Expected amount to seek: %s (package size by os.path.getsize)" % size)

    # initiate upload
    upload = juicer.utils.Upload.Upload(package_basename, cksum, size, repoid, connector)

    #create a statusbar
    pbar = ProgressBar(size)

    # read in rpm and send it to pulp in fixed-size chunks
    total_seeked = 0
    rpm_fd = open(rpm_path, 'rb')
    rpm_fd.seek(0)
    while total_seeked < size:
        rpm_data = rpm_fd.read(Constants.UPLOAD_AT_ONCE)
        last_offset = total_seeked
        total_seeked += len(rpm_data)
        juicer.utils.Log.log_notice("Seeked %s data... (total seeked: %s)" % (len(rpm_data), total_seeked))
        upload_code = upload.append(fdata=rpm_data, offset=last_offset)
        if upload_code != Constants.PULP_PUT_OK:
            # NOTE(review): a failed chunk is only logged; the loop keeps
            # going — presumably relying on pulp-side validation. Confirm.
            juicer.utils.Log.log_error("Upload failed.")
        pbar.update(len(rpm_data))
    pbar.finish()
    rpm_fd.close()

    juicer.utils.Log.log_notice("Seeked total data: %s" % total_seeked)

    # finalize upload
    rpm_id = upload.import_upload(nvrea=nvrea, rpm_name=pkg_name)

    juicer.utils.Log.log_debug("RPM upload complete. New 'packageid': %s" % rpm_id)

    # clean up working dir
    upload.clean_upload()

    # Run callbacks?
    if callback:
        try:
            juicer.utils.Log.log_debug("Calling upload callack: %s" % str(callback))
            callback(pkg_name)
        except Exception:
            # Callback failures must never abort a completed upload.
            juicer.utils.Log.log_error("Exception raised in callback: %s", str(callback))
            pass

    return rpm_id
upload an rpm into pulp rpm_path: path to an rpm connector: the connector to use for interacting with pulp callback: Optional callback to call after an RPM is uploaded. Callback should accept one argument, the name of the RPM which was uploaded
def conv_precip_frac(precip_largescale, precip_convective):
    """Fraction of total precip that is from convection parameterization.

    Parameters
    ----------
    precip_largescale, precip_convective : xarray.DataArrays
        Precipitation from grid-scale condensation and from convective
        parameterization, respectively.

    Returns
    -------
    xarray.DataArray
    """
    denominator = total_precip(precip_largescale, precip_convective)
    # `where` masks zero totals (to NaN) so the division never hits 0/0.
    return precip_convective / denominator.where(denominator)
Fraction of total precip that is from convection parameterization. Parameters ---------- precip_largescale, precip_convective : xarray.DataArrays Precipitation from grid-scale condensation and from convective parameterization, respectively. Returns ------- xarray.DataArray
def render_template(content, variables):
    """
    Return a bytestring representing a templated file based on the
    input (content) and the variable names defined (vars).
    """
    fsenc = sys.getfilesystemencoding()

    def to_native(s, encoding='latin-1', errors='strict'):
        # Normalize to the interpreter's native `str` type.
        if six.PY3:
            if isinstance(s, six.text_type):
                return s
            return str(s, encoding, errors)
        if isinstance(s, six.text_type):
            return s.encode(encoding, errors)
        return str(s)

    rendered = Template(to_native(content, fsenc)).substitute(variables)
    # Always hand back bytes, encoded with the filesystem encoding.
    if isinstance(rendered, six.text_type):
        rendered = rendered.encode(fsenc, 'strict')
    return rendered
Return a bytestring representing a templated file based on the input (content) and the variable names defined (vars).
def start_service(conn, service='ceph'):
    """
    Start a service on a remote host depending on the type of init system.
    Obviously, this should be done for RHEL/Fedora/CentOS systems.

    This function does not do any kind of detection.
    """
    # Only systemd hosts are handled; on other init systems this is a no-op.
    if is_systemd(conn):
        remoto.process.run(
            conn,
            [
                'systemctl',
                'start',
                '{service}'.format(service=service),
            ]
        )
Start a service on a remote host depending on the type of init system. Obviously, this should be done for RHEL/Fedora/CentOS systems. This function does not do any kind of detection.
def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               config_file):
    '''
    Execute LDAP searches and return the aggregated data
    '''
    # Render the (jinja-templated) configuration file; any failure here
    # degrades to "no config" and an empty pillar.
    config_template = None
    try:
        config_template = _render_template(config_file)
    except jinja2.exceptions.TemplateNotFound:
        log.debug('pillar_ldap: missing configuration file %s', config_file)
    except Exception:
        log.debug('pillar_ldap: failed to render template for %s',
                  config_file, exc_info=True)

    if not config_template:
        # We don't have a config file
        return {}

    import salt.utils.yaml
    try:
        opts = salt.utils.yaml.safe_load(config_template) or {}
        opts['conf_file'] = config_file
    except Exception as err:
        import salt.log
        msg = 'pillar_ldap: error parsing configuration file: {0} - {1}'.format(
            config_file, err
        )
        if salt.log.is_console_configured():
            log.warning(msg)
        else:
            print(msg)
        return {}
    else:
        if not isinstance(opts, dict):
            log.warning(
                'pillar_ldap: %s is invalidly formatted, must be a YAML '
                'dictionary. See the documentation for more information.',
                config_file
            )
            return {}

    if 'search_order' not in opts:
        log.warning(
            'pillar_ldap: search_order missing from configuration. See the '
            'documentation for more information.'
        )
        return {}

    # Run each configured search in order; results from later sources are
    # merged over earlier ones by _result_to_dict.
    data = {}
    for source in opts['search_order']:
        config = opts[source]
        result = _do_search(config)
        log.debug('source %s got result %s', source, result)
        if result:
            data = _result_to_dict(data, result, config, source)
    return data
Execute LDAP searches and return the aggregated data
def adjust_hours_view(request, semester):
    """
    Adjust members' workshift hours requirements.

    Renders one AdjustHoursForm per (workshifter, pool) pair; on a POST
    where every form validates, saves them all and redirects back to
    this page, otherwise re-renders with validation errors.
    """
    page_name = "Adjust Hours"
    pools = WorkshiftPool.objects.filter(semester=semester).order_by(
        "-is_primary", "title",
    )
    workshifters = WorkshiftProfile.objects.filter(semester=semester)
    # pool_hour_forms[i] holds, for workshifter i, a list of
    # (form, pool_hours) tuples — one per pool.
    pool_hour_forms = []
    for workshifter in workshifters:
        forms_list = []
        for pool in pools:
            hours = workshifter.pool_hours.get(pool=pool)
            forms_list.append((
                AdjustHoursForm(
                    data=request.POST or None,
                    # Prefix keeps each form's fields unique in the page.
                    prefix="pool_hours-{}".format(hours.pk),
                    instance=hours,
                ),
                hours,
            ))
        pool_hour_forms.append(forms_list)
    # All-or-nothing: only save when every single form validates.
    if all(
        form.is_valid()
        for workshifter_forms in pool_hour_forms
        for form, pool_hours in workshifter_forms
    ):
        for workshifter_forms in pool_hour_forms:
            for form, pool_hours in workshifter_forms:
                form.save()
        messages.add_message(request, messages.INFO, "Updated hours.")
        return HttpResponseRedirect(wurl(
            "workshift:adjust_hours",
            sem_url=semester.sem_url,
        ))
    return render_to_response("adjust_hours.html", {
        "page_name": page_name,
        "pools": pools,
        "workshifters_tuples": zip(workshifters, pool_hour_forms),
    }, context_instance=RequestContext(request))
Adjust members' workshift hours requirements.
def find_trigger_value(psd_var, idx, start, sample_rate):
    """ Find the PSD variation value at a particular time

    Parameters
    ----------
    psd_var : TimeSeries
        Time series of the variability in the PSD estimation
    idx : numpy.ndarray
        Time indices of the triggers
    start : float
        GPS start time
    sample_rate : float
        Sample rate defined in ini file

    Returns
    -------
    vals : Array
        PSD variation value at a particular time
    """
    # Convert trigger indices into GPS times.
    trigger_times = start + idx / sample_rate
    # Locate the variation sample covering each trigger time; `digitize`
    # returns the right-hand bin index, so step back one to index it.
    positions = numpy.digitize(trigger_times, psd_var.sample_times) - 1
    return psd_var[positions]
Find the PSD variation value at a particular time Parameters ---------- psd_var : TimeSeries Time series of the variability in the PSD estimation idx : numpy.ndarray Time indices of the triggers start : float GPS start time sample_rate : float Sample rate defined in ini file Returns ------- vals : Array PSD variation value at a particular time
def next_basis_label_or_index(self, label_or_index, n=1):
    """Given the label or index of a basis state, return the label/index
    of the next basis state.

    More generally, if `n` is given, return the `n`'th next basis state
    label/index; `n` may also be negative to obtain previous basis state
    labels/indices. The return type is the same as the type of
    `label_or_index`.

    Args:
        label_or_index (int or str or SymbolicLabelBase): If `int`, the
            index of a basis state; if `str`, the label of a basis state
        n (int): The increment

    Raises:
        IndexError: If going beyond the last or first basis state
        ValueError: If `label` is not a label for any basis state in the
            Hilbert space
        .BasisNotSetError: If the Hilbert space has no defined basis
        TypeError: if `label_or_index` is neither a :class:`str` nor an
            :class:`int`, nor a :class:`SymbolicLabelBase`
    """
    if isinstance(label_or_index, int):
        shifted = label_or_index + n
        if shifted < 0:
            raise IndexError("index %d < 0" % shifted)
        # The upper bound can only be checked when a basis is defined.
        if self.has_basis and shifted >= self.dimension:
            raise IndexError("index %d out of range for basis %s"
                             % (shifted, self._basis))
        return shifted
    if isinstance(label_or_index, str):
        # `.index` raises ValueError for labels not in the basis.
        position = self.basis_labels.index(label_or_index) + n
        if not 0 <= position < len(self._basis):
            raise IndexError("index %d out of range for basis %s"
                             % (position, self._basis))
        return self._basis[position]
    if isinstance(label_or_index, SymbolicLabelBase):
        # Shift symbolically by building a new label of the same class.
        return label_or_index.__class__(expr=label_or_index.expr + n)
    raise TypeError(
        "Invalid type for label_or_index: %s"
        % label_or_index.__class__.__name__)
Given the label or index of a basis state, return the label/index of the next basis state. More generally, if `n` is given, return the `n`'th next basis state label/index; `n` may also be negative to obtain previous basis state labels/indices. The return type is the same as the type of `label_or_index`. Args: label_or_index (int or str or SymbolicLabelBase): If `int`, the index of a basis state; if `str`, the label of a basis state n (int): The increment Raises: IndexError: If going beyond the last or first basis state ValueError: If `label` is not a label for any basis state in the Hilbert space .BasisNotSetError: If the Hilbert space has no defined basis TypeError: if `label_or_index` is neither a :class:`str` nor an :class:`int`, nor a :class:`SymbolicLabelBase`
def __execute_rot(self, surface):
    """Rotate *surface* by the current rotation angle and refresh the
    sprite's surface extents."""
    rotated = pygame.transform.rotate(surface, self.__rotation)
    self.image = rotated
    self.__resize_surface_extents()
Executes the rotating operation
def set_elapsed_time(self, client):
    """Set elapsed time for slave clients.

    Copies the start time of the first related client that has a running
    timer, then starts this client's own once-a-second display timer.
    """
    for related in self.get_related_clients(client):
        if related.timer is None:
            continue
        client.create_time_label()
        # Mirror the sibling's start time and begin ticking every second.
        client.t0 = related.t0
        client.timer.timeout.connect(client.show_time)
        client.timer.start(1000)
        break
Set elapsed time for slave clients.
def flatten(d, *keys):
    """Flattens the dictionary d by merging keys in order such that later
    keys take precedence over earlier keys.

    NOTE: each flattened sub-dict is *popped* out of `d`, mutating it.
    """
    result = {}
    for key in keys:
        result = merge(result, d.pop(key, {}))
    return result
Flattens the dictionary d by merging keys in order such that later keys take precedence over earlier keys.
def create_widget(self):
    """Create the underlying dialog widget.

    A dialog is not a subclass of view, hence we don't set name as widget
    or children will try to use it as their parent.
    """
    declaration = self.declaration
    self.dialog = Dialog(self.get_context(), declaration.style)
Create the underlying widget. A dialog is not a subclass of view, hence we don't set name as widget or children will try to use it as their parent.
def balance(self):
    """Return a tuple ``(total deposited, total withdrawn)``.

    Withdrawals are transactions with a negative amount, so the second
    element is a non-positive Decimal.
    """
    deposited = Decimal("0.00")
    withdrawn = Decimal("0.00")
    for transaction in self.trans:
        if transaction.amount < Decimal("0.00"):
            withdrawn += transaction.amount
        else:
            deposited += transaction.amount
    return deposited, withdrawn
Returns a tuple of (total amount deposited, total amount withdrawn).
def get_plain_image_as_widget(self):
    """Return the plain image wrapped in a shown Gtk.Image widget.

    Used for generating thumbnails.  Does not include overlaid graphics.
    """
    pixbuf = self.get_plain_image_as_pixbuf()
    widget = Gtk.Image()
    widget.set_from_pixbuf(pixbuf)
    widget.show()
    return widget
Used for generating thumbnails. Does not include overlaid graphics.
def deploy_clone_from_vm(self, context, deploy_action, cancellation_context):
    """Deploy Cloned VM From VM Command; deploys a VM from another VM.

    :param ResourceCommandContext context: the context of the command
    :param DeployApp deploy_action:
    :param CancellationContext cancellation_context:
    :return DeployAppResult: deploy results
    """
    clone_model = self.resource_model_parser.convert_to_resource_model(
        attributes=deploy_action.actionParams.deployment.attributes,
        resource_model_type=vCenterCloneVMFromVMResourceModel)
    details = DeployFromTemplateDetails(
        clone_model, deploy_action.actionParams.appName)
    result = self.command_wrapper.execute_command_with_connection(
        context,
        self.deploy_command.execute_deploy_clone_from_vm,
        details,
        cancellation_context,
        self.folder_manager)
    # Propagate the originating action id onto the result
    result.actionId = deploy_action.actionId
    return result
Deploy Cloned VM From VM Command, will deploy vm from template :param CancellationContext cancellation_context: :param ResourceCommandContext context: the context of the command :param DeployApp deploy_action: :return DeployAppResult deploy results
def polygon_from_points(points):
    """Construct a numpy-compatible polygon from a page representation.

    ``points`` is a space-separated string of ``x,y`` pairs; the result is
    a list of ``[x, y]`` float pairs.
    """
    return [
        [float(coords[0]), float(coords[1])]
        for coords in (pair.split(",") for pair in points.split(" "))
    ]
Constructs a numpy-compatible polygon from a page representation.
def connect_to_ec2(region='us-east-1', access_key=None, secret_key=None):
    """Connect to AWS EC2.

    :type region: str
    :param region: AWS region to connect to
    :type access_key: str
    :param access_key: AWS access key id
    :type secret_key: str
    :param secret_key: AWS secret access key
    :returns: boto.ec2.connection.EC2Connection -- EC2 connection
    """
    if access_key:
        # Explicit credentials supplied by the caller
        logger.info('Connecting to AWS EC2 in {}'.format(region))
        conn = ec2.connect_to_region(
            region,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key)
    else:
        # Try to derive the region from the instance metadata, then fall
        # back to env vars / boto credentials
        instance_meta = get_instance_metadata(timeout=1, num_retries=1)
        if instance_meta:
            try:
                region = instance_meta['placement']['availability-zone'][:-1]
            except KeyError:
                pass
        logger.info('Connecting to AWS EC2 in {}'.format(region))
        conn = ec2.connect_to_region(region)
    if not conn:
        logger.error('An error occurred when connecting to EC2')
        sys.exit(1)
    return conn
Connect to AWS ec2 :type region: str :param region: AWS region to connect to :type access_key: str :param access_key: AWS access key id :type secret_key: str :param secret_key: AWS secret access key :returns: boto.ec2.connection.EC2Connection -- EC2 connection
def make_folium_polyline(edge, edge_color, edge_width, edge_opacity,
                         popup_attribute=None):
    """
    Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
    attributes.

    Parameters
    ----------
    edge : GeoSeries
        a row from the gdf_edges GeoDataFrame
    edge_color : string
        color of the edge lines
    edge_width : numeric
        width of the edge lines
    edge_opacity : numeric
        opacity of the edge lines
    popup_attribute : string
        edge attribute to display in a pop-up when an edge is clicked, if
        None, no popup

    Returns
    -------
    pl : folium.PolyLine
    """
    # folium is an optional dependency; fail loudly if it is missing
    if not folium:
        raise ImportError('The folium package must be installed to use this optional feature.')

    # folium expects (lat, lon) whereas geopandas supplies (lon, lat),
    # so flip each coordinate pair
    coords = [(lat, lon) for lon, lat in edge['geometry'].coords]

    if popup_attribute is None:
        popup = None
    else:
        # folium doesn't interpret html in the html argument (weird), so
        # can't do newlines without an iframe
        popup = folium.Popup(html=json.dumps(edge[popup_attribute]))

    return folium.PolyLine(locations=coords, popup=popup,
                           color=edge_color, weight=edge_width,
                           opacity=edge_opacity)
Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with attributes. Parameters ---------- edge : GeoSeries a row from the gdf_edges GeoDataFrame edge_color : string color of the edge lines edge_width : numeric width of the edge lines edge_opacity : numeric opacity of the edge lines popup_attribute : string edge attribute to display in a pop-up when an edge is clicked, if None, no popup Returns ------- pl : folium.PolyLine
def format(self, record) -> str:
    """Serialize ``record`` into its string representation.

    :type record: aiologger.loggers.json.LogRecord
    """
    payload = dict(self.formatter_fields_for_record(record))
    if record.flatten and isinstance(record.msg, dict):
        # Flatten a dict message into the top-level payload
        payload.update(record.msg)
    else:
        payload[MSG_FIELDNAME] = record.msg
    if record.extra:
        payload.update(record.extra)
    if record.exc_info:
        payload["exc_info"] = record.exc_info
    if record.exc_text:
        payload["exc_text"] = record.exc_text
    return self.serializer(
        payload, default=self._default_handler, **record.serializer_kwargs)
:type record: aiologger.loggers.json.LogRecord
def _get_covariance(self, X):
    """Compute covariance matrix with transformed data.

    Args:
        X: `numpy.ndarray` or `pandas.DataFrame`.

    Returns:
        np.ndarray
    """
    transformed = pd.DataFrame(index=range(len(X)))
    for name in self.get_column_names(X):
        values = self.get_column(X, name)
        distribution = self.distribs[name]
        # CDF of the column under its original fitted distribution
        cdf = distribution.cumulative_distribution(values)
        if distribution.constant_value is not None:
            # Avoid np.inf in the case the column is constant
            cdf = np.ones(values.shape) - EPSILON
        # Map through the inverse CDF of the standard normal
        transformed = self.set_column(
            transformed, name, stats.norm.ppf(cdf))
    # Remove any rows that have infinite values
    transformed = transformed[(transformed != np.inf).all(axis=1)]
    return pd.DataFrame(data=transformed).cov().values
Compute covariance matrix with transformed data. Args: X: `numpy.ndarray` or `pandas.DataFrame`. Returns: np.ndarray
def _initial_broks(self, broker_name):
    """Get initial_broks from the scheduler.

    Used by the brokers to prepare the initial status broks.  This does
    not send broks; it only triggers the scheduler's internal processing.
    The broker must then use the *_broks* API to get all the stuff.

    :param broker_name: broker name, used to filter broks
    :type broker_name: str
    :return: None
    """
    with self.app.conf_lock:
        logger.info("A new broker just connected : %s", broker_name)
        return self.app.sched.fill_initial_broks(broker_name)
Get initial_broks from the scheduler This is used by the brokers to prepare the initial status broks This do not send broks, it only makes scheduler internal processing. Then the broker must use the *_broks* API to get all the stuff :param broker_name: broker name, used to filter broks :type broker_name: str :return: None
def prior_groups(self):
    """Get the prior info group names.

    Returns
    -------
    prior_groups : list
        a list of prior information groups
    """
    groups = self.prior_information.groupby("obgnme").groups.keys()
    return list(groups)
get the prior info groups Returns ------- prior_groups : list a list of prior information groups
def _scale(self, mode): """ Returns value scaling coefficient for the given mode. """ if mode in self._mode_scale: scale = self._mode_scale[mode] else: scale = 10**(-self.decimals) self._mode_scale[mode] = scale return scale
Returns value scaling coefficient for the given mode.
def __get_percpu(self):
    """Update and/or return the per CPU list using the psutil library."""
    # Never update more than 1 time per cached_time
    if self.timer_percpu.finished():
        self.percpu_percent = []
        for cpu_number, cputimes in enumerate(
                psutil.cpu_times_percent(interval=0.0, percpu=True)):
            cpu = {'key': self.get_key(),
                   'cpu_number': cpu_number,
                   'total': round(100 - cputimes.idle, 1),
                   'user': cputimes.user,
                   'system': cputimes.system,
                   'idle': cputimes.idle}
            # The following stats are for API purposes only and are not
            # available on every platform
            for stat in ('nice', 'iowait', 'irq', 'softirq',
                         'steal', 'guest', 'guest_nice'):
                if hasattr(cputimes, stat):
                    cpu[stat] = getattr(cputimes, stat)
            self.percpu_percent.append(cpu)
        # Reset timer for cache
        self.timer_percpu = Timer(self.cached_time)
    return self.percpu_percent
Update and/or return the per CPU list using the psutil library.
def write_bytes(out_data, encoding="ascii"):
    """Write Python2 and Python3 compatible byte stream.

    Unicode text is encoded (utf-8, or ascii with unencodable characters
    dropped); byte strings are returned unchanged.
    """
    if sys.version_info[0] >= 3:
        if isinstance(out_data, str):
            return (out_data.encode("utf-8") if encoding == "utf-8"
                    else out_data.encode("ascii", "ignore"))
        if isinstance(out_data, bytes):
            return out_data
    else:
        # Python 2: type("") is the native str and unicode handling differs
        if isinstance(out_data, type("")):
            return (out_data.encode("utf-8") if encoding == "utf-8"
                    else out_data.encode("ascii", "ignore"))
        if isinstance(out_data, type(str(""))):
            return out_data
    msg = "Invalid value for out_data neither unicode nor byte string: {}".format(
        out_data
    )
    raise ValueError(msg)
Write Python2 and Python3 compatible byte stream.
def get_create_base_agent(self, agent):
    """Return base agent with given name, creating it if needed.

    Looks up (or creates and registers) a BaseAgent keyed by the
    normalized agent name, then mirrors the INDRA Agent's state
    (bound conditions, modifications, mutations, location, activity,
    db_refs) onto it as sites/annotations.
    """
    # EAFP lookup: create and register a new BaseAgent on first sight
    try:
        base_agent = self.agents[_n(agent.name)]
    except KeyError:
        base_agent = BaseAgent(_n(agent.name))
        self.agents[_n(agent.name)] = base_agent

    # If it's a molecular agent
    if isinstance(agent, Agent):
        # Handle bound conditions: create reciprocal binding sites on
        # both this agent and each bound partner
        for bc in agent.bound_conditions:
            bound_base_agent = self.get_create_base_agent(bc.agent)
            bound_base_agent.create_site(get_binding_site_name(agent))
            base_agent.create_site(get_binding_site_name(bc.agent))

        # Handle modification conditions
        for mc in agent.mods:
            base_agent.create_mod_site(mc)

        # Handle mutation conditions: site name is residue_from (or the
        # generic 'mut'), optionally suffixed with the position; states
        # are wild-type vs the mutated residue (or 'X' when unknown)
        for mc in agent.mutations:
            res_from = mc.residue_from if mc.residue_from else 'mut'
            res_to = mc.residue_to if mc.residue_to else 'X'
            if mc.position is None:
                mut_site_name = res_from
            else:
                mut_site_name = res_from + mc.position

            base_agent.create_site(mut_site_name, states=['WT', res_to])

        # Handle location condition
        if agent.location is not None:
            base_agent.create_site('loc', [_n(agent.location)])

        # Handle activity
        if agent.activity is not None:
            site_name = agent.activity.activity_type
            base_agent.create_site(site_name, ['inactive', 'active'])

    # There might be overwrites here: later db_refs replace earlier ones
    # for the same database name
    for db_name, db_ref in agent.db_refs.items():
        base_agent.db_refs[db_name] = db_ref

    return base_agent
Return base agent with given name, creating it if needed.
def handle_call_response(self, result, node):
    """Track ``node``'s liveness based on an RPC call result.

    If we get a response, add the node to the routing table.  If we get
    no response, make sure it's removed from the routing table.  The
    ``result`` tuple is returned unchanged either way.
    """
    responded = result[0]
    if responded:
        log.info("got successful response from %s", node)
        self.welcome_if_new(node)
    else:
        log.warning("no response from %s, removing from router", node)
        self.router.remove_contact(node)
    return result
If we get a response, add the node to the routing table. If we get no response, make sure it's removed from the routing table.
def get_user_modules(self):
    """Search configured include directories for user provided modules.

    Returns a mapping of module name to ``(include_path, file_name)``::

        user_modules: {
            'weather_yahoo': ('~/i3/py3status/', 'weather_yahoo.py')
        }

    When the same module name exists in several include paths, the first
    path found wins.
    """
    user_modules = {}
    for include_path in self.config["include_paths"]:
        for f_name in sorted(os.listdir(include_path)):
            if not f_name.endswith(".py"):
                continue
            module_name = f_name[:-3]
            # do not overwrite modules if already found
            if module_name in user_modules:
                # BUG FIX: this was `pass`, which fell through and
                # overwrote the earlier entry anyway
                continue
            user_modules[module_name] = (include_path, f_name)
    return user_modules
Search configured include directories for user provided modules. user_modules: { 'weather_yahoo': ('~/i3/py3status/', 'weather_yahoo.py') }
def get_system_root_directory():
    """Get system root directory (application installed root directory).

    Returns
    -------
    string
        A full path
    """
    package_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.dirname(package_dir))
Get system root directory (application installed root directory) Returns ------- string A full path
def load_script(zap_helper, **options):
    """Load a script from a file."""
    with zap_error_handler():
        file_path = options['file_path']
        if not os.path.isfile(file_path):
            raise ZAPError('No file found at "{0}", cannot load script.'.format(file_path))
        if not _is_valid_script_engine(zap_helper.zap, options['engine']):
            engines = zap_helper.zap.script.list_engines
            raise ZAPError('Invalid script engine provided. Valid engines are: {0}'.format(', '.join(engines)))
        console.debug('Loading script "{0}" from "{1}"'.format(options['name'], file_path))
        result = zap_helper.zap.script.load(options['name'], options['script_type'],
                                            options['engine'], file_path,
                                            scriptdescription=options['description'])
        if result != 'OK':
            raise ZAPError('Error loading script: {0}'.format(result))
        console.info('Script "{0}" loaded'.format(options['name']))
Load a script from a file.
def max(self):
    """Maximum, ignoring nans (cached in ``attrs`` after first computation)."""
    if "max" not in self.attrs.keys():
        # Reduce each chunk first, then reduce over the chunk maxima
        def chunk_max(dataset, s):
            return np.nanmax(dataset[s])
        self.attrs["max"] = np.nanmax(list(self.chunkwise(chunk_max).values()))
    return self.attrs["max"]
Maximum, ignoring nans.
def clusterQueues(self):
    """Return a dict of queues in cluster and servers running them."""
    servers = yield self.getClusterServers()

    queues = {}
    for server_name in servers:
        queues_json = yield self.get('rhumba.server.%s.queues' % server_name)
        uuid = yield self.get('rhumba.server.%s.uuid' % server_name)

        for queue in json.loads(queues_json):
            # Group servers by the queue they serve
            queues.setdefault(queue, []).append(
                {'host': server_name, 'uuid': uuid})

    defer.returnValue(queues)
Return a dict of queues in cluster and servers running them
def get(self, name, default=None):
    """Get the value at ``name`` for this :class:`Config` container.

    The returned value is obtained from:

    * the value at ``name`` in the :attr:`settings` dictionary if available.
    * the value at ``name`` in the :attr:`params` dictionary if available.
    * the ``default`` value.
    """
    try:
        value = self._get(name, default)
    except KeyError:
        return default
    return value
Get the value at ``name`` for this :class:`Config` container The returned value is obtained from: * the value at ``name`` in the :attr:`settings` dictionary if available. * the value at ``name`` in the :attr:`params` dictionary if available. * the ``default`` value.
def search_variant_annotations(
        self, variant_annotation_set_id, reference_name="",
        reference_id="", start=0, end=0, effects=[]):
    """
    Returns an iterator over the Variant Annotations fulfilling the
    specified conditions from the specified VariantSet.

    :param str variant_annotation_set_id: The ID of the
        :class:`ga4gh.protocol.VariantAnnotationSet` of interest.
    :param int start: Required. The beginning of the window (0-based,
        inclusive) for which overlapping variants should be returned.
    :param int end: Required. The end of the window (0-based, exclusive)
        for which overlapping variants should be returned.
    :param str reference_name: The name of the
        :class:`ga4gh.protocol.Reference` we wish to return variants from.

    :return: An iterator over the
        :class:`ga4gh.protocol.VariantAnnotation` objects defined by the
        query parameters.
    :rtype: iter
    """
    req = protocol.SearchVariantAnnotationsRequest()
    req.variant_annotation_set_id = variant_annotation_set_id
    req.reference_name = reference_name
    req.reference_id = reference_id
    req.start = start
    req.end = end
    # Copy each effect dict into the request as an OntologyTerm
    for effect in effects:
        req.effects.add().CopyFrom(protocol.OntologyTerm(**effect))
    # Every supplied effect must carry a term id
    for effect in req.effects:
        if not effect.term_id:
            raise exceptions.ErrantRequestException(
                "Each ontology term should have an id set")
    req.page_size = pb.int(self._page_size)
    return self._run_search_request(
        req, "variantannotations",
        protocol.SearchVariantAnnotationsResponse)
Returns an iterator over the Variant Annotations fulfilling the specified conditions from the specified VariantSet. :param str variant_annotation_set_id: The ID of the :class:`ga4gh.protocol.VariantAnnotationSet` of interest. :param int start: Required. The beginning of the window (0-based, inclusive) for which overlapping variants should be returned. Genomic positions are non-negative integers less than reference length. Requests spanning the join of circular genomes are represented as two requests one on each side of the join (position 0). :param int end: Required. The end of the window (0-based, exclusive) for which overlapping variants should be returned. :param str reference_name: The name of the :class:`ga4gh.protocol.Reference` we wish to return variants from. :return: An iterator over the :class:`ga4gh.protocol.VariantAnnotation` objects defined by the query parameters. :rtype: iter
def bootstrap(directory='.',
              config='buildout.cfg',
              python=sys.executable,
              onlyif=None,
              unless=None,
              runas=None,
              env=(),
              distribute=None,
              buildout_ver=None,
              test_release=False,
              offline=False,
              new_st=None,
              use_vt=False,
              loglevel=None):
    '''
    Run the buildout bootstrap dance (python bootstrap.py).

    directory
        directory to execute in

    config
        alternative buildout configuration file to use

    runas
        User used to run buildout as

    env
        environment variables to set when running

    buildout_ver
        force a specific buildout version (1 | 2)

    test_release
        buildout accept test release

    offline
        are we executing buildout in offline mode

    distribute
        Forcing use of distribute

    new_st
        Forcing use of setuptools >= 0.7

    python
        path to a python executable to use in place of default (salt one)

    onlyif
        Only execute cmd if statement on the host return 0

    unless
        Do not execute cmd if statement on the host return 0

    use_vt
        Use the new salt VT to stream output [experimental]

    CLI Example:

    .. code-block:: bash

        salt '*' buildout.bootstrap /srv/mybuildout
    '''
    directory = os.path.abspath(directory)
    dbuild = _dot_buildout(directory)
    bootstrap_args = ''
    has_distribute = _has_old_distribute(python=python, runas=runas, env=env)
    has_new_st = _has_setuptools7(python=python, runas=runas, env=env)
    # Choose between setuptools >= 0.7 ("new_st") and distribute.  The
    # original exhaustive (and partly duplicated) 16-branch truth table
    # over (has_distribute, has_new_st, distribute, new_st) reduces to:
    # distribute is only used when it was explicitly requested, new_st
    # was not requested, and setuptools >= 0.7 is not already installed;
    # every other combination forces setuptools >= 0.7.
    if distribute and not new_st and not has_new_st:
        new_st = False
        distribute = True
    else:
        new_st = True
        distribute = False
    if new_st:
        distribute = False
        LOG.warning('Forcing to use setuptools as we have setuptools >= 0.7')
    if distribute:
        new_st = False
        if buildout_ver == 1:
            LOG.warning('Using distribute !')
            bootstrap_args += ' --distribute'
    if not os.path.isdir(dbuild):
        os.makedirs(dbuild)
    upgrade_bootstrap(directory,
                      offline=offline,
                      buildout_ver=buildout_ver)
    # be sure which buildout bootstrap we have
    b_py = os.path.join(directory, 'bootstrap.py')
    with salt.utils.files.fopen(b_py) as fic:
        content = salt.utils.stringutils.to_unicode(fic.read())
    if (
        (test_release is not False)
        and ' --accept-buildout-test-releases' in content
    ):
        bootstrap_args += ' --accept-buildout-test-releases'
    if config and '"-c"' in content:
        bootstrap_args += ' -c {0}'.format(config)
    # be sure that the bootstrap belongs to the running user
    try:
        if runas:
            uid = __salt__['user.info'](runas)['uid']
            gid = __salt__['user.info'](runas)['gid']
            os.chown('bootstrap.py', uid, gid)
    except (IOError, OSError) as exc:
        # don't block here, try to execute it if can pass
        _logger.error('BUILDOUT bootstrap permissions error: %s', exc,
                      exc_info=_logger.isEnabledFor(logging.DEBUG))
    cmd = '{0} bootstrap.py {1}'.format(python, bootstrap_args)
    ret = _Popen(cmd, directory=directory, runas=runas, loglevel=loglevel,
                 env=env, use_vt=use_vt)
    output = ret['output']
    return {'comment': cmd, 'out': output}
Run the buildout bootstrap dance (python bootstrap.py). directory directory to execute in config alternative buildout configuration file to use runas User used to run buildout as env environment variables to set when running buildout_ver force a specific buildout version (1 | 2) test_release buildout accept test release offline are we executing buildout in offline mode distribute Forcing use of distribute new_st Forcing use of setuptools >= 0.7 python path to a python executable to use in place of default (salt one) onlyif Only execute cmd if statement on the host return 0 unless Do not execute cmd if statement on the host return 0 use_vt Use the new salt VT to stream output [experimental] CLI Example: .. code-block:: bash salt '*' buildout.bootstrap /srv/mybuildout
def net_query(name: str) -> Constants:
    '''Find the NetworkParams for a network by its long or short name.

    Raises UnsupportedNetwork if no NetworkParams is found.
    '''
    for params in networks:
        if name in (params.name, params.shortname):
            return params
    raise UnsupportedNetwork
Find the NetworkParams for a network by its long or short name. Raises UnsupportedNetwork if no NetworkParams is found.
def setHoverIcon( self, column, icon ):
    """
    Sets the icon to use when the user hovers over the item for the
    given column.

    :param      column | <int>
                icon   | <QtGui.QIcon)
    """
    self._hoverIcon[column] = QtGui.QIcon(icon)
Sets the icon to use when the user hovers over the item for the given column. :param column | <int> icon | <QtGui.QIcon)
def complex_to_real(complex_fid):
    """Convert a complex FID into a real array for standard optimizers.

    Standard optimization routines as used in lmfit require real data.
    The real part fills the first half of the output and the imaginary
    part, reversed, fills the second half.  Reversing keeps the maxima at
    each end of the FID and avoids discontinuities in the center.

    :param complex_fid: the complex FID to be converted to real.
    :return: the real FID, which has twice as many points as the input.
    """
    n_points = complex_fid.shape[0]
    real_fid = numpy.zeros(n_points * 2)
    real_fid[:n_points] = complex_fid.real
    real_fid[n_points:] = complex_fid.imag[::-1]
    return real_fid
Standard optimization routines as used in lmfit require real data. This function takes a complex FID and constructs a real version by concatenating the imaginary part to the complex part. The imaginary part is also reversed to keep the maxima at each end of the FID and avoid discontinuities in the center. :param complex_fid: the complex FID to be converted to real. :return: the real FID, which has twice as many points as the input.
def extract_gcc_binaries():
    """Try to find GCC on OSX for OpenMP support.

    Returns the basename of the newest matching g++ binary, or None when
    not on Darwin or when no candidate is found.
    """
    if 'darwin' not in platform.platform().lower():
        return None
    patterns = ['/opt/local/bin/g++-mp-[0-9].[0-9]',
                '/opt/local/bin/g++-mp-[0-9]',
                '/usr/local/bin/g++-[0-9].[0-9]',
                '/usr/local/bin/g++-[0-9]']
    candidates = []
    for pattern in patterns:
        candidates.extend(glob.glob(pattern))
    candidates.sort()
    if not candidates:
        return None
    # Highest-sorting path is the newest version; return just its basename
    _, gcc = os.path.split(candidates[-1])
    return gcc
Try to find GCC on OSX for OpenMP support.
def _build_index(maf_strm, ref_spec):
    """Build an index for a MAF genome alignment file; return it as StringIO."""
    idx_strm = StringIO.StringIO()
    block_iter = functools.partial(genome_alignment_iterator,
                                   reference_species=ref_spec)
    hash_func = JustInTimeGenomeAlignmentBlock.build_hash
    IndexedFile(maf_strm, block_iter, hash_func).write_index(idx_strm)
    idx_strm.seek(0)  # rewind so callers can read from the start
    return idx_strm
Build an index for a MAF genome alig file and return StringIO of it.
def _rectify_countdown_or_bool(count_or_bool):
    """
    used by recursive functions to specify which level to turn a bool on in
    counting down yields True, True, ..., False
    counting up yields False, False, False, ... True

    Args:
        count_or_bool (bool or int): if positive will count down, if negative
            will count up, if bool will remain same

    Returns:
        int or bool: count_or_bool_

    CommandLine:
        python -m utool.util_str --test-_rectify_countdown_or_bool

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_str import _rectify_countdown_or_bool  # NOQA
        >>> count_or_bool = True
        >>> a1 = (_rectify_countdown_or_bool(2))
        >>> a2 = (_rectify_countdown_or_bool(1))
        >>> a3 = (_rectify_countdown_or_bool(0))
        >>> a4 = (_rectify_countdown_or_bool(-1))
        >>> a5 = (_rectify_countdown_or_bool(-2))
        >>> a6 = (_rectify_countdown_or_bool(True))
        >>> a7 = (_rectify_countdown_or_bool(False))
        >>> result = [a1, a2, a3, a4, a5, a6, a7]
        >>> print(result)
        [1, 0, 0, 0, -1, True, False]
    """
    if count_or_bool is True or count_or_bool is False:
        # Booleans pass through unchanged
        count_or_bool_ = count_or_bool
    elif isinstance(count_or_bool, int):
        if count_or_bool == 0:
            return 0
        # copysign yields a float of magnitude 1; stepping the count one
        # unit toward zero and casting back to int
        sign_ = math.copysign(1, count_or_bool)
        count_or_bool_ = int(count_or_bool - sign_)
        #if count_or_bool_ == 0:
        #    return sign_ == 1
    else:
        # Any non-int, non-bool input degrades to False
        count_or_bool_ = False
    return count_or_bool_
used by recursive functions to specify which level to turn a bool on in counting down yields True, True, ..., False counting up yields False, False, False, ... True Args: count_or_bool (bool or int): if positive will count down, if negative will count up, if bool will remain same Returns: int or bool: count_or_bool_ CommandLine: python -m utool.util_str --test-_rectify_countdown_or_bool Example: >>> # DISABLE_DOCTEST >>> from utool.util_str import _rectify_countdown_or_bool # NOQA >>> count_or_bool = True >>> a1 = (_rectify_countdown_or_bool(2)) >>> a2 = (_rectify_countdown_or_bool(1)) >>> a3 = (_rectify_countdown_or_bool(0)) >>> a4 = (_rectify_countdown_or_bool(-1)) >>> a5 = (_rectify_countdown_or_bool(-2)) >>> a6 = (_rectify_countdown_or_bool(True)) >>> a7 = (_rectify_countdown_or_bool(False)) >>> result = [a1, a2, a3, a4, a5, a6, a7] >>> print(result) [1, 0, 0, 0, -1, True, False]
def handle_key_cache(self):
    '''
    Evaluate accepted keys and create a msgpack file which contains a list
    '''
    if self.opts['key_cache'] != 'sched':
        return
    # TODO DRY from CKMinions
    if self.opts['transport'] in ('zeromq', 'tcp'):
        acc = 'minions'
    else:
        acc = 'accepted'
    acc_dir = os.path.join(self.opts['pki_dir'], acc)
    keys = [fn_ for fn_ in os.listdir(acc_dir)
            if not fn_.startswith('.')
            and os.path.isfile(os.path.join(acc_dir, fn_))]
    log.debug('Writing master key cache')
    # Write a temporary file securely
    cache_path = os.path.join(acc_dir, '.key_cache')
    if six.PY2:
        with salt.utils.atomicfile.atomic_open(cache_path) as cache_file:
            self.serial.dump(keys, cache_file)
    else:
        with salt.utils.atomicfile.atomic_open(cache_path, mode='wb') as cache_file:
            self.serial.dump(keys, cache_file)
Evaluate accepted keys and create a msgpack file which contains a list
def report_change(self, name, value, maxdiff=1, deltat=10):
    '''Report a sensor change, rate-limited and threshold-filtered.

    A change is spoken only when at least ``deltat`` seconds have passed
    since the last report for ``name`` and the value moved by at least
    ``maxdiff``.
    '''
    report = self.reports[name]
    if time.time() < report.last_report + deltat:
        return
    report.last_report = time.time()
    if math.fabs(report.value - value) < maxdiff:
        return
    report.value = value
    self.say("%s %u" % (name, value))
report a sensor change
def apply(self, coro_function, args=None, kwargs=None, callback=None):
    """Submit a coro_function(*args, **kwargs) as NewTask to self.loop with
    loop.frequncy control.

    ::

        from torequests.dummy import Loop
        import asyncio
        loop = Loop()

        async def test(i):
            result = await asyncio.sleep(1)
            return (loop.frequency, i)

        task = loop.apply(test, [1])
        print(task)
        # loop.x can be ignore
        loop.x
        print(task.x)
        # <NewTask pending coro=<new_coro_func() running at torequests/torequests/dummy.py:154>>
        # (Frequency(sem=<0/0>, interval=0, name=loop_sem), 1)
    """
    wrapped = self._wrap_coro_function_with_sem(coro_function)
    coro = wrapped(*(args or ()), **(kwargs or {}))
    return self.submit(coro, callback=callback)
Submit a coro_function(*args, **kwargs) as NewTask to self.loop with loop.frequncy control. :: from torequests.dummy import Loop import asyncio loop = Loop() async def test(i): result = await asyncio.sleep(1) return (loop.frequency, i) task = loop.apply(test, [1]) print(task) # loop.x can be ignore loop.x print(task.x) # <NewTask pending coro=<new_coro_func() running at torequests/torequests/dummy.py:154>> # (Frequency(sem=<0/0>, interval=0, name=loop_sem), 1)