docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Works like "filter" but joins given filters with OR operator. Args: **filters: Query filters as keyword arguments. Returns: Self. Queryset object. Example: >>> Person.objects.or_filter(age__gte=16, name__startswith='jo')
def or_filter(self, **filters):
    """Filter like ``filter`` but join the given filters with OR.

    Args:
        **filters: Query filters as keyword arguments.

    Returns:
        A new queryset clone with the OR query applied.

    Example:
        >>> Person.objects.or_filter(age__gte=16, name__startswith='jo')
    """
    new_qs = copy.deepcopy(self)
    new_qs.adapter.add_query([("OR_QRY", filters)])
    return new_qs
1,036,311
Applies query ordering. Args: **args: Order by fields names. Defaults to ascending, prepend with hyphen (-) for descending ordering. Returns: Self. Queryset object. Examples: >>> Person.objects.order_by('-name', 'join_date')
def order_by(self, *args):
    """Apply query ordering.

    Args:
        *args: Field names to order by. Defaults to ascending;
            prepend a hyphen (-) for descending ordering.

    Returns:
        A new queryset clone with ordering applied.

    Examples:
        >>> Person.objects.order_by('-name', 'join_date')
    """
    new_qs = copy.deepcopy(self)
    new_qs.adapter.ordered = True
    if args:
        new_qs.adapter.order_by(*args)
    return new_qs
1,036,314
make a raw query Args: query (str): solr query \*\*params: solr parameters
def raw(self, query):
    """Make a raw solr query.

    Args:
        query (str): The solr query string, used verbatim.

    Returns:
        A new queryset clone carrying the raw query.
    """
    new_qs = copy.deepcopy(self)
    # Both the pre-compiled and compiled slots get the raw string so the
    # adapter executes it without further processing.
    new_qs.adapter._pre_compiled_query = query
    new_qs.adapter.compiled_query = query
    return new_qs
1,036,317
Naively slice each data object in the container by the object's index. Args: key: Int, slice, or list by which to extra "sub"-container Returns: sub: Sub container of the same format with a view of the data Warning: To ensure that a new container is created...
def slice_naive(self, key):
    """Naively slice each data object in the container by index.

    Args:
        key: int, slice, or list used to select the "sub"-container.

    Returns:
        A new container of the same class holding views of the sliced data.
    """
    ctor_kwargs = {'name': self.name,
                   'description': self.description,
                   'meta': self.meta}
    for attr_name, data_obj in self._data().items():
        # Private attribute names are stored with a leading underscore;
        # the constructor expects them without it.
        public_name = attr_name[1:] if attr_name.startswith('_') else attr_name
        ctor_kwargs[public_name] = data_obj.slice_naive(key)
    return self.__class__(**ctor_kwargs)
1,036,393
Get the memory usage estimate of the container. Args: string (bool): Human readable string (default false) See Also: :func:`~exa.core.container.Container.info`
def memory_usage(self, string=False):
    """Estimate the container's memory usage.

    Args:
        string (bool): If True, return a human readable string (default False).

    See Also:
        :func:`~exa.core.container.Container.info`
    """
    if not string:
        return self.info()['size']
    nbytes = getsizeof(self)
    return ' '.join(str(part) for part in convert_bytes(nbytes))
1,036,397
Save the container as an HDF5 archive. Args: path (str): Path where to save the container
def save(self, path=None, complevel=1, complib='zlib'): if path is None: path = self.hexuid + '.hdf5' elif os.path.isdir(path): path += os.sep + self.hexuid + '.hdf5' elif not (path.endswith('.hdf5') or path.endswith('.hdf')): raise ValueError('File p...
1,036,399
Load a container object from a persistent location or file path. Args: pkid_or_path: Integer pkid corresponding to the container table or file path Returns: container: The saved container object
def load(cls, pkid_or_path=None): path = pkid_or_path if isinstance(path, (int, np.int32, np.int64)): raise NotImplementedError('Lookup via CMS not implemented.') elif not os.path.isfile(path): raise FileNotFoundError('File {} not found.'.format(path)) kw...
1,036,400
Usage: with (--version | <command>) Arguments: command The command to use as prefix to your context. Options: -h --help Show this screen. --version Show the current version.
def main(): arguments = docopt(main.__doc__) if arguments.get('--version'): print('with {}'.format(withtool.__version__)) sys.exit() while True: sub = yield from get_prompt(arguments['<command>']) call = '{cmd} {sub}'.format(cmd=arguments['<command>'], sub=sub) ...
1,036,613
Represents the class as a MappingNode. Args: dumper: The dumper to use. data: The user-defined object to dump. Returns: A yaml.Node representing the object.
def __call__(self, dumper: 'Dumper', data: Any) -> yaml.MappingNode: # make a dict with attributes logger.info('Representing {} of class {}'.format( data, self.class_.__name__)) if hasattr(data, 'yatiml_attributes'): logger.debug('Found yatiml_attributes()') ...
1,036,643
Applies the user's yatiml_sweeten() function(s), if any. Sweetening is done for the base classes first, then for the \ derived classes, down the hierarchy to the class we're \ constructing. Args: dumper: The dumper that is dumping this object. class_: The type o...
def __sweeten(self, dumper: 'Dumper', class_: Type, node: Node) -> None: for base_class in class_.__bases__: if base_class in dumper.yaml_representers: logger.debug('Sweetening for class {}'.format( self.class_.__name__)) self.__sweeten(du...
1,036,644
Represents the class as a ScalarNode. Args: dumper: The dumper to use. data: The user-defined object to dump. Returns: A yaml.Node representing the object.
def __call__(self, dumper: 'Dumper', data: Any) -> yaml.MappingNode: # make a ScalarNode of type str with name of value logger.info('Representing {} of class {}'.format( data, self.class_.__name__)) # convert to a yaml.ScalarNode represented = dumper.represent_str(d...
1,036,645
Generate a directory path, and create it if requested. .. code-block:: Python filepath = mkp('base', 'folder', 'file') dirpath = mkp('root', 'path', 'folder', mk=True) Args: \*args: File or directory path segments to be concatenated mk (bool): Make the directory (if it doesn't...
def mkp(*args, **kwargs):
    """Generate a directory path, and create it if requested.

    .. code-block:: Python

        filepath = mkp('base', 'folder', 'file')
        dirpath = mkp('root', 'path', 'folder', mk=True)

    Args:
        *args: File or directory path segments to be concatenated.
        mk (bool): Make the directory (if it doesn't exist), default False.

    Returns:
        str: The joined path.
    """
    make_dirs = kwargs.pop('mk', False)
    joined = os.sep.join(list(args))
    if make_dirs:
        # Collapse any doubled separators before creating directories.
        while sep2 in joined:
            joined = joined.replace(sep2, os.sep)
        try:
            os.makedirs(joined)
        except FileExistsError:
            pass
    return joined
1,036,728
Reduces bytes to more convenient units (i.e. KiB, GiB, TiB, etc.). Args: values (int): Value in Bytes Returns: tup (tuple): Tuple of value, unit (e.g. (10, 'MiB'))
def convert_bytes(value):
    """Reduce bytes to more convenient units (KiB, MiB, GiB, ...).

    Args:
        value (int): Value in bytes.

    Returns:
        tuple: (scaled value, unit string), e.g. (10, 'MiB').
    """
    # Roughly one unit step per 3-4 decimal digits of the byte count.
    digit_count = len(str(value))
    unit_index = np.rint(digit_count / 4).astype(int)
    return value / (1024 ** unit_index), sizes[unit_index]
1,036,729
Get a list of modules belonging to the given package. Args: key (str): Package or library name (e.g. "exa")
def get_internal_modules(key='exa'):
    """Get a list of imported modules belonging to the given package.

    Args:
        key (str): Package or library name (e.g. "exa").

    Returns:
        list: Module objects whose dotted name starts with ``key + '.'``.
    """
    prefix = key + '.'
    return [mod for name, mod in sys.modules.items()
            if name.startswith(prefix)]
1,036,730
Parse a variant line Split a variant line and map the fields on the header columns Args: variant_line (str): A vcf variant line header_line (list): A list with the header columns Returns: variant_dict (dict): A variant dictionary
def get_variant_dict(variant_line, header_line=None): if not header_line: logger.debug("No header line, use only first 8 mandatory fields") header_line = ['CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO'] logger.debug("Building variant dict from variant line {0} and header"\ "...
1,036,800
Parse a info field of a variant Make a dictionary from the info field of a vcf variant. Keys are the info keys and values are the raw strings from the vcf If the field only have a key (no value), value of infodict is True. Args: info_line (str): The info fie...
def get_info_dict(info_line):
    """Parse the INFO field of a vcf variant into a dictionary.

    Keys are the INFO keys and values are the raw strings from the vcf.
    If a field only has a key (no value), the value in the dict is True.

    Args:
        info_line (str): The raw INFO field of a vcf variant.

    Returns:
        dict: Mapping of INFO keys to raw string values (True for flags).
    """
    variant_info = {}
    for raw_info in info_line.split(';'):
        # Split on the first '=' only: INFO values (e.g. VEP CSQ strings)
        # may themselves contain '=' characters. Splitting on every '='
        # would silently discard such values and record the key as a flag.
        key, sep, value = raw_info.partition('=')
        if sep:
            variant_info[key] = value
        else:
            variant_info[key] = True
    return variant_info
1,036,801
Build a variant id The variant id is a string made of CHROM_POS_REF_ALT Args: variant_dict (dict): A variant dictionary Returns: variant_id (str)
def get_variant_id(variant_dict=None, variant_line=None): if variant_dict: chrom = variant_dict['CHROM'] position = variant_dict['POS'] ref = variant_dict['REF'] alt = variant_dict['ALT'] elif variant_line: splitted_line = variant_line.rstrip().split('\t') ...
1,036,802
Make the vep annotations into a dictionaries A vep dictionary will have the vep column names as keys and the vep annotations as values. The dictionaries are stored in a list Args: vep_string (string): A string with the CSQ annotation vep_header (list): A li...
def get_vep_info(vep_string, vep_header):
    """Turn a VEP CSQ annotation string into a list of dictionaries.

    Each dictionary has the vep column names as keys and the vep
    annotations as values — one dictionary per transcript.

    Args:
        vep_string (str): The CSQ annotation string.
        vep_header (list): The vep header columns.

    Returns:
        list: One dict per comma-separated transcript annotation.
    """
    annotations = []
    for raw_annotation in vep_string.split(','):
        fields = raw_annotation.split('|')
        annotations.append(dict(zip(vep_header, fields)))
    return annotations
1,036,803
Make the vep annotations into a dictionaries A snpeff dictionary will have the snpeff column names as keys and the vep annotations as values. The dictionaries are stored in a list. One dictionary for each transcript. Args: snpeff_string (string): A string with...
def get_snpeff_info(snpeff_string, snpeff_header):
    """Turn a snpEff annotation string into a list of dictionaries.

    Each dictionary has the snpeff column names as keys and the
    annotations as values — one dictionary per transcript.

    Args:
        snpeff_string (str): The snpeff annotation string.
        snpeff_header (list): The snpeff header columns.

    Returns:
        list: One dict per comma-separated transcript annotation.
    """
    parsed = []
    for transcript_annotation in snpeff_string.split(','):
        columns = transcript_annotation.split('|')
        parsed.append(dict(zip(snpeff_header, columns)))
    return parsed
1,036,804
Replace the information of a info field of a vcf variant line or a variant dict. Arguments: variant_line (str): A vcf formatted variant line variant_dict (dict): A variant dictionary keyword (str): The info field key annotation (str): If the annotation is a key, value p...
def replace_vcf_info(keyword, annotation, variant_line=None, variant_dict=None): new_info = '{0}={1}'.format(keyword, annotation) logger.debug("Replacing the variant information {0}".format(new_info)) fixed_variant = None new_info_list = [] if variant_line: logger.debug("...
1,037,009
Remove the information of a info field of a vcf variant line or a variant dict. Arguments: variant_line (str): A vcf formatted variant line variant_dict (dict): A variant dictionary keyword (str): The info field key Returns: variant_line (str): A annotated vari...
def remove_vcf_info(keyword, variant_line=None, variant_dict=None): logger.debug("Removing variant information {0}".format(keyword)) fixed_variant = None def get_new_info_string(info_string, keyword): new_info_list = [] splitted_info_string = info_string.split(';') ...
1,037,010
Add information to the info field of a vcf variant line. Arguments: variant_line (str): A vcf formatted variant line keyword (str): The info field key annotation (str): If the annotation is a key, value pair this is the string that represents the value ...
def add_vcf_info(keyword, variant_line=None, variant_dict=None, annotation=None): logger = logging.getLogger(__name__) if annotation: new_info = '{0}={1}'.format(keyword, annotation) else: new_info = keyword logger.debug("Adding new variant information {0}".format(new_info...
1,037,011
Add fileformat line to the header. Arguments: fileformat (str): The id of the info line
def add_fileformat(self, fileformat):
    """Add the fileformat line to the header.

    Arguments:
        fileformat (str): The id of the fileformat line.
    """
    logger.info("Adding fileformat to vcf: {0}".format(fileformat))
    self.fileformat = fileformat
1,037,091
Adds an arbitrary metadata line to the header. This must be a key value pair Arguments: key (str): The key of the metadata line value (str): The value of the metadata line
def add_meta_line(self, key, value):
    """Add an arbitrary key/value metadata line to the header.

    Arguments:
        key (str): The key of the metadata line.
        value (str): The value of the metadata line.
    """
    meta_line = '##{0}={1}'.format(key, value)
    logger.info("Adding meta line to vcf: {0}".format(meta_line))
    self.parse_meta_data(meta_line)
1,037,092
Add a filter line to the header. Arguments: filter_id (str): The id of the filter line description (str): A description of the info line
def add_filter(self, filter_id, description):
    """Add a FILTER line to the header.

    Arguments:
        filter_id (str): The id of the filter line.
        description (str): A description of the filter line.
    """
    filter_line = '##FILTER=<ID={0},Description="{1}">'.format(filter_id,
                                                               description)
    logger.info("Adding filter line to vcf: {0}".format(filter_line))
    self.parse_meta_data(filter_line)
1,037,093
Add a format line to the header. Arguments: format_id (str): The id of the format line number (str): Integer or any of [A,R,G,.] entry_type (str): Any of [Integer,Float,Flag,Character,String] description (str): A description of the info line
def add_format(self, format_id, number, entry_type, description): format_line = '##FORMAT=<ID={0},Number={1},Type={2},Description="{3}">'.format( format_id, number, entry_type, description ) logger.info("Adding format line to vcf: {0}".format(format_line)) self.parse...
1,037,094
Add a alternative allele format field line to the header. Arguments: alt_id (str): The id of the alternative line description (str): A description of the info line
def add_alt(self, alt_id, description):
    """Add an alternative allele (ALT) line to the header.

    Arguments:
        alt_id (str): The id of the alternative allele line.
        description (str): A description of the line.
    """
    alt_line = '##ALT=<ID={0},Description="{1}">'.format(alt_id, description)
    logger.info("Adding alternative allele line to vcf: {0}".format(alt_line))
    self.parse_meta_data(alt_line)
1,037,095
Add a contig line to the header. Arguments: contig_id (str): The id of the alternative line length (str): A description of the info line
def add_contig(self, contig_id, length):
    """Add a contig line to the header.

    Arguments:
        contig_id (str): The id of the contig line.
        length (str): The length of the contig.
    """
    contig_line = '##contig=<ID={0},length={1}>'.format(contig_id, length)
    logger.info("Adding contig line to vcf: {0}".format(contig_line))
    self.parse_meta_data(contig_line)
1,037,096
Expand given requirements file by extending it using pip freeze args: input_requirements_filename: the requirements filename to expand output_requirements_filename: the output filename for the expanded requirements file
def expand(conf, output_requirements_filename, input_requirements_filename): exit_if_file_not_exists(input_requirements_filename, conf) cireqs.expand_requirements( requirements_filename=input_requirements_filename, expanded_requirements_filename=output_requirements_filename, **conf....
1,037,108
Verifying that given requirements file is not missing any pins args: input_requirements_filename: requirements file to verify
def verify(conf, input_requirements_filename): exit_if_file_not_exists(input_requirements_filename, conf) cireqs.check_if_requirements_are_up_to_date( requirements_filename=input_requirements_filename, **conf._asdict()) click.echo(click.style('✓', fg='green') + " {} has been verified"....
1,037,110
Return the matches of the given state |methcoro| Args: state: see :class:`MatchState` Raises: APIException
async def get_matches(self, state: MatchState = MatchState.all_): matches = await self.connection('GET', 'tournaments/{}/matches'.format(self._tournament_id), state=state.value, parti...
1,037,127
Standard dict-like .get() method. Args: item (str): See :meth:`.__getitem__` for details. alt (default None): Alternative value, if item is not found. Returns: obj: `item` or `alt`, if item is not found.
def get(self, item, alt=None):
    """Standard dict-like ``.get()`` method.

    Args:
        item (str): Key to look up (see :meth:`.__getitem__` for details).
        alt (default None): Alternative value returned when `item`
            is not found or its value is None.

    Returns:
        obj: The stored value, or `alt` if not found / None.
    """
    try:
        found = self[item]
    except ValueError:
        return alt
    if found is None:
        return alt
    return found
1,037,151
set the tournament start date (and check in duration) |methcoro| Args: date: formatted date as YYYY/MM/DD (2017/02/14) time: formatted time as HH:MM (20:15) check_in_duration (optional): duration in minutes Raises: APIException
async def set_start_date(self, date: str, time: str, check_in_duration: int = None): date_time = datetime.strptime(date + ' ' + time, '%Y/%m/%d %H:%M') res = await self.connection('PUT', 'tournaments/{}'.format(self._id), '...
1,037,224
|methcoro| Args: match_win match_tie game_win game_tie bye Raises: APIException
async def setup_swiss_points(self, match_win: float = None, match_tie: float = None, game_win: float = None, game_tie: float = None, bye: float = None): params = {} if match_win is not None: params['pts_for_match_win'] = match_win if match_win is not None: params...
1,037,225
|methcoro| Args: match_win match_tie game_win game_tie Raises: APIException
async def setup_round_robin_points(self, match_win: float = None, match_tie: float = None, game_win: float = None, game_tie: float = None): params = {} if match_win is not None: params['rr_pts_for_match_win'] = match_win if match_win is not None: params['rr_pts_f...
1,037,226
update participants notifications for this tournament |methcoro| Args: on_match_open: Email registered Challonge participants when matches open up for them on_tournament_end: Email registered Challonge participants the results when this tournament ends Raises: ...
async def update_notifications(self, on_match_open: bool = None, on_tournament_end: bool = None): params = {} if on_match_open is not None: params['notify_users_when_matches_open'] = on_match_open if on_tournament_end is not None: params['notify_users_when_the_to...
1,037,227
|methcoro| Args: hide_forum: Hide the forum tab on your Challonge page show_rounds: Double Elimination only - Label each round above the bracket open_signup: Have Challonge host a sign-up page (otherwise, you manually add all participants) Raises: APIExc...
async def update_website_options(self, hide_forum: bool = None, show_rounds: bool = None, open_signup: bool = None): params = {} if hide_forum is not None: params['hide_forum'] = hide_forum if show_rounds is not None: params['show_rounds'] = show_rounds i...
1,037,228
|methcoro| Args: pairing: Raises: APIException
async def update_pairing_method(self, pairing: Pairing):
    """Update the tournament's pairing method.

    |methcoro|

    Args:
        pairing: the pairing method to use.

    Raises:
        APIException
    """
    use_sequential = pairing == Pairing.sequential
    await self.update(sequential_pairings=use_sequential)
1,037,229
get a participant by its id |methcoro| Args: p_id: participant id force_update (dfault=False): True to force an update to the Challonge API Returns: Participant: None if not found Raises: APIException
async def get_participant(self, p_id: int, force_update=False) -> Participant:
    """Get a participant by its id.

    |methcoro|

    Args:
        p_id: participant id
        force_update (default=False): True to force an update to the Challonge API

    Returns:
        Participant: None if not found

    Raises:
        APIException
    """
    participant = self._find_participant(p_id)
    if participant is not None and not force_update:
        return participant
    # Refresh the local cache from the API, then look again.
    await self.get_participants()
    return self._find_participant(p_id)
1,037,230
get all participants |methcoro| Args: force_update (default=False): True to force an update to the Challonge API Returns: list[Participant]: Raises: APIException
async def get_participants(self, force_update=False) -> list:
    """Get all participants.

    |methcoro|

    Args:
        force_update (default=False): True to force an update to the Challonge API

    Returns:
        list[Participant]:

    Raises:
        APIException
    """
    if force_update or self.participants is None:
        endpoint = 'tournaments/{}/participants'.format(self._id)
        res = await self.connection('GET', endpoint)
        self._refresh_participants_from_json(res)
    if self.participants:
        return self.participants
    return []
1,037,231
search a participant by (display) name |methcoro| Args: name: display name of the participant force_update (dfault=False): True to force an update to the Challonge API Returns: Participant: None if not found Raises: APIException
async def search_participant(self, name, force_update=False): if force_update or self.participants is None: await self.get_participants() if self.participants is not None: for p in self.participants: if p.name == name: return p ...
1,037,232
remove a participant from the tournament |methcoro| Args: p: the participant to remove Raises: APIException
async def remove_participant(self, p: Participant):
    """Remove a participant from the tournament.

    |methcoro|

    Args:
        p: the participant to remove

    Raises:
        APIException
    """
    route = 'tournaments/{}/participants/{}'.format(self._id, p._id)
    await self.connection('DELETE', route)
    # Keep the local cache consistent with the remote state.
    if p in self.participants:
        self.participants.remove(p)
1,037,235
get a single match by id |methcoro| Args: m_id: match id force_update (default=False): True to force an update to the Challonge API Returns: Match Raises: APIException
async def get_match(self, m_id, force_update=False) -> Match:
    """Get a single match by id.

    |methcoro|

    Args:
        m_id: match id
        force_update (default=False): True to force an update to the Challonge API

    Returns:
        Match

    Raises:
        APIException
    """
    match = self._find_match(m_id)
    if match is not None and not force_update:
        return match
    # Refresh the local cache from the API, then look again.
    await self.get_matches()
    return self._find_match(m_id)
1,037,236
get all matches (once the tournament is started) |methcoro| Args: force_update (default=False): True to force an update to the Challonge API Returns: list[Match]: Raises: APIException
async def get_matches(self, force_update=False) -> list: if force_update or self.matches is None: res = await self.connection('GET', 'tournaments/{}/matches'.format(self._id), include_attachments=1) ...
1,037,237
Generator that yields typed object names of the class (or object's class). Args: obj_or_cls (object): Class object or instance of class Returns: name (array): Names of class attributes that are strongly typed
def yield_typed(obj_or_cls): if not isinstance(obj_or_cls, type): obj_or_cls = type(obj_or_cls) for attrname in dir(obj_or_cls): if hasattr(obj_or_cls, attrname): attr = getattr(obj_or_cls, attrname) # !!! Important hardcoded value here !!! if (isinstance...
1,037,293
Construct the property. Args: name (str): Attribute (property) name Returns: prop (property): Custom property definition with support for typing
def __call__(self, name): priv = "_" + name # Reference to the variable's value # The following is a definition of a Python property. Properties have # get, set, and delete functions as well as documentation. The variable # "this" references the class object instance where t...
1,037,294
Get the choices for the given fields. Args: field (str): Name of field. Returns: List of tuples. [(name, value),...]
def get_choices_for(self, field):
    """Get the choices for the given field.

    Args:
        field (str): Name of the field.

    Returns:
        list: Tuples of (value, name) pairs.
    """
    field_choices = self._fields[field].choices
    if not isinstance(field_choices, six.string_types):
        return field_choices
    # A string refers to a named choice set resolved via the choices manager.
    resolved = self._choices_manager.get_all(field_choices)
    return [(entry['value'], entry['name']) for entry in resolved]
1,037,321
Fills the object's fields with given data dict. Internally calls the self._load_data() method. Args: data (dict): Data to fill object's fields. from_db (bool): if data coming from db then we will use related field type's _load_data method Returns: ...
def set_data(self, data, from_db=False):
    """Fill the object's fields with the given data dict.

    Internally calls ``self._load_data()``.

    Args:
        data (dict): Data used to fill the object's fields.
        from_db (bool): True when data comes from the db, so the related
            field types' ``_load_data`` handling is used.

    Returns:
        Self, to allow chaining.
    """
    self._load_data(data, from_db)
    return self
1,037,322
Looks for changed relation fields between new and old data (before/after save). Creates back_link references for updated fields. Args: old_data: Object's data before save.
def _handle_changed_fields(self, old_data): for link in self.get_links(is_set=False): fld_id = un_camel_id(link['field']) if not old_data or old_data.get(fld_id) != self._data[fld_id]: # self is new or linked model changed if self._data[fld_id]: ...
1,037,328
Recognize a node that we expect to be a scalar. Args: node: The node to recognize. expected_type: The type it is expected to be. Returns: A list of recognized types and an error message
def __recognize_scalar(self, node: yaml.Node, expected_type: Type) -> RecResult: logger.debug('Recognizing as a scalar') if (isinstance(node, yaml.ScalarNode) and node.tag == scalar_type_to_tag[expected_type]): return [expected_type], '' ...
1,037,346
Recognize a node that we expect to be a list of some kind. Args: node: The node to recognize. expected_type: List[...something...] Returns expected_type and the empty string if it was recognized, [] and an error message otherwise.
def __recognize_list(self, node: yaml.Node, expected_type: Type) -> RecResult: logger.debug('Recognizing as a list') if not isinstance(node, yaml.SequenceNode): message = '{}{}Expected a list here.'.format( node.start_mark, os.linesep) ...
1,037,347
Recognize a node that we expect to be a dict of some kind. Args: node: The node to recognize. expected_type: Dict[str, ...something...] Returns: expected_type if it was recognized, [] otherwise.
def __recognize_dict(self, node: yaml.Node, expected_type: Type) -> RecResult: logger.debug('Recognizing as a dict') if not issubclass(generic_type_args(expected_type)[0], str): raise RuntimeError( 'YAtiML only supports dicts with strings as ...
1,037,348
Recognize a node that we expect to be one of a union of types. Args: node: The node to recognize. expected_type: Union[...something...] Returns: The specific type that was recognized, multiple, or none.
def __recognize_union(self, node: yaml.Node, expected_type: Type) -> RecResult: logger.debug('Recognizing as a union') recognized_types = [] message = '' union_types = generic_type_args(expected_type) logger.debug('Union types {}'.format(union_t...
1,037,349
Recognize a user-defined class in the node. This tries to recognize only exactly the specified class. It \ recurses down into the class's attributes, but not to its \ subclasses. See also __recognize_user_classes(). Args: node: The node to recognize. expected_ty...
def __recognize_user_class(self, node: yaml.Node, expected_type: Type) -> RecResult: logger.debug('Recognizing as a user-defined class') loc_str = '{}{}'.format(node.start_mark, os.linesep) if hasattr(expected_type, 'yatiml_recognize'): try: ...
1,037,350
Uses riak http search query endpoint for advanced SOLR queries. Args: field (str): facet field count_deleted (bool): ignore deleted or not Returns: (dict): pairs of field values and number of counts
def distinct_values_of(self, field, count_deleted=False): solr_params = "facet=true&facet.field=%s&rows=0" % field result = self.riak_http_search_query(self.index_name, solr_params, count_deleted) facet_fields = result['facet_counts']['facet_fields'][field] keys = facet_fields[0...
1,037,404
Sends given tuples of list to multiget method and took riak objs' keys and data. For each multiget call, separate pools are used and after execution, pools are stopped. Args: key_list_tuple(list of tuples): [('bucket_type','bucket','riak_key')] Ex...
def riak_multi_get(self, key_list_tuple):
    """Fetch many riak objects at once via ``multiget``.

    A separate pool is created per call and always stopped afterwards.

    Args:
        key_list_tuple (list of tuples): [('bucket_type', 'bucket', 'riak_key')]

    Returns:
        The riak objects returned by ``multiget``.
    """
    pool = PyokoMG()
    try:
        return self._client.multiget(key_list_tuple, pool=pool)
    finally:
        # Stop the pool even if multiget raises, so worker threads are
        # never leaked (the original only stopped it on success).
        pool.stop()
1,037,407
Writes a copy of the objects current state to write-once mirror bucket. Args: data (dict): Model instance's all data for versioning. model (instance): Model instance. Returns: Key of version record. key (str): Version_bucket key.
def _write_version(self, data, model): vdata = {'data': data, 'key': model.key, 'model': model.Meta.bucket_name, 'timestamp': time.time()} obj = version_bucket.new(data=vdata) obj.add_index('key_bin', model.key) obj.add_index('m...
1,037,412
Creates a log entry for current object, Args: version_key(str): Version_bucket key from _write_version(). meta_data (dict): JSON serializable meta data for logging of save operation. {'lorem': 'ipsum', 'dolar': 5} index_fields (list): Tuple list for secondary ...
def _write_log(self, version_key, meta_data, index_fields): meta_data = meta_data or {} meta_data.update({ 'version_key': version_key, 'timestamp': time.time(), }) obj = log_bucket.new(data=meta_data) obj.add_index('version_key_bin', version_key) ...
1,037,413
If key is not None, tries to get obj from cache first. If not found, tries to get from riak and sets to cache. If key is None, then execute solr query and checks result. Returns obj data and key tuple or raises exception ObjectDoesNotExist or MultipleObjectsReturned. Args: ...
def get(self, key=None): if key: key = ub_to_str(key) if settings.ENABLE_CACHING: return self.get_from_cache(key) or self.set_to_cache(self._get_from_riak(key)) else: return self._get_from_riak(key) else: self._e...
1,037,418
Applies query ordering. New parameters are appended to current ones, overwriting existing ones. Args: **args: Order by fields names. Defaults to ascending, prepend with hyphen (-) for descending ordering.
def order_by(self, *args): if self._solr_locked: raise Exception("Query already executed, no changes can be made." "%s %s" % (self._solr_query, self._solr_params) ) for arg in args: if arg.startswith('-'): ...
1,037,421
Escapes query if it's not already escaped. Args: query: Query value. escaped (bool): expresses if query already escaped or not. Returns: Escaped query value.
def _escape_query(self, query, escaped=False): if escaped: return query query = six.text_type(query) for e in ['+', '-', '&&', '||', '!', '(', ')', '{', '}', '[', ']', '^', '"', '~', '*', '?', ':', ' ']: query = query.replace(e, "\\%s" % e) ...
1,037,424
Parses query_value according to query_type Args: modifier (str): Type of query. Exact, contains, lte etc. qval: Value partition of the query. Returns: Parsed query_value.
def _parse_query_modifier(self, modifier, qval, is_escaped): if modifier == 'range': if not qval[0]: start = '*' elif isinstance(qval[0], date): start = self._handle_date(qval[0]) elif isinstance(qval[0], datetime): sta...
1,037,425
Strips query modifier from key and call's the appropriate value modifier. Args: key (str): Query key val: Query value Returns: Parsed query key and value.
def _parse_query_key(self, key, val, is_escaped): if key.endswith('__contains'): key = key[:-10] val = self._parse_query_modifier('contains', val, is_escaped) elif key.endswith('__range'): key = key[:-7] val = self._parse_query_modifier('range', v...
1,037,426
Return the chromosome priority Arguments: chrom (str): The chromosome name from the vcf chrom_dict (dict): A map of chromosome names and their priority Return: priority (str): The priority for this chromosome
def get_chromosome_priority(chrom, chrom_dict={}): priority = 0 chrom = str(chrom).lstrip('chr') if chrom_dict: priority = chrom_dict.get(chrom, 0) else: try: if int(chrom) < 23: priority = int(chrom) except ValueError: ...
1,037,467
Sort the variants of a vcf file Args: vcf_handle mode (str): position or rank score Returns: sorted_variants (Iterable): An iterable with sorted variants
def sort_variants(vcf_handle): logger.debug("Creating temp file") temp_file = NamedTemporaryFile(delete=False) temp_file.close() logger.debug("Opening temp file with codecs") temp_file_handle = codecs.open( temp_file.name, mode='w', ...
1,037,468
Sort a modified variant file. Sorting is based on the first column and the POS. Uses unix sort to sort the variants and overwrites the infile. Args: infile : A string that is the path to a file mode : 'chromosome' or 'rank' outfile : The path to an outfile where the variant...
def sort_variant_file(infile): command = [ 'sort', ] command.append('-n') command.append('-k1') command.append('-k3') command = command + [infile, '-o', infile] logger.info("Start sorting variants...") logger.info("Sort command: {0}".format(' '.join(command))) ...
1,037,469
prepare the model fields, nodes and relations Args: node_name (str): name of the node we are currently processing attrs (dict): attribute dict class_type (str): Type of class. Can be one of these: 'ListNode', 'Model', 'Node'
def process_attributes_of_node(attrs, node_name, class_type): # print("Node: %s" % node_name) attrs['_nodes'] = {} attrs['_linked_models'] = defaultdict(list) attrs['_debug_linked_models'] = defaultdict(list) attrs['_lazy_linked_models'] = defaultdict(list) attrs...
1,037,743
Creates a new user, validate its credentials and returns it |funccoro| Args: username: username as specified on the challonge website api_key: key as found on the challonge `settings <https://challonge.com/settings/developer>`_ Returns: User: a logged in user if no exc...
async def get_user(username: str, api_key: str, **kwargs) -> User:
    """Create a new user, validate its credentials and return it.

    |funccoro|

    Args:
        username: username as specified on the challonge website
        api_key: key as found on the challonge
            `settings <https://challonge.com/settings/developer>`_

    Returns:
        User: a logged in user if no exception is raised

    Raises:
        APIException
    """
    user = User(username, api_key, **kwargs)
    await user.validate()
    return user
1,037,753
gets all user's tournaments |methcoro| Args: subdomain: *optional* subdomain needs to be given explicitely to get tournaments in a subdomain force_update: *optional* set to True to force the data update from Challonge Returns: list[Tournament]: list of all ...
async def get_tournaments(self, subdomain: str = None, force_update: bool = False) -> list: if self.tournaments is None: force_update = True self._subdomains_searched.append('' if subdomain is None else subdomain) elif subdomain is None and '' not in self._subdomains_sea...
1,037,759
creates a simple tournament with basic options |methcoro| Args: name: name of the new tournament url: url of the new tournament (http://challonge.com/url) tournament_type: Defaults to TournamentType.single_elimination params: optional params (see http://...
async def create_tournament(self, name: str, url: str, tournament_type: TournamentType = TournamentType.single_elimination, **params) -> Tournament: params.update({ 'name': name, 'url': url, 'tournament_type': tournament_type.value, }) res = await sel...
1,037,760
converts py2 unicode / py3 bytestring into str Args: string (unicode, byte_string): string to be converted Returns: (str)
def ub_to_str(string):
    """Convert a py2 unicode / py3 bytestring into ``str``.

    Args:
        string (unicode, byte_string): String to be converted.

    Returns:
        (str)
    """
    if isinstance(string, str):
        return string
    # On py2 the input is a unicode object; on py3 it is bytes.
    return str(string) if six.PY2 else string.decode()
1,037,765
Prettier print for nested data Args: input: Input data return_data (bool): Default False. Print outs if False, returns if True. Returns: None | Pretty formatted text representation of input data.
def pprnt(input, return_data=False): HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[32m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' import json, re result = json.dumps(input, sort_keys=True, indent=4) result = ...
1,037,768
Register user-defined classes with the Dumper. This enables the Dumper to write objects of your classes to a \ YAML file. Note that all the arguments are types, not instances! Args: dumper: Your dumper class(!), derived from yatiml.Dumper classes: One or more classes to add.
def add_to_dumper(dumper: Type, classes: List[Type]) -> None: if not isinstance(classes, list): classes = [classes] # type: ignore for class_ in classes: if issubclass(class_, enum.Enum): dumper.add_representer(class_, EnumRepresenter(class_)) elif issubclass(class_, st...
1,037,817
Display Warning. Method prints the warning message, message being given as an input. Arguments: message {string} -- The message to be displayed.
def warning(message, code='WARNING'):
    """Display a warning message with a timestamp and colored tag.

    Arguments:
        message {string} -- The message to be displayed.
    """
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    parts = [now, ' [', torn.plugins.colors.WARNING, code,
             torn.plugins.colors.ENDC, '] \t', message]
    print(''.join(parts))
1,037,868
Display Information. Method prints the information message, message being given as an input. Arguments: message {string} -- The message to be displayed.
def info(message, code='INFO'):
    """Print a timestamped informational line to stdout.

    Args:
        message (str): The message to be displayed.
        code (str): Bracketed label for the line; defaults to 'INFO'.
    """
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Color the label only; the rest of the line stays uncolored.
    label = torn.plugins.colors.OKBLUE + code + torn.plugins.colors.ENDC
    print(timestamp + ' [' + label + '] \t' + message)
1,037,869
Display Error. Method prints the error message, message being given as an input. Arguments: message {string} -- The message to be displayed.
def error(message, code='ERROR'):
    """Print a timestamped error line to stdout.

    Args:
        message (str): The message to be displayed.
        code (str): Bracketed label for the line; defaults to 'ERROR'.
    """
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Color the label only; the rest of the line stays uncolored.
    label = torn.plugins.colors.FAIL + code + torn.plugins.colors.ENDC
    print(timestamp + ' [' + label + '] \t' + message)
1,037,870
Sort values, but put numbers after alphabetically sorted words. This function is here to make outputs diff-compatible with Aleph. Example:: >>> sorted(["b", "1", "a"]) ['1', 'a', 'b'] >>> resorted(["b", "1", "a"]) ['a', 'b', '1'] Args: values (iterable): any iterab...
def resorted(values): if not values: return values values = sorted(values) # look for first word first_word = next( (cnt for cnt, val in enumerate(values) if val and not val[0].isdigit()), None ) # if not found, just return the values if first_wor...
1,037,979
Sets the value of the node to a scalar value. After this, is_scalar(type(value)) will return true. Args: value: The value to set this node to, a str, int, float, \ bool, or None.
def set_value(self, value: ScalarType) -> None: if isinstance(value, bool): value_str = 'true' if value else 'false' else: value_str = str(value) start_mark = self.yaml_node.start_mark end_mark = self.yaml_node.end_mark # If we're of a class type,...
1,038,042
Whether the node has an attribute with the given name. Use only if is_mapping() returns True. Args: attribute: The name of the attribute to check for. Returns: True iff the attribute is present.
def has_attribute(self, attribute: str) -> bool:
    """Whether the node has an attribute with the given name.

    Use only if is_mapping() returns True.

    Args:
        attribute: The name of the attribute to check for.

    Returns:
        True iff the attribute is present.
    """
    for key_node, _ in self.yaml_node.value:
        if key_node.value == attribute:
            return True
    return False
1,038,044
Returns the node representing the given attribute's value. Use only if is_mapping() returns true. Args: attribute: The name of the attribute to retrieve. Raises: KeyError: If the attribute does not exist. Returns: A node representing the value.
def get_attribute(self, attribute: str) -> 'Node': matches = [ value_node for key_node, value_node in self.yaml_node.value if key_node.value == attribute ] if len(matches) != 1: raise SeasoningError( 'Attribute not found, or found mult...
1,038,046
Remove an attribute from the node. Use only if is_mapping() returns True. Args: attribute: The name of the attribute to remove.
def remove_attribute(self, attribute: str) -> None:
    """Remove an attribute from the node.

    Use only if is_mapping() returns True. Does nothing if the
    attribute is not present.

    Args:
        attribute: The name of the attribute to remove.
    """
    index = self.__attr_index(attribute)
    if index is not None:
        del self.yaml_node.value[index]
1,038,048
Renames an attribute. Use only if is_mapping() returns true. If the attribute does not exist, this will do nothing. Args: attribute: The (old) name of the attribute to rename. new_name: The new name to rename it to.
def rename_attribute(self, attribute: str, new_name: str) -> None:
    """Rename an attribute.

    Use only if is_mapping() returns True. If the attribute does not
    exist, this will do nothing.

    Args:
        attribute: The (old) name of the attribute to rename.
        new_name: The new name to rename it to.
    """
    # Only the first matching key is renamed, mirroring a loop-and-break.
    match = next(
        (key for key, _ in self.yaml_node.value if key.value == attribute),
        None)
    if match is not None:
        match.value = new_name
1,038,049
Create an UnknownNode for a particular mapping node. The member functions will act on the contained node. Args: node: The node to operate on.
def __init__(self, recognizer: IRecognizer, node: yaml.Node) -> None:
    """Create an UnknownNode wrapping a particular mapping node.

    The member functions will act on the contained node.

    Args:
        recognizer: The recognizer to use for this node.
        node: The node to operate on.
    """
    self.yaml_node = node
    self.__recognizer = recognizer
1,038,055
Require the node to be a scalar. If additional arguments are passed, these are taken as a list \ of valid types; if the node matches one of these, then it is \ accepted. Example: # Match either an int or a string node.require_scalar(int, str) Arguments:...
def require_scalar(self, *args: Type) -> None: node = Node(self.yaml_node) if len(args) == 0: if not node.is_scalar(): raise RecognitionError(('{}{}A scalar is required').format( self.yaml_node.start_mark, os.linesep)) else: fo...
1,038,056
Require an attribute on the node to exist. If `typ` is given, the attribute must have this type. Args: attribute: The name of the attribute / mapping key. typ: The type the attribute must have.
def require_attribute(self, attribute: str, typ: Type = _Any) -> None: attr_nodes = [ value_node for key_node, value_node in self.yaml_node.value if key_node.value == attribute ] if len(attr_nodes) == 0: raise RecognitionError( ('{}{}M...
1,038,059
Stores the data at self._data; actual object creation is done in _generate_instances(). Args: data (list): List of dicts. from_db (bool): Default False. Whether this data is coming from the DB or not.
def _load_data(self, data, from_db=False): self._data = data[:] self.setattrs( values=[], node_stack=[], node_dict={}, ) self._from_db = from_db
1,038,127
Create a ListNode instance from node_data Args: node_data (dict): Data to create ListNode item. Returns: ListNode item.
def _make_instance(self, node_data): node_data['from_db'] = self._from_db clone = self.__call__(**node_data) clone.setattrs(container = self, _is_item = True) for name in self._nodes: _name = un_camel(name) if _name in node_data: # ch...
1,038,129
Allow usage of "del" statement on ListNodes with bracket notation. Args: obj: ListNode item or relation key. Raises: TypeError: If it's called on a ListNode item (instead of the ListNode itself)
def __delitem__(self, obj, sync=True): if self._is_item: raise TypeError("This an item of the parent ListNode") list(self._generate_instances()) _lnk_key = None if isinstance(obj, six.string_types): _lnk_key = obj _obj = self.node_dict[obj] ...
1,038,135
Slice a data object based on its index, either by value (.loc) or position (.iloc). Args: key: Single index value, slice, tuple, or list of indices/positionals Returns: data: Slice of self
def slice_naive(self, key):
    """Slice this data object on its index.

    Args:
        key: Single index value, slice, tuple, or list of indices/positionals

    Returns:
        A new object of the same class holding the selected slice of self.
    """
    normalized = check_key(self, key)
    return self.__class__(self.loc[normalized])
1,038,226
Naively (on index) slice the field data and values. Args: key: Int, slice, or iterable to select data and values Returns: field: Sliced field object
def slice_naive(self, key):
    """Naively (on index) slice the field data and values.

    Args:
        key: Int, slice, or iterable to select data and values

    Returns:
        A new field object holding the selected data and field values.
    """
    normalized = check_key(self, key)
    # Map index labels to positions so field_values can be selected
    # positionally in step with the sliced data.
    positions = pd.Series(range(len(self)), index=self.index)
    selected_values = self.field_values[positions[normalized].values]
    return self.__class__(self.loc[normalized], field_values=selected_values)
1,038,239
change the url of that attachment |methcoro| Args: url: url you want to change description: *optional* description for your attachment Raises: ValueError: url must not be None APIException
async def change_url(self, url: str, description: str = None):
    """Change the url of this attachment.

    |methcoro|

    Args:
        url: url you want to change
        description: *optional* description for your attachment

    Raises:
        ValueError: url must not be None
        APIException
    """
    kwargs = {'url': url, 'description': description}
    await self._change(**kwargs)
1,038,637
change the file of that attachment |methcoro| Warning: |unstable| Args: file_path: path to the file you want to add / modify description: *optional* description for your attachment Raises: ValueError: file_path must not be None ...
async def change_file(self, file_path: str, description: str = None):
    """Change the file of this attachment.

    |methcoro|

    Warning:
        |unstable|

    Args:
        file_path: path to the file you want to add / modify
        description: *optional* description for your attachment

    Raises:
        ValueError: file_path must not be None
        APIException
    """
    # Documented contract promises ValueError (open(None) would raise
    # TypeError instead).
    if file_path is None:
        raise ValueError('file_path must not be None')
    with open(file_path, 'rb') as f:
        contents = f.read()
    # Bug fix: description was previously accepted but silently dropped;
    # forward it like change_url() does.
    await self._change(asset=contents, description=description)
1,038,638
Bindings to GNU Lightning library. Args: liblightning: Set to override path to liblightning. program: Set to override argument to init_jit, used with bfd.
def __init__(self, liblightning=None, program=None):
    """Bindings to GNU Lightning library.

    Args:
        liblightning: Set to override path to liblightning.
        program: Set to override argument to init_jit, used with bfd.
    """
    # Order matters: load the shared library first, then declare the
    # call signatures, then initialize the JIT (presumably via ctypes /
    # init_jit — confirm against the helpers' definitions).
    self._load(liblightning)
    self._set_signatures()
    self._init()
    # No compiled code produced yet.
    self._executable = None
1,038,714
Converts any sequence or mapping to list or OrderedDict Stops at anything that isn't a sequence or a mapping. One day, we'll extract the comments and formatting and store \ them out-of-band. Args: mapping: The mapping of constructed subobjects to edit
def __to_plain_containers(self, container: Union[CommentedSeq, CommentedMap] ) -> Union[OrderedDict, list]: if isinstance(container, CommentedMap): new_container = OrderedDict() # type: Union[OrderedDict, list] for key...
1,038,721
Checks that the object matches the given type. Like isinstance(), but will work with union types using Union, \ Dict and List. Args: obj: The object to check type_: The type to check against Returns: True iff obj is of type type_
def __type_matches(self, obj: Any, type_: Type) -> bool: if is_generic_union(type_): for t in generic_type_args(type_): if self.__type_matches(obj, t): return True return False elif is_generic_list(type_): if not isinstance...
1,038,723
Checks that all required attributes are present. Also checks that they're of the correct type. Args: mapping: The mapping with subobjects of this object. Raises: RecognitionError: if an attribute is missing or the type \ is incorrect.
def __check_no_missing_attributes(self, node: yaml.Node, mapping: CommentedMap) -> None: logger.debug('Checking presence of required attributes') for name, type_, required in class_subobjects(self.class_): if required and name not in mapping: ...
1,038,724
Strips tags from extra attributes. This prevents nodes under attributes that are not part of our \ data model from being converted to objects. They'll be plain \ CommentedMaps instead, which then get converted to OrderedDicts \ for the user. Args: node: The node to ...
def __strip_extra_attributes(self, node: yaml.Node, known_attrs: List[str]) -> None: known_keys = list(known_attrs) known_keys.remove('self') if 'yatiml_extra' in known_keys: known_keys.remove('yatiml_extra') for key_node, value_node...
1,038,726
Strips tags from mappings in the tree headed by node. This keeps yaml from constructing any objects in this tree. Args: node: Head of the tree to strip
def __strip_tags(self, node: yaml.Node) -> None: if isinstance(node, yaml.SequenceNode): for subnode in node.value: self.__strip_tags(subnode) elif isinstance(node, yaml.MappingNode): node.tag = 'tag:yaml.org,2002:map' for key_node, value_node...
1,038,727
Generate a file from the current template and given arguments. Warning: Make certain to check the formatted editor for correctness! Args: args: Positional arguments to update the template kwargs: Keyword arguments to update the template Returns: ...
def compose(self, *args, **kwargs): linebreak = kwargs.pop("linebreak", "\n") # Update the internally stored args/kwargs from which formatting arguments come if len(args) > 0: self.args = args self._update(**kwargs) # Format string arguments (for the m...
1,038,771
Print the vcf headers. If a result file is provided headers will be printed here, otherwise they are printed to stdout. Args: head (HeaderParser): A vcf header object outfile (FileHandle): A file handle silent (Bool): If nothing should be printed.
def print_headers(head, outfile=None, silent=False):
    """Print the vcf headers.

    If an output file handle is provided the headers are written there;
    otherwise they go to stdout (unless silent is set).

    Args:
        head (HeaderParser): A vcf header object
        outfile (FileHandle): A file handle
        silent (bool): If nothing should be printed to stdout.
    """
    for line in head.print_header():
        if outfile:
            outfile.write(line + '\n')
        elif not silent:
            print(line)
1,038,782